Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/user_mad.c             | 129
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c           |  12
-rw-r--r--  drivers/infiniband/core/verbs.c                |  12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c      |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c        |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c         |  16
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c   |   3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         | 113
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c        |  22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_wqe.h        |   3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h           |  15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c        | 177
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |  72
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  26
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c      |   7
18 files changed, 458 insertions, 158 deletions
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index aed5ca23fb22..5ea741f47fc8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -31,7 +31,7 @@ | |||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
32 | * SOFTWARE. | 32 | * SOFTWARE. |
33 | * | 33 | * |
34 | * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $ | 34 | * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $ |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
@@ -110,13 +110,13 @@ struct ib_umad_device { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | struct ib_umad_file { | 112 | struct ib_umad_file { |
113 | struct ib_umad_port *port; | 113 | struct ib_umad_port *port; |
114 | struct list_head recv_list; | 114 | struct list_head recv_list; |
115 | struct list_head port_list; | 115 | struct list_head port_list; |
116 | spinlock_t recv_lock; | 116 | spinlock_t recv_lock; |
117 | wait_queue_head_t recv_wait; | 117 | wait_queue_head_t recv_wait; |
118 | struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; | 118 | struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; |
119 | struct ib_mr *mr[IB_UMAD_MAX_AGENTS]; | 119 | int agents_dead; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | struct ib_umad_packet { | 122 | struct ib_umad_packet { |
@@ -145,6 +145,12 @@ static void ib_umad_release_dev(struct kref *ref) | |||
145 | kfree(dev); | 145 | kfree(dev); |
146 | } | 146 | } |
147 | 147 | ||
148 | /* caller must hold port->mutex at least for reading */ | ||
149 | static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) | ||
150 | { | ||
151 | return file->agents_dead ? NULL : file->agent[id]; | ||
152 | } | ||
153 | |||
148 | static int queue_packet(struct ib_umad_file *file, | 154 | static int queue_packet(struct ib_umad_file *file, |
149 | struct ib_mad_agent *agent, | 155 | struct ib_mad_agent *agent, |
150 | struct ib_umad_packet *packet) | 156 | struct ib_umad_packet *packet) |
@@ -152,10 +158,11 @@ static int queue_packet(struct ib_umad_file *file, | |||
152 | int ret = 1; | 158 | int ret = 1; |
153 | 159 | ||
154 | down_read(&file->port->mutex); | 160 | down_read(&file->port->mutex); |
161 | |||
155 | for (packet->mad.hdr.id = 0; | 162 | for (packet->mad.hdr.id = 0; |
156 | packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; | 163 | packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; |
157 | packet->mad.hdr.id++) | 164 | packet->mad.hdr.id++) |
158 | if (agent == file->agent[packet->mad.hdr.id]) { | 165 | if (agent == __get_agent(file, packet->mad.hdr.id)) { |
159 | spin_lock_irq(&file->recv_lock); | 166 | spin_lock_irq(&file->recv_lock); |
160 | list_add_tail(&packet->list, &file->recv_list); | 167 | list_add_tail(&packet->list, &file->recv_list); |
161 | spin_unlock_irq(&file->recv_lock); | 168 | spin_unlock_irq(&file->recv_lock); |
@@ -327,7 +334,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
327 | 334 | ||
328 | down_read(&file->port->mutex); | 335 | down_read(&file->port->mutex); |
329 | 336 | ||
330 | agent = file->agent[packet->mad.hdr.id]; | 337 | agent = __get_agent(file, packet->mad.hdr.id); |
331 | if (!agent) { | 338 | if (!agent) { |
332 | ret = -EINVAL; | 339 | ret = -EINVAL; |
333 | goto err_up; | 340 | goto err_up; |
@@ -481,7 +488,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg) | |||
481 | } | 488 | } |
482 | 489 | ||
483 | for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) | 490 | for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) |
484 | if (!file->agent[agent_id]) | 491 | if (!__get_agent(file, agent_id)) |
485 | goto found; | 492 | goto found; |
486 | 493 | ||
487 | ret = -ENOMEM; | 494 | ret = -ENOMEM; |
@@ -505,29 +512,15 @@ found: | |||
505 | goto out; | 512 | goto out; |
506 | } | 513 | } |
507 | 514 | ||
508 | file->agent[agent_id] = agent; | ||
509 | |||
510 | file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE); | ||
511 | if (IS_ERR(file->mr[agent_id])) { | ||
512 | ret = -ENOMEM; | ||
513 | goto err; | ||
514 | } | ||
515 | |||
516 | if (put_user(agent_id, | 515 | if (put_user(agent_id, |
517 | (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { | 516 | (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { |
518 | ret = -EFAULT; | 517 | ret = -EFAULT; |
519 | goto err_mr; | 518 | ib_unregister_mad_agent(agent); |
519 | goto out; | ||
520 | } | 520 | } |
521 | 521 | ||
522 | file->agent[agent_id] = agent; | ||
522 | ret = 0; | 523 | ret = 0; |
523 | goto out; | ||
524 | |||
525 | err_mr: | ||
526 | ib_dereg_mr(file->mr[agent_id]); | ||
527 | |||
528 | err: | ||
529 | file->agent[agent_id] = NULL; | ||
530 | ib_unregister_mad_agent(agent); | ||
531 | 524 | ||
532 | out: | 525 | out: |
533 | up_write(&file->port->mutex); | 526 | up_write(&file->port->mutex); |
@@ -536,27 +529,29 @@ out: | |||
536 | 529 | ||
537 | static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg) | 530 | static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg) |
538 | { | 531 | { |
532 | struct ib_mad_agent *agent = NULL; | ||
539 | u32 id; | 533 | u32 id; |
540 | int ret = 0; | 534 | int ret = 0; |
541 | 535 | ||
542 | down_write(&file->port->mutex); | 536 | if (get_user(id, (u32 __user *) arg)) |
537 | return -EFAULT; | ||
543 | 538 | ||
544 | if (get_user(id, (u32 __user *) arg)) { | 539 | down_write(&file->port->mutex); |
545 | ret = -EFAULT; | ||
546 | goto out; | ||
547 | } | ||
548 | 540 | ||
549 | if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) { | 541 | if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { |
550 | ret = -EINVAL; | 542 | ret = -EINVAL; |
551 | goto out; | 543 | goto out; |
552 | } | 544 | } |
553 | 545 | ||
554 | ib_dereg_mr(file->mr[id]); | 546 | agent = file->agent[id]; |
555 | ib_unregister_mad_agent(file->agent[id]); | ||
556 | file->agent[id] = NULL; | 547 | file->agent[id] = NULL; |
557 | 548 | ||
558 | out: | 549 | out: |
559 | up_write(&file->port->mutex); | 550 | up_write(&file->port->mutex); |
551 | |||
552 | if (agent) | ||
553 | ib_unregister_mad_agent(agent); | ||
554 | |||
560 | return ret; | 555 | return ret; |
561 | } | 556 | } |
562 | 557 | ||
@@ -621,23 +616,29 @@ static int ib_umad_close(struct inode *inode, struct file *filp) | |||
621 | struct ib_umad_file *file = filp->private_data; | 616 | struct ib_umad_file *file = filp->private_data; |
622 | struct ib_umad_device *dev = file->port->umad_dev; | 617 | struct ib_umad_device *dev = file->port->umad_dev; |
623 | struct ib_umad_packet *packet, *tmp; | 618 | struct ib_umad_packet *packet, *tmp; |
619 | int already_dead; | ||
624 | int i; | 620 | int i; |
625 | 621 | ||
626 | down_write(&file->port->mutex); | 622 | down_write(&file->port->mutex); |
627 | for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) | 623 | |
628 | if (file->agent[i]) { | 624 | already_dead = file->agents_dead; |
629 | ib_dereg_mr(file->mr[i]); | 625 | file->agents_dead = 1; |
630 | ib_unregister_mad_agent(file->agent[i]); | ||
631 | } | ||
632 | 626 | ||
633 | list_for_each_entry_safe(packet, tmp, &file->recv_list, list) | 627 | list_for_each_entry_safe(packet, tmp, &file->recv_list, list) |
634 | kfree(packet); | 628 | kfree(packet); |
635 | 629 | ||
636 | list_del(&file->port_list); | 630 | list_del(&file->port_list); |
637 | up_write(&file->port->mutex); | ||
638 | 631 | ||
639 | kfree(file); | 632 | downgrade_write(&file->port->mutex); |
633 | |||
634 | if (!already_dead) | ||
635 | for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) | ||
636 | if (file->agent[i]) | ||
637 | ib_unregister_mad_agent(file->agent[i]); | ||
638 | |||
639 | up_read(&file->port->mutex); | ||
640 | 640 | ||
641 | kfree(file); | ||
641 | kref_put(&dev->ref, ib_umad_release_dev); | 642 | kref_put(&dev->ref, ib_umad_release_dev); |
642 | 643 | ||
643 | return 0; | 644 | return 0; |
@@ -801,7 +802,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
801 | goto err_class; | 802 | goto err_class; |
802 | port->sm_dev->owner = THIS_MODULE; | 803 | port->sm_dev->owner = THIS_MODULE; |
803 | port->sm_dev->ops = &umad_sm_fops; | 804 | port->sm_dev->ops = &umad_sm_fops; |
804 | kobject_set_name(&port->dev->kobj, "issm%d", port->dev_num); | 805 | kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num); |
805 | if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1)) | 806 | if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1)) |
806 | goto err_sm_cdev; | 807 | goto err_sm_cdev; |
807 | 808 | ||
@@ -863,14 +864,36 @@ static void ib_umad_kill_port(struct ib_umad_port *port) | |||
863 | 864 | ||
864 | port->ib_dev = NULL; | 865 | port->ib_dev = NULL; |
865 | 866 | ||
866 | list_for_each_entry(file, &port->file_list, port_list) | 867 | /* |
867 | for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) { | 868 | * Now go through the list of files attached to this port and |
868 | if (!file->agent[id]) | 869 | * unregister all of their MAD agents. We need to hold |
869 | continue; | 870 | * port->mutex while doing this to avoid racing with |
870 | ib_dereg_mr(file->mr[id]); | 871 | * ib_umad_close(), but we can't hold the mutex for writing |
871 | ib_unregister_mad_agent(file->agent[id]); | 872 | * while calling ib_unregister_mad_agent(), since that might |
872 | file->agent[id] = NULL; | 873 | * deadlock by calling back into queue_packet(). So we |
873 | } | 874 | * downgrade our lock to a read lock, and then drop and |
875 | * reacquire the write lock for the next iteration. | ||
876 | * | ||
877 | * We do list_del_init() on the file's list_head so that the | ||
878 | * list_del in ib_umad_close() is still OK, even after the | ||
879 | * file is removed from the list. | ||
880 | */ | ||
881 | while (!list_empty(&port->file_list)) { | ||
882 | file = list_entry(port->file_list.next, struct ib_umad_file, | ||
883 | port_list); | ||
884 | |||
885 | file->agents_dead = 1; | ||
886 | list_del_init(&file->port_list); | ||
887 | |||
888 | downgrade_write(&port->mutex); | ||
889 | |||
890 | for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) | ||
891 | if (file->agent[id]) | ||
892 | ib_unregister_mad_agent(file->agent[id]); | ||
893 | |||
894 | up_read(&port->mutex); | ||
895 | down_write(&port->mutex); | ||
896 | } | ||
874 | 897 | ||
875 | up_write(&port->mutex); | 898 | up_write(&port->mutex); |
876 | 899 | ||
@@ -913,7 +936,7 @@ static void ib_umad_add_one(struct ib_device *device) | |||
913 | 936 | ||
914 | err: | 937 | err: |
915 | while (--i >= s) | 938 | while (--i >= s) |
916 | ib_umad_kill_port(&umad_dev->port[i]); | 939 | ib_umad_kill_port(&umad_dev->port[i - s]); |
917 | 940 | ||
918 | kref_put(&umad_dev->ref, ib_umad_release_dev); | 941 | kref_put(&umad_dev->ref, ib_umad_release_dev); |
919 | } | 942 | } |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 63a74151c60b..ed45da892b1c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -708,7 +708,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, | |||
708 | resp->wc[i].opcode = wc[i].opcode; | 708 | resp->wc[i].opcode = wc[i].opcode; |
709 | resp->wc[i].vendor_err = wc[i].vendor_err; | 709 | resp->wc[i].vendor_err = wc[i].vendor_err; |
710 | resp->wc[i].byte_len = wc[i].byte_len; | 710 | resp->wc[i].byte_len = wc[i].byte_len; |
711 | resp->wc[i].imm_data = wc[i].imm_data; | 711 | resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; |
712 | resp->wc[i].qp_num = wc[i].qp_num; | 712 | resp->wc[i].qp_num = wc[i].qp_num; |
713 | resp->wc[i].src_qp = wc[i].src_qp; | 713 | resp->wc[i].src_qp = wc[i].src_qp; |
714 | resp->wc[i].wc_flags = wc[i].wc_flags; | 714 | resp->wc[i].wc_flags = wc[i].wc_flags; |
@@ -908,7 +908,12 @@ retry: | |||
908 | if (ret) | 908 | if (ret) |
909 | goto err_destroy; | 909 | goto err_destroy; |
910 | 910 | ||
911 | resp.qp_handle = uobj->uobject.id; | 911 | resp.qp_handle = uobj->uobject.id; |
912 | resp.max_recv_sge = attr.cap.max_recv_sge; | ||
913 | resp.max_send_sge = attr.cap.max_send_sge; | ||
914 | resp.max_recv_wr = attr.cap.max_recv_wr; | ||
915 | resp.max_send_wr = attr.cap.max_send_wr; | ||
916 | resp.max_inline_data = attr.cap.max_inline_data; | ||
912 | 917 | ||
913 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 918 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
914 | &resp, sizeof resp)) { | 919 | &resp, sizeof resp)) { |
@@ -1135,7 +1140,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
1135 | next->num_sge = user_wr->num_sge; | 1140 | next->num_sge = user_wr->num_sge; |
1136 | next->opcode = user_wr->opcode; | 1141 | next->opcode = user_wr->opcode; |
1137 | next->send_flags = user_wr->send_flags; | 1142 | next->send_flags = user_wr->send_flags; |
1138 | next->imm_data = user_wr->imm_data; | 1143 | next->imm_data = (__be32 __force) user_wr->imm_data; |
1139 | 1144 | ||
1140 | if (qp->qp_type == IB_QPT_UD) { | 1145 | if (qp->qp_type == IB_QPT_UD) { |
1141 | next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr, | 1146 | next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr, |
@@ -1701,7 +1706,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | |||
1701 | } | 1706 | } |
1702 | 1707 | ||
1703 | attr.max_wr = cmd.max_wr; | 1708 | attr.max_wr = cmd.max_wr; |
1704 | attr.max_sge = cmd.max_sge; | ||
1705 | attr.srq_limit = cmd.srq_limit; | 1709 | attr.srq_limit = cmd.srq_limit; |
1706 | 1710 | ||
1707 | ret = ib_modify_srq(srq, &attr, cmd.attr_mask); | 1711 | ret = ib_modify_srq(srq, &attr, cmd.attr_mask); |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4186cc888ea5..4c15e112736c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -325,16 +325,8 @@ EXPORT_SYMBOL(ib_destroy_cq); | |||
325 | int ib_resize_cq(struct ib_cq *cq, | 325 | int ib_resize_cq(struct ib_cq *cq, |
326 | int cqe) | 326 | int cqe) |
327 | { | 327 | { |
328 | int ret; | 328 | return cq->device->resize_cq ? |
329 | 329 | cq->device->resize_cq(cq, cqe) : -ENOSYS; | |
330 | if (!cq->device->resize_cq) | ||
331 | return -ENOSYS; | ||
332 | |||
333 | ret = cq->device->resize_cq(cq, &cqe); | ||
334 | if (!ret) | ||
335 | cq->cqe = cqe; | ||
336 | |||
337 | return ret; | ||
338 | } | 330 | } |
339 | EXPORT_SYMBOL(ib_resize_cq); | 331 | EXPORT_SYMBOL(ib_resize_cq); |
340 | 332 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 25ebab64bc42..c3bec7490f52 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -97,7 +97,7 @@ static void poll_catas(unsigned long dev_ptr) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | spin_lock_irqsave(&catas_lock, flags); | 99 | spin_lock_irqsave(&catas_lock, flags); |
100 | if (dev->catas_err.stop) | 100 | if (!dev->catas_err.stop) |
101 | mod_timer(&dev->catas_err.timer, | 101 | mod_timer(&dev->catas_err.timer, |
102 | jiffies + MTHCA_CATAS_POLL_INTERVAL); | 102 | jiffies + MTHCA_CATAS_POLL_INTERVAL); |
103 | spin_unlock_irqrestore(&catas_lock, flags); | 103 | spin_unlock_irqrestore(&catas_lock, flags); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 49f211d55df7..9ed34587fc5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1060,6 +1060,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, | |||
1060 | dev_lim->hca.arbel.resize_srq = field & 1; | 1060 | dev_lim->hca.arbel.resize_srq = field & 1; |
1061 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET); | 1061 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET); |
1062 | dev_lim->max_sg = min_t(int, field, dev_lim->max_sg); | 1062 | dev_lim->max_sg = min_t(int, field, dev_lim->max_sg); |
1063 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET); | ||
1064 | dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz); | ||
1063 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); | 1065 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); |
1064 | dev_lim->mpt_entry_sz = size; | 1066 | dev_lim->mpt_entry_sz = size; |
1065 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); | 1067 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index f98e23555826..4a8adcef2079 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -258,7 +258,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
258 | { | 258 | { |
259 | struct mthca_cq *cq; | 259 | struct mthca_cq *cq; |
260 | struct mthca_cqe *cqe; | 260 | struct mthca_cqe *cqe; |
261 | int prod_index; | 261 | u32 prod_index; |
262 | int nfreed = 0; | 262 | int nfreed = 0; |
263 | 263 | ||
264 | spin_lock_irq(&dev->cq_table.lock); | 264 | spin_lock_irq(&dev->cq_table.lock); |
@@ -293,19 +293,15 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
293 | * Now sweep backwards through the CQ, removing CQ entries | 293 | * Now sweep backwards through the CQ, removing CQ entries |
294 | * that match our QP by copying older entries on top of them. | 294 | * that match our QP by copying older entries on top of them. |
295 | */ | 295 | */ |
296 | while (prod_index > cq->cons_index) { | 296 | while ((int) --prod_index - (int) cq->cons_index >= 0) { |
297 | cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); | 297 | cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); |
298 | if (cqe->my_qpn == cpu_to_be32(qpn)) { | 298 | if (cqe->my_qpn == cpu_to_be32(qpn)) { |
299 | if (srq) | 299 | if (srq) |
300 | mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); | 300 | mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); |
301 | ++nfreed; | 301 | ++nfreed; |
302 | } | 302 | } else if (nfreed) |
303 | else if (nfreed) | 303 | memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), |
304 | memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & | 304 | cqe, MTHCA_CQ_ENTRY_SIZE); |
305 | cq->ibcq.cqe), | ||
306 | cqe, | ||
307 | MTHCA_CQ_ENTRY_SIZE); | ||
308 | --prod_index; | ||
309 | } | 305 | } |
310 | 306 | ||
311 | if (nfreed) { | 307 | if (nfreed) { |
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e7e5d3b4f004..497ff794ef6a 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -131,6 +131,7 @@ struct mthca_limits { | |||
131 | int max_sg; | 131 | int max_sg; |
132 | int num_qps; | 132 | int num_qps; |
133 | int max_wqes; | 133 | int max_wqes; |
134 | int max_desc_sz; | ||
134 | int max_qp_init_rdma; | 135 | int max_qp_init_rdma; |
135 | int reserved_qps; | 136 | int reserved_qps; |
136 | int num_srqs; | 137 | int num_srqs; |
@@ -154,6 +155,7 @@ struct mthca_limits { | |||
154 | int reserved_mcgs; | 155 | int reserved_mcgs; |
155 | int num_pds; | 156 | int num_pds; |
156 | int reserved_pds; | 157 | int reserved_pds; |
158 | u32 page_size_cap; | ||
157 | u32 flags; | 159 | u32 flags; |
158 | u8 port_width_cap; | 160 | u8 port_width_cap; |
159 | }; | 161 | }; |
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 45c6328e780c..147f248a8073 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -168,6 +168,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim | |||
168 | mdev->limits.max_srq_wqes = dev_lim->max_srq_sz; | 168 | mdev->limits.max_srq_wqes = dev_lim->max_srq_sz; |
169 | mdev->limits.reserved_srqs = dev_lim->reserved_srqs; | 169 | mdev->limits.reserved_srqs = dev_lim->reserved_srqs; |
170 | mdev->limits.reserved_eecs = dev_lim->reserved_eecs; | 170 | mdev->limits.reserved_eecs = dev_lim->reserved_eecs; |
171 | mdev->limits.max_desc_sz = dev_lim->max_desc_sz; | ||
171 | /* | 172 | /* |
172 | * Subtract 1 from the limit because we need to allocate a | 173 | * Subtract 1 from the limit because we need to allocate a |
173 | * spare CQE so the HCA HW can tell the difference between an | 174 | * spare CQE so the HCA HW can tell the difference between an |
@@ -181,6 +182,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim | |||
181 | mdev->limits.reserved_uars = dev_lim->reserved_uars; | 182 | mdev->limits.reserved_uars = dev_lim->reserved_uars; |
182 | mdev->limits.reserved_pds = dev_lim->reserved_pds; | 183 | mdev->limits.reserved_pds = dev_lim->reserved_pds; |
183 | mdev->limits.port_width_cap = dev_lim->max_port_width; | 184 | mdev->limits.port_width_cap = dev_lim->max_port_width; |
185 | mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1); | ||
184 | mdev->limits.flags = dev_lim->flags; | 186 | mdev->limits.flags = dev_lim->flags; |
185 | 187 | ||
186 | /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. | 188 | /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6b0166668269..4cc7e2846df1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -90,6 +90,7 @@ static int mthca_query_device(struct ib_device *ibdev, | |||
90 | memcpy(&props->node_guid, out_mad->data + 12, 8); | 90 | memcpy(&props->node_guid, out_mad->data + 12, 8); |
91 | 91 | ||
92 | props->max_mr_size = ~0ull; | 92 | props->max_mr_size = ~0ull; |
93 | props->page_size_cap = mdev->limits.page_size_cap; | ||
93 | props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; | 94 | props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; |
94 | props->max_qp_wr = mdev->limits.max_wqes; | 95 | props->max_qp_wr = mdev->limits.max_wqes; |
95 | props->max_sge = mdev->limits.max_sg; | 96 | props->max_sge = mdev->limits.max_sg; |
@@ -615,11 +616,11 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
615 | return ERR_PTR(err); | 616 | return ERR_PTR(err); |
616 | } | 617 | } |
617 | 618 | ||
618 | init_attr->cap.max_inline_data = 0; | ||
619 | init_attr->cap.max_send_wr = qp->sq.max; | 619 | init_attr->cap.max_send_wr = qp->sq.max; |
620 | init_attr->cap.max_recv_wr = qp->rq.max; | 620 | init_attr->cap.max_recv_wr = qp->rq.max; |
621 | init_attr->cap.max_send_sge = qp->sq.max_gs; | 621 | init_attr->cap.max_send_sge = qp->sq.max_gs; |
622 | init_attr->cap.max_recv_sge = qp->rq.max_gs; | 622 | init_attr->cap.max_recv_sge = qp->rq.max_gs; |
623 | init_attr->cap.max_inline_data = qp->max_inline_data; | ||
623 | 624 | ||
624 | return &qp->ibqp; | 625 | return &qp->ibqp; |
625 | } | 626 | } |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index bcd4b01a339c..1e73947b4702 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -251,6 +251,7 @@ struct mthca_qp { | |||
251 | struct mthca_wq sq; | 251 | struct mthca_wq sq; |
252 | enum ib_sig_type sq_policy; | 252 | enum ib_sig_type sq_policy; |
253 | int send_wqe_offset; | 253 | int send_wqe_offset; |
254 | int max_inline_data; | ||
254 | 255 | ||
255 | u64 *wrid; | 256 | u64 *wrid; |
256 | union mthca_buf queue; | 257 | union mthca_buf queue; |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8852ea477c21..760c418d5bc9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -885,6 +885,48 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | |||
885 | return err; | 885 | return err; |
886 | } | 886 | } |
887 | 887 | ||
888 | static void mthca_adjust_qp_caps(struct mthca_dev *dev, | ||
889 | struct mthca_pd *pd, | ||
890 | struct mthca_qp *qp) | ||
891 | { | ||
892 | int max_data_size; | ||
893 | |||
894 | /* | ||
895 | * Calculate the maximum size of WQE s/g segments, excluding | ||
896 | * the next segment and other non-data segments. | ||
897 | */ | ||
898 | max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) - | ||
899 | sizeof (struct mthca_next_seg); | ||
900 | |||
901 | switch (qp->transport) { | ||
902 | case MLX: | ||
903 | max_data_size -= 2 * sizeof (struct mthca_data_seg); | ||
904 | break; | ||
905 | |||
906 | case UD: | ||
907 | if (mthca_is_memfree(dev)) | ||
908 | max_data_size -= sizeof (struct mthca_arbel_ud_seg); | ||
909 | else | ||
910 | max_data_size -= sizeof (struct mthca_tavor_ud_seg); | ||
911 | break; | ||
912 | |||
913 | default: | ||
914 | max_data_size -= sizeof (struct mthca_raddr_seg); | ||
915 | break; | ||
916 | } | ||
917 | |||
918 | /* We don't support inline data for kernel QPs (yet). */ | ||
919 | if (!pd->ibpd.uobject) | ||
920 | qp->max_inline_data = 0; | ||
921 | else | ||
922 | qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE; | ||
923 | |||
924 | qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg); | ||
925 | qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - | ||
926 | sizeof (struct mthca_next_seg)) / | ||
927 | sizeof (struct mthca_data_seg); | ||
928 | } | ||
929 | |||
888 | /* | 930 | /* |
889 | * Allocate and register buffer for WQEs. qp->rq.max, sq.max, | 931 | * Allocate and register buffer for WQEs. qp->rq.max, sq.max, |
890 | * rq.max_gs and sq.max_gs must all be assigned. | 932 | * rq.max_gs and sq.max_gs must all be assigned. |
@@ -902,27 +944,53 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, | |||
902 | size = sizeof (struct mthca_next_seg) + | 944 | size = sizeof (struct mthca_next_seg) + |
903 | qp->rq.max_gs * sizeof (struct mthca_data_seg); | 945 | qp->rq.max_gs * sizeof (struct mthca_data_seg); |
904 | 946 | ||
947 | if (size > dev->limits.max_desc_sz) | ||
948 | return -EINVAL; | ||
949 | |||
905 | for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; | 950 | for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; |
906 | qp->rq.wqe_shift++) | 951 | qp->rq.wqe_shift++) |
907 | ; /* nothing */ | 952 | ; /* nothing */ |
908 | 953 | ||
909 | size = sizeof (struct mthca_next_seg) + | 954 | size = qp->sq.max_gs * sizeof (struct mthca_data_seg); |
910 | qp->sq.max_gs * sizeof (struct mthca_data_seg); | ||
911 | switch (qp->transport) { | 955 | switch (qp->transport) { |
912 | case MLX: | 956 | case MLX: |
913 | size += 2 * sizeof (struct mthca_data_seg); | 957 | size += 2 * sizeof (struct mthca_data_seg); |
914 | break; | 958 | break; |
959 | |||
915 | case UD: | 960 | case UD: |
916 | if (mthca_is_memfree(dev)) | 961 | size += mthca_is_memfree(dev) ? |
917 | size += sizeof (struct mthca_arbel_ud_seg); | 962 | sizeof (struct mthca_arbel_ud_seg) : |
918 | else | 963 | sizeof (struct mthca_tavor_ud_seg); |
919 | size += sizeof (struct mthca_tavor_ud_seg); | ||
920 | break; | 964 | break; |
965 | |||
966 | case UC: | ||
967 | size += sizeof (struct mthca_raddr_seg); | ||
968 | break; | ||
969 | |||
970 | case RC: | ||
971 | size += sizeof (struct mthca_raddr_seg); | ||
972 | /* | ||
973 | * An atomic op will require an atomic segment, a | ||
974 | * remote address segment and one scatter entry. | ||
975 | */ | ||
976 | size = max_t(int, size, | ||
977 | sizeof (struct mthca_atomic_seg) + | ||
978 | sizeof (struct mthca_raddr_seg) + | ||
979 | sizeof (struct mthca_data_seg)); | ||
980 | break; | ||
981 | |||
921 | default: | 982 | default: |
922 | /* bind seg is as big as atomic + raddr segs */ | 983 | break; |
923 | size += sizeof (struct mthca_bind_seg); | ||
924 | } | 984 | } |
925 | 985 | ||
986 | /* Make sure that we have enough space for a bind request */ | ||
987 | size = max_t(int, size, sizeof (struct mthca_bind_seg)); | ||
988 | |||
989 | size += sizeof (struct mthca_next_seg); | ||
990 | |||
991 | if (size > dev->limits.max_desc_sz) | ||
992 | return -EINVAL; | ||
993 | |||
926 | for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; | 994 | for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; |
927 | qp->sq.wqe_shift++) | 995 | qp->sq.wqe_shift++) |
928 | ; /* nothing */ | 996 | ; /* nothing */ |
@@ -1066,6 +1134,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1066 | return ret; | 1134 | return ret; |
1067 | } | 1135 | } |
1068 | 1136 | ||
1137 | mthca_adjust_qp_caps(dev, pd, qp); | ||
1138 | |||
1069 | /* | 1139 | /* |
1070 | * If this is a userspace QP, we're done now. The doorbells | 1140 | * If this is a userspace QP, we're done now. The doorbells |
1071 | * will be allocated and buffers will be initialized in | 1141 | * will be allocated and buffers will be initialized in |
@@ -1486,8 +1556,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1486 | } | 1556 | } |
1487 | 1557 | ||
1488 | wqe += sizeof (struct mthca_atomic_seg); | 1558 | wqe += sizeof (struct mthca_atomic_seg); |
1489 | size += sizeof (struct mthca_raddr_seg) / 16 + | 1559 | size += (sizeof (struct mthca_raddr_seg) + |
1490 | sizeof (struct mthca_atomic_seg); | 1560 | sizeof (struct mthca_atomic_seg)) / 16; |
1491 | break; | 1561 | break; |
1492 | 1562 | ||
1493 | case IB_WR_RDMA_WRITE: | 1563 | case IB_WR_RDMA_WRITE: |
@@ -1637,6 +1707,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1637 | { | 1707 | { |
1638 | struct mthca_dev *dev = to_mdev(ibqp->device); | 1708 | struct mthca_dev *dev = to_mdev(ibqp->device); |
1639 | struct mthca_qp *qp = to_mqp(ibqp); | 1709 | struct mthca_qp *qp = to_mqp(ibqp); |
1710 | __be32 doorbell[2]; | ||
1640 | unsigned long flags; | 1711 | unsigned long flags; |
1641 | int err = 0; | 1712 | int err = 0; |
1642 | int nreq; | 1713 | int nreq; |
@@ -1654,6 +1725,22 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1654 | ind = qp->rq.next_ind; | 1725 | ind = qp->rq.next_ind; |
1655 | 1726 | ||
1656 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1727 | for (nreq = 0; wr; ++nreq, wr = wr->next) { |
1728 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1729 | nreq = 0; | ||
1730 | |||
1731 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1732 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1733 | |||
1734 | wmb(); | ||
1735 | |||
1736 | mthca_write64(doorbell, | ||
1737 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1738 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1739 | |||
1740 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1741 | size0 = 0; | ||
1742 | } | ||
1743 | |||
1657 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | 1744 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1658 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | 1745 | mthca_err(dev, "RQ %06x full (%u head, %u tail," |
1659 | " %d max, %d nreq)\n", qp->qpn, | 1746 | " %d max, %d nreq)\n", qp->qpn, |
@@ -1711,8 +1798,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1711 | 1798 | ||
1712 | out: | 1799 | out: |
1713 | if (likely(nreq)) { | 1800 | if (likely(nreq)) { |
1714 | __be32 doorbell[2]; | ||
1715 | |||
1716 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | 1801 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); |
1717 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); | 1802 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); |
1718 | 1803 | ||
@@ -1806,8 +1891,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1806 | } | 1891 | } |
1807 | 1892 | ||
1808 | wqe += sizeof (struct mthca_atomic_seg); | 1893 | wqe += sizeof (struct mthca_atomic_seg); |
1809 | size += sizeof (struct mthca_raddr_seg) / 16 + | 1894 | size += (sizeof (struct mthca_raddr_seg) + |
1810 | sizeof (struct mthca_atomic_seg); | 1895 | sizeof (struct mthca_atomic_seg)) / 16; |
1811 | break; | 1896 | break; |
1812 | 1897 | ||
1813 | case IB_WR_RDMA_READ: | 1898 | case IB_WR_RDMA_READ: |
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 26d5161fde07..f7d234295efe 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -417,6 +417,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
417 | { | 417 | { |
418 | struct mthca_dev *dev = to_mdev(ibsrq->device); | 418 | struct mthca_dev *dev = to_mdev(ibsrq->device); |
419 | struct mthca_srq *srq = to_msrq(ibsrq); | 419 | struct mthca_srq *srq = to_msrq(ibsrq); |
420 | __be32 doorbell[2]; | ||
420 | unsigned long flags; | 421 | unsigned long flags; |
421 | int err = 0; | 422 | int err = 0; |
422 | int first_ind; | 423 | int first_ind; |
@@ -432,6 +433,25 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
432 | first_ind = srq->first_free; | 433 | first_ind = srq->first_free; |
433 | 434 | ||
434 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 435 | for (nreq = 0; wr; ++nreq, wr = wr->next) { |
436 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
437 | nreq = 0; | ||
438 | |||
439 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | ||
440 | doorbell[1] = cpu_to_be32(srq->srqn << 8); | ||
441 | |||
442 | /* | ||
443 | * Make sure that descriptors are written | ||
444 | * before doorbell is rung. | ||
445 | */ | ||
446 | wmb(); | ||
447 | |||
448 | mthca_write64(doorbell, | ||
449 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
450 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
451 | |||
452 | first_ind = srq->first_free; | ||
453 | } | ||
454 | |||
435 | ind = srq->first_free; | 455 | ind = srq->first_free; |
436 | 456 | ||
437 | if (ind < 0) { | 457 | if (ind < 0) { |
@@ -494,8 +514,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
494 | } | 514 | } |
495 | 515 | ||
496 | if (likely(nreq)) { | 516 | if (likely(nreq)) { |
497 | __be32 doorbell[2]; | ||
498 | |||
499 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | 517 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); |
500 | doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq); | 518 | doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq); |
501 | 519 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index 1f4c0ff28f79..73f1c0b9021e 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -49,7 +49,8 @@ enum { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | enum { | 51 | enum { |
52 | MTHCA_INVAL_LKEY = 0x100 | 52 | MTHCA_INVAL_LKEY = 0x100, |
53 | MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256 | ||
53 | }; | 54 | }; |
54 | 55 | ||
55 | struct mthca_next_seg { | 56 | struct mthca_next_seg { |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0095acc0fbbe..9923a15a9996 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -179,6 +179,7 @@ struct ipoib_dev_priv { | |||
179 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 179 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
180 | struct list_head fs_list; | 180 | struct list_head fs_list; |
181 | struct dentry *mcg_dentry; | 181 | struct dentry *mcg_dentry; |
182 | struct dentry *path_dentry; | ||
182 | #endif | 183 | #endif |
183 | }; | 184 | }; |
184 | 185 | ||
@@ -270,7 +271,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev); | |||
270 | 271 | ||
271 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 272 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
272 | struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); | 273 | struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); |
273 | void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter); | ||
274 | int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); | 274 | int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); |
275 | void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, | 275 | void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, |
276 | union ib_gid *gid, | 276 | union ib_gid *gid, |
@@ -278,6 +278,11 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, | |||
278 | unsigned int *queuelen, | 278 | unsigned int *queuelen, |
279 | unsigned int *complete, | 279 | unsigned int *complete, |
280 | unsigned int *send_only); | 280 | unsigned int *send_only); |
281 | |||
282 | struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev); | ||
283 | int ipoib_path_iter_next(struct ipoib_path_iter *iter); | ||
284 | void ipoib_path_iter_read(struct ipoib_path_iter *iter, | ||
285 | struct ipoib_path *path); | ||
281 | #endif | 286 | #endif |
282 | 287 | ||
283 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, | 288 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, |
@@ -299,13 +304,13 @@ void ipoib_pkey_poll(void *dev); | |||
299 | int ipoib_pkey_dev_delay_open(struct net_device *dev); | 304 | int ipoib_pkey_dev_delay_open(struct net_device *dev); |
300 | 305 | ||
301 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 306 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
302 | int ipoib_create_debug_file(struct net_device *dev); | 307 | void ipoib_create_debug_files(struct net_device *dev); |
303 | void ipoib_delete_debug_file(struct net_device *dev); | 308 | void ipoib_delete_debug_files(struct net_device *dev); |
304 | int ipoib_register_debugfs(void); | 309 | int ipoib_register_debugfs(void); |
305 | void ipoib_unregister_debugfs(void); | 310 | void ipoib_unregister_debugfs(void); |
306 | #else | 311 | #else |
307 | static inline int ipoib_create_debug_file(struct net_device *dev) { return 0; } | 312 | static inline void ipoib_create_debug_files(struct net_device *dev) { } |
308 | static inline void ipoib_delete_debug_file(struct net_device *dev) { } | 313 | static inline void ipoib_delete_debug_files(struct net_device *dev) { } |
309 | static inline int ipoib_register_debugfs(void) { return 0; } | 314 | static inline int ipoib_register_debugfs(void) { return 0; } |
310 | static inline void ipoib_unregister_debugfs(void) { } | 315 | static inline void ipoib_unregister_debugfs(void) { } |
311 | #endif | 316 | #endif |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 38b150f775e7..685258e34034 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -43,6 +43,18 @@ struct file_operations; | |||
43 | 43 | ||
44 | static struct dentry *ipoib_root; | 44 | static struct dentry *ipoib_root; |
45 | 45 | ||
46 | static void format_gid(union ib_gid *gid, char *buf) | ||
47 | { | ||
48 | int i, n; | ||
49 | |||
50 | for (n = 0, i = 0; i < 8; ++i) { | ||
51 | n += sprintf(buf + n, "%x", | ||
52 | be16_to_cpu(((__be16 *) gid->raw)[i])); | ||
53 | if (i < 7) | ||
54 | buf[n++] = ':'; | ||
55 | } | ||
56 | } | ||
57 | |||
46 | static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos) | 58 | static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos) |
47 | { | 59 | { |
48 | struct ipoib_mcast_iter *iter; | 60 | struct ipoib_mcast_iter *iter; |
@@ -54,7 +66,7 @@ static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos) | |||
54 | 66 | ||
55 | while (n--) { | 67 | while (n--) { |
56 | if (ipoib_mcast_iter_next(iter)) { | 68 | if (ipoib_mcast_iter_next(iter)) { |
57 | ipoib_mcast_iter_free(iter); | 69 | kfree(iter); |
58 | return NULL; | 70 | return NULL; |
59 | } | 71 | } |
60 | } | 72 | } |
@@ -70,7 +82,7 @@ static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr, | |||
70 | (*pos)++; | 82 | (*pos)++; |
71 | 83 | ||
72 | if (ipoib_mcast_iter_next(iter)) { | 84 | if (ipoib_mcast_iter_next(iter)) { |
73 | ipoib_mcast_iter_free(iter); | 85 | kfree(iter); |
74 | return NULL; | 86 | return NULL; |
75 | } | 87 | } |
76 | 88 | ||
@@ -87,32 +99,32 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr) | |||
87 | struct ipoib_mcast_iter *iter = iter_ptr; | 99 | struct ipoib_mcast_iter *iter = iter_ptr; |
88 | char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; | 100 | char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; |
89 | union ib_gid mgid; | 101 | union ib_gid mgid; |
90 | int i, n; | ||
91 | unsigned long created; | 102 | unsigned long created; |
92 | unsigned int queuelen, complete, send_only; | 103 | unsigned int queuelen, complete, send_only; |
93 | 104 | ||
94 | if (iter) { | 105 | if (!iter) |
95 | ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen, | 106 | return 0; |
96 | &complete, &send_only); | ||
97 | 107 | ||
98 | for (n = 0, i = 0; i < sizeof mgid / 2; ++i) { | 108 | ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen, |
99 | n += sprintf(gid_buf + n, "%x", | 109 | &complete, &send_only); |
100 | be16_to_cpu(((__be16 *) mgid.raw)[i])); | ||
101 | if (i < sizeof mgid / 2 - 1) | ||
102 | gid_buf[n++] = ':'; | ||
103 | } | ||
104 | } | ||
105 | 110 | ||
106 | seq_printf(file, "GID: %*s", -(1 + (int) sizeof gid_buf), gid_buf); | 111 | format_gid(&mgid, gid_buf); |
107 | 112 | ||
108 | seq_printf(file, | 113 | seq_printf(file, |
109 | " created: %10ld queuelen: %4d complete: %d send_only: %d\n", | 114 | "GID: %s\n" |
110 | created, queuelen, complete, send_only); | 115 | " created: %10ld\n" |
116 | " queuelen: %9d\n" | ||
117 | " complete: %9s\n" | ||
118 | " send_only: %8s\n" | ||
119 | "\n", | ||
120 | gid_buf, created, queuelen, | ||
121 | complete ? "yes" : "no", | ||
122 | send_only ? "yes" : "no"); | ||
111 | 123 | ||
112 | return 0; | 124 | return 0; |
113 | } | 125 | } |
114 | 126 | ||
115 | static struct seq_operations ipoib_seq_ops = { | 127 | static struct seq_operations ipoib_mcg_seq_ops = { |
116 | .start = ipoib_mcg_seq_start, | 128 | .start = ipoib_mcg_seq_start, |
117 | .next = ipoib_mcg_seq_next, | 129 | .next = ipoib_mcg_seq_next, |
118 | .stop = ipoib_mcg_seq_stop, | 130 | .stop = ipoib_mcg_seq_stop, |
@@ -124,7 +136,7 @@ static int ipoib_mcg_open(struct inode *inode, struct file *file) | |||
124 | struct seq_file *seq; | 136 | struct seq_file *seq; |
125 | int ret; | 137 | int ret; |
126 | 138 | ||
127 | ret = seq_open(file, &ipoib_seq_ops); | 139 | ret = seq_open(file, &ipoib_mcg_seq_ops); |
128 | if (ret) | 140 | if (ret) |
129 | return ret; | 141 | return ret; |
130 | 142 | ||
@@ -134,7 +146,7 @@ static int ipoib_mcg_open(struct inode *inode, struct file *file) | |||
134 | return 0; | 146 | return 0; |
135 | } | 147 | } |
136 | 148 | ||
137 | static struct file_operations ipoib_fops = { | 149 | static struct file_operations ipoib_mcg_fops = { |
138 | .owner = THIS_MODULE, | 150 | .owner = THIS_MODULE, |
139 | .open = ipoib_mcg_open, | 151 | .open = ipoib_mcg_open, |
140 | .read = seq_read, | 152 | .read = seq_read, |
@@ -142,25 +154,138 @@ static struct file_operations ipoib_fops = { | |||
142 | .release = seq_release | 154 | .release = seq_release |
143 | }; | 155 | }; |
144 | 156 | ||
145 | int ipoib_create_debug_file(struct net_device *dev) | 157 | static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos) |
158 | { | ||
159 | struct ipoib_path_iter *iter; | ||
160 | loff_t n = *pos; | ||
161 | |||
162 | iter = ipoib_path_iter_init(file->private); | ||
163 | if (!iter) | ||
164 | return NULL; | ||
165 | |||
166 | while (n--) { | ||
167 | if (ipoib_path_iter_next(iter)) { | ||
168 | kfree(iter); | ||
169 | return NULL; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | return iter; | ||
174 | } | ||
175 | |||
176 | static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr, | ||
177 | loff_t *pos) | ||
178 | { | ||
179 | struct ipoib_path_iter *iter = iter_ptr; | ||
180 | |||
181 | (*pos)++; | ||
182 | |||
183 | if (ipoib_path_iter_next(iter)) { | ||
184 | kfree(iter); | ||
185 | return NULL; | ||
186 | } | ||
187 | |||
188 | return iter; | ||
189 | } | ||
190 | |||
191 | static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr) | ||
192 | { | ||
193 | /* nothing for now */ | ||
194 | } | ||
195 | |||
196 | static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr) | ||
197 | { | ||
198 | struct ipoib_path_iter *iter = iter_ptr; | ||
199 | char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; | ||
200 | struct ipoib_path path; | ||
201 | int rate; | ||
202 | |||
203 | if (!iter) | ||
204 | return 0; | ||
205 | |||
206 | ipoib_path_iter_read(iter, &path); | ||
207 | |||
208 | format_gid(&path.pathrec.dgid, gid_buf); | ||
209 | |||
210 | seq_printf(file, | ||
211 | "GID: %s\n" | ||
212 | " complete: %6s\n", | ||
213 | gid_buf, path.pathrec.dlid ? "yes" : "no"); | ||
214 | |||
215 | if (path.pathrec.dlid) { | ||
216 | rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25; | ||
217 | |||
218 | seq_printf(file, | ||
219 | " DLID: 0x%04x\n" | ||
220 | " SL: %12d\n" | ||
221 | " rate: %*d%s Gb/sec\n", | ||
222 | be16_to_cpu(path.pathrec.dlid), | ||
223 | path.pathrec.sl, | ||
224 | 10 - ((rate % 10) ? 2 : 0), | ||
225 | rate / 10, rate % 10 ? ".5" : ""); | ||
226 | } | ||
227 | |||
228 | seq_putc(file, '\n'); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static struct seq_operations ipoib_path_seq_ops = { | ||
234 | .start = ipoib_path_seq_start, | ||
235 | .next = ipoib_path_seq_next, | ||
236 | .stop = ipoib_path_seq_stop, | ||
237 | .show = ipoib_path_seq_show, | ||
238 | }; | ||
239 | |||
240 | static int ipoib_path_open(struct inode *inode, struct file *file) | ||
241 | { | ||
242 | struct seq_file *seq; | ||
243 | int ret; | ||
244 | |||
245 | ret = seq_open(file, &ipoib_path_seq_ops); | ||
246 | if (ret) | ||
247 | return ret; | ||
248 | |||
249 | seq = file->private_data; | ||
250 | seq->private = inode->u.generic_ip; | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static struct file_operations ipoib_path_fops = { | ||
256 | .owner = THIS_MODULE, | ||
257 | .open = ipoib_path_open, | ||
258 | .read = seq_read, | ||
259 | .llseek = seq_lseek, | ||
260 | .release = seq_release | ||
261 | }; | ||
262 | |||
263 | void ipoib_create_debug_files(struct net_device *dev) | ||
146 | { | 264 | { |
147 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 265 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
148 | char name[IFNAMSIZ + sizeof "_mcg"]; | 266 | char name[IFNAMSIZ + sizeof "_path"]; |
149 | 267 | ||
150 | snprintf(name, sizeof name, "%s_mcg", dev->name); | 268 | snprintf(name, sizeof name, "%s_mcg", dev->name); |
151 | |||
152 | priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, | 269 | priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, |
153 | ipoib_root, dev, &ipoib_fops); | 270 | ipoib_root, dev, &ipoib_mcg_fops); |
154 | 271 | if (!priv->mcg_dentry) | |
155 | return priv->mcg_dentry ? 0 : -ENOMEM; | 272 | ipoib_warn(priv, "failed to create mcg debug file\n"); |
273 | |||
274 | snprintf(name, sizeof name, "%s_path", dev->name); | ||
275 | priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, | ||
276 | ipoib_root, dev, &ipoib_path_fops); | ||
277 | if (!priv->path_dentry) | ||
278 | ipoib_warn(priv, "failed to create path debug file\n"); | ||
156 | } | 279 | } |
157 | 280 | ||
158 | void ipoib_delete_debug_file(struct net_device *dev) | 281 | void ipoib_delete_debug_files(struct net_device *dev) |
159 | { | 282 | { |
160 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 283 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
161 | 284 | ||
162 | if (priv->mcg_dentry) | 285 | if (priv->mcg_dentry) |
163 | debugfs_remove(priv->mcg_dentry); | 286 | debugfs_remove(priv->mcg_dentry); |
287 | if (priv->path_dentry) | ||
288 | debugfs_remove(priv->path_dentry); | ||
164 | } | 289 | } |
165 | 290 | ||
166 | int ipoib_register_debugfs(void) | 291 | int ipoib_register_debugfs(void) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ce0296273e76..2fa30751f362 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -58,6 +58,11 @@ module_param_named(debug_level, ipoib_debug_level, int, 0644); | |||
58 | MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); | 58 | MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | struct ipoib_path_iter { | ||
62 | struct net_device *dev; | ||
63 | struct ipoib_path path; | ||
64 | }; | ||
65 | |||
61 | static const u8 ipv4_bcast_addr[] = { | 66 | static const u8 ipv4_bcast_addr[] = { |
62 | 0x00, 0xff, 0xff, 0xff, | 67 | 0x00, 0xff, 0xff, 0xff, |
63 | 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, | 68 | 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, |
@@ -250,6 +255,64 @@ static void path_free(struct net_device *dev, struct ipoib_path *path) | |||
250 | kfree(path); | 255 | kfree(path); |
251 | } | 256 | } |
252 | 257 | ||
258 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | ||
259 | |||
260 | struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) | ||
261 | { | ||
262 | struct ipoib_path_iter *iter; | ||
263 | |||
264 | iter = kmalloc(sizeof *iter, GFP_KERNEL); | ||
265 | if (!iter) | ||
266 | return NULL; | ||
267 | |||
268 | iter->dev = dev; | ||
269 | memset(iter->path.pathrec.dgid.raw, 0, 16); | ||
270 | |||
271 | if (ipoib_path_iter_next(iter)) { | ||
272 | kfree(iter); | ||
273 | return NULL; | ||
274 | } | ||
275 | |||
276 | return iter; | ||
277 | } | ||
278 | |||
279 | int ipoib_path_iter_next(struct ipoib_path_iter *iter) | ||
280 | { | ||
281 | struct ipoib_dev_priv *priv = netdev_priv(iter->dev); | ||
282 | struct rb_node *n; | ||
283 | struct ipoib_path *path; | ||
284 | int ret = 1; | ||
285 | |||
286 | spin_lock_irq(&priv->lock); | ||
287 | |||
288 | n = rb_first(&priv->path_tree); | ||
289 | |||
290 | while (n) { | ||
291 | path = rb_entry(n, struct ipoib_path, rb_node); | ||
292 | |||
293 | if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, | ||
294 | sizeof (union ib_gid)) < 0) { | ||
295 | iter->path = *path; | ||
296 | ret = 0; | ||
297 | break; | ||
298 | } | ||
299 | |||
300 | n = rb_next(n); | ||
301 | } | ||
302 | |||
303 | spin_unlock_irq(&priv->lock); | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | void ipoib_path_iter_read(struct ipoib_path_iter *iter, | ||
309 | struct ipoib_path *path) | ||
310 | { | ||
311 | *path = iter->path; | ||
312 | } | ||
313 | |||
314 | #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ | ||
315 | |||
253 | void ipoib_flush_paths(struct net_device *dev) | 316 | void ipoib_flush_paths(struct net_device *dev) |
254 | { | 317 | { |
255 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 318 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
@@ -763,7 +826,7 @@ void ipoib_dev_cleanup(struct net_device *dev) | |||
763 | { | 826 | { |
764 | struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; | 827 | struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; |
765 | 828 | ||
766 | ipoib_delete_debug_file(dev); | 829 | ipoib_delete_debug_files(dev); |
767 | 830 | ||
768 | /* Delete any child interfaces first */ | 831 | /* Delete any child interfaces first */ |
769 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { | 832 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { |
@@ -972,8 +1035,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
972 | goto register_failed; | 1035 | goto register_failed; |
973 | } | 1036 | } |
974 | 1037 | ||
975 | if (ipoib_create_debug_file(priv->dev)) | 1038 | ipoib_create_debug_files(priv->dev); |
976 | goto debug_failed; | ||
977 | 1039 | ||
978 | if (ipoib_add_pkey_attr(priv->dev)) | 1040 | if (ipoib_add_pkey_attr(priv->dev)) |
979 | goto sysfs_failed; | 1041 | goto sysfs_failed; |
@@ -987,9 +1049,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
987 | return priv->dev; | 1049 | return priv->dev; |
988 | 1050 | ||
989 | sysfs_failed: | 1051 | sysfs_failed: |
990 | ipoib_delete_debug_file(priv->dev); | 1052 | ipoib_delete_debug_files(priv->dev); |
991 | |||
992 | debug_failed: | ||
993 | unregister_netdev(priv->dev); | 1053 | unregister_netdev(priv->dev); |
994 | 1054 | ||
995 | register_failed: | 1055 | register_failed: |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3ecf78a9493a..c33ed87f9dff 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -120,12 +120,8 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) | |||
120 | if (mcast->ah) | 120 | if (mcast->ah) |
121 | ipoib_put_ah(mcast->ah); | 121 | ipoib_put_ah(mcast->ah); |
122 | 122 | ||
123 | while (!skb_queue_empty(&mcast->pkt_queue)) { | 123 | while (!skb_queue_empty(&mcast->pkt_queue)) |
124 | struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); | 124 | dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); |
125 | |||
126 | skb->dev = dev; | ||
127 | dev_kfree_skb_any(skb); | ||
128 | } | ||
129 | 125 | ||
130 | kfree(mcast); | 126 | kfree(mcast); |
131 | } | 127 | } |
@@ -317,13 +313,8 @@ ipoib_mcast_sendonly_join_complete(int status, | |||
317 | IPOIB_GID_ARG(mcast->mcmember.mgid), status); | 313 | IPOIB_GID_ARG(mcast->mcmember.mgid), status); |
318 | 314 | ||
319 | /* Flush out any queued packets */ | 315 | /* Flush out any queued packets */ |
320 | while (!skb_queue_empty(&mcast->pkt_queue)) { | 316 | while (!skb_queue_empty(&mcast->pkt_queue)) |
321 | struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); | 317 | dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); |
322 | |||
323 | skb->dev = dev; | ||
324 | |||
325 | dev_kfree_skb_any(skb); | ||
326 | } | ||
327 | 318 | ||
328 | /* Clear the busy flag so we try again */ | 319 | /* Clear the busy flag so we try again */ |
329 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 320 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
@@ -928,21 +919,16 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev) | |||
928 | return NULL; | 919 | return NULL; |
929 | 920 | ||
930 | iter->dev = dev; | 921 | iter->dev = dev; |
931 | memset(iter->mgid.raw, 0, sizeof iter->mgid); | 922 | memset(iter->mgid.raw, 0, 16); |
932 | 923 | ||
933 | if (ipoib_mcast_iter_next(iter)) { | 924 | if (ipoib_mcast_iter_next(iter)) { |
934 | ipoib_mcast_iter_free(iter); | 925 | kfree(iter); |
935 | return NULL; | 926 | return NULL; |
936 | } | 927 | } |
937 | 928 | ||
938 | return iter; | 929 | return iter; |
939 | } | 930 | } |
940 | 931 | ||
941 | void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter) | ||
942 | { | ||
943 | kfree(iter); | ||
944 | } | ||
945 | |||
946 | int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) | 932 | int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) |
947 | { | 933 | { |
948 | struct ipoib_dev_priv *priv = netdev_priv(iter->dev); | 934 | struct ipoib_dev_priv *priv = netdev_priv(iter->dev); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 332d730e60c2..d280b341a37f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -113,8 +113,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
113 | 113 | ||
114 | priv->parent = ppriv->dev; | 114 | priv->parent = ppriv->dev; |
115 | 115 | ||
116 | if (ipoib_create_debug_file(priv->dev)) | 116 | ipoib_create_debug_files(priv->dev); |
117 | goto debug_failed; | ||
118 | 117 | ||
119 | if (ipoib_add_pkey_attr(priv->dev)) | 118 | if (ipoib_add_pkey_attr(priv->dev)) |
120 | goto sysfs_failed; | 119 | goto sysfs_failed; |
@@ -130,9 +129,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
130 | return 0; | 129 | return 0; |
131 | 130 | ||
132 | sysfs_failed: | 131 | sysfs_failed: |
133 | ipoib_delete_debug_file(priv->dev); | 132 | ipoib_delete_debug_files(priv->dev); |
134 | |||
135 | debug_failed: | ||
136 | unregister_netdev(priv->dev); | 133 | unregister_netdev(priv->dev); |
137 | 134 | ||
138 | register_failed: | 135 | register_failed: |