Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c      | 42
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      | 23
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c       |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c      | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 33
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h |  3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 40
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c    | 59
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c      |  5
10 files changed, 146 insertions, 89 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index f7f8391fe43f..1a9d0a2c33c3 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -137,47 +137,11 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
 	return reply(smp);
 }
 
-struct port_info {
-	__be64 mkey;
-	__be64 gid_prefix;
-	__be16 lid;
-	__be16 sm_lid;
-	__be32 cap_mask;
-	__be16 diag_code;
-	__be16 mkey_lease_period;
-	u8 local_port_num;
-	u8 link_width_enabled;
-	u8 link_width_supported;
-	u8 link_width_active;
-	u8 linkspeed_portstate;			/* 4 bits, 4 bits */
-	u8 portphysstate_linkdown;		/* 4 bits, 4 bits */
-	u8 mkeyprot_resv_lmc;			/* 2 bits, 3, 3 */
-	u8 linkspeedactive_enabled;		/* 4 bits, 4 bits */
-	u8 neighbormtu_mastersmsl;		/* 4 bits, 4 bits */
-	u8 vlcap_inittype;			/* 4 bits, 4 bits */
-	u8 vl_high_limit;
-	u8 vl_arb_high_cap;
-	u8 vl_arb_low_cap;
-	u8 inittypereply_mtucap;		/* 4 bits, 4 bits */
-	u8 vlstallcnt_hoqlife;			/* 3 bits, 5 bits */
-	u8 operationalvl_pei_peo_fpi_fpo;	/* 4 bits, 1, 1, 1, 1 */
-	__be16 mkey_violations;
-	__be16 pkey_violations;
-	__be16 qkey_violations;
-	u8 guid_cap;
-	u8 clientrereg_resv_subnetto;		/* 1 bit, 2 bits, 5 */
-	u8 resv_resptimevalue;			/* 3 bits, 5 bits */
-	u8 localphyerrors_overrunerrors;	/* 4 bits, 4 bits */
-	__be16 max_credit_hint;
-	u8 resv;
-	u8 link_roundtrip_latency[3];
-} __attribute__ ((packed));
-
 static int recv_subn_get_portinfo(struct ib_smp *smp,
 				  struct ib_device *ibdev, u8 port)
 {
 	struct ipath_ibdev *dev;
-	struct port_info *pip = (struct port_info *)smp->data;
+	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
 	u16 lid;
 	u8 ibcstat;
 	u8 mtu;
@@ -312,7 +276,7 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp,
 static int recv_subn_set_portinfo(struct ib_smp *smp,
 				  struct ib_device *ibdev, u8 port)
 {
-	struct port_info *pip = (struct port_info *)smp->data;
+	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
 	struct ib_event event;
 	struct ipath_ibdev *dev;
 	u32 flags;
@@ -445,7 +409,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
 
 	if (pip->clientrereg_resv_subnetto & 0x80) {
 		clientrereg = 1;
-		event.event = IB_EVENT_LID_CHANGE;
+		event.event = IB_EVENT_CLIENT_REREGISTER;
 		ib_dispatch_event(&event);
 	}
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 798e13e14faf..d0f7731802c9 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -174,7 +174,6 @@ enum {
 
 struct mthca_cmd_context {
 	struct completion done;
-	struct timer_list timer;
 	int result;
 	int next;
 	u64 out_param;
@@ -362,15 +361,6 @@ void mthca_cmd_event(struct mthca_dev *dev,
 	complete(&context->done);
 }
 
-static void event_timeout(unsigned long context_ptr)
-{
-	struct mthca_cmd_context *context =
-		(struct mthca_cmd_context *) context_ptr;
-
-	context->result = -EBUSY;
-	complete(&context->done);
-}
-
 static int mthca_cmd_wait(struct mthca_dev *dev,
 			  u64 in_param,
 			  u64 *out_param,
@@ -401,11 +391,10 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
 	if (err)
 		goto out;
 
-	context->timer.expires = jiffies + timeout;
-	add_timer(&context->timer);
-
-	wait_for_completion(&context->done);
-	del_timer_sync(&context->timer);
+	if (!wait_for_completion_timeout(&context->done, timeout)) {
+		err = -EBUSY;
+		goto out;
+	}
 
 	err = context->result;
 	if (err)
@@ -535,10 +524,6 @@ int mthca_cmd_use_events(struct mthca_dev *dev)
 	for (i = 0; i < dev->cmd.max_cmds; ++i) {
 		dev->cmd.context[i].token = i;
 		dev->cmd.context[i].next = i + 1;
-		init_timer(&dev->cmd.context[i].timer);
-		dev->cmd.context[i].timer.data =
-			(unsigned long) &dev->cmd.context[i];
-		dev->cmd.context[i].timer.function = event_timeout;
	}
 
 	dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
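
The timer removal above works because wait_for_completion_timeout() returns 0 when the timeout expires and the remaining jiffies otherwise, so a helper timer that forces -EBUSY into the context is redundant. A minimal sketch of that pattern, using hypothetical names rather than the mthca structures:

/*
 * Sketch only: the completion-with-timeout pattern adopted above.
 * ctx->done is assumed to have been set up with init_completion()
 * and completed by an event handler that first fills in ctx->result,
 * as mthca_cmd_event() does.
 */
#include <linux/completion.h>
#include <linux/errno.h>

struct demo_ctx {
	struct completion done;
	int result;
};

static int demo_wait(struct demo_ctx *ctx, unsigned long timeout)
{
	/* returns 0 if the timeout elapsed before complete() ran */
	if (!wait_for_completion_timeout(&ctx->done, timeout))
		return -EBUSY;

	return ctx->result;	/* value filled in by the completer */
}
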
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 205854e9c662..3e27a084257e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -540,8 +540,17 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 			entry->wr_id = srq->wrid[wqe_index];
 			mthca_free_srq_wqe(srq, wqe);
 		} else {
+			s32 wqe;
 			wq = &(*cur_qp)->rq;
-			wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
+			wqe = be32_to_cpu(cqe->wqe);
+			wqe_index = wqe >> wq->wqe_shift;
+			/*
+			 * WQE addr == base - 1 might be reported in receive completion
+			 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
+			 * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
+			 */
+			if (unlikely(wqe_index < 0))
+				wqe_index = wq->max - 1;
 			entry->wr_id = (*cur_qp)->wrid[wqe_index];
 		}
 
@@ -813,6 +822,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	spin_lock_init(&cq->lock);
 	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);
+	mutex_init(&cq->mutex);
 
 	memset(cq_context, 0, sizeof *cq_context);
 	cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
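
The firmware workaround above hinges on keeping the reported WQE address in a signed 32-bit value: when the firmware reports "base - 1", the arithmetic right shift produces a negative index, which is then clamped to the last receive-queue entry. A standalone sketch of the same arithmetic with made-up queue parameters (gcc/kernel semantics assumed for the right shift of a negative value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int wqe_shift = 6;			/* hypothetical: 64-byte receive WQEs */
	int max = 256;				/* hypothetical: RQ size in entries */
	int32_t wqe = 0 - 1;			/* "base - 1", assuming the RQ starts at offset 0 */
	int wqe_index = wqe >> wqe_shift;	/* arithmetic shift keeps the sign: -1 */

	if (wqe_index < 0)			/* clamp to the last entry, as the driver does */
		wqe_index = max - 1;

	printf("wqe_index = %d\n", wqe_index);	/* prints 255 */
	return 0;
}

Had wqe stayed an unsigned 32-bit value, the shift would have produced a huge positive index instead of a negative one, and the check against zero could never fire.
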
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 99f109c3815d..d536217e700e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -695,10 +695,6 @@ static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
 
 static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
 {
-	unsigned long mthca_base;
-
-	mthca_base = pci_resource_start(dev->pdev, 0);
-
 	if (mthca_is_memfree(dev)) {
 		/*
 		 * We assume that the EQ arm and EQ set CI registers
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 4730863ece9a..d9bc030bcccc 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -114,14 +114,22 @@ static void smp_snoop(struct ib_device *ibdev,
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+			struct ib_port_info *pinfo =
+				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+
 			mthca_update_rate(to_mdev(ibdev), port_num);
 			update_sm_ah(to_mdev(ibdev), port_num,
-				     be16_to_cpup((__be16 *) (mad->data + 58)),
-				     (*(u8 *) (mad->data + 76)) & 0xf);
+				     be16_to_cpu(pinfo->lid),
+				     pinfo->neighbormtu_mastersmsl & 0xf);
 
 			event.device = ibdev;
-			event.event = IB_EVENT_LID_CHANGE;
 			event.element.port_num = port_num;
+
+			if(pinfo->clientrereg_resv_subnetto & 0x80)
+				event.event = IB_EVENT_CLIENT_REREGISTER;
+			else
+				event.event = IB_EVENT_LID_CHANGE;
+
 			ib_dispatch_event(&event);
 		}
 
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index a2eae8a30167..230ae21db8fd 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -115,6 +115,16 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
+	/*
+	 * If Sinai memory key optimization is being used, then only
+	 * the 8-bit key portion will change. For other HCAs, the
+	 * unused index bits will also be used for FMR remapping.
+	 */
+	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+		props->max_map_per_fmr = 255;
+	else
+		props->max_map_per_fmr =
+			(1 << (32 - long_log2(mdev->limits.num_mpts))) - 1;
 
 	err = 0;
 out:
@@ -783,18 +793,24 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 	if (entries < 1 || entries > dev->limits.max_cqes)
 		return -EINVAL;
 
+	mutex_lock(&cq->mutex);
+
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries == ibcq->cqe + 1)
-		return 0;
+	if (entries == ibcq->cqe + 1) {
+		ret = 0;
+		goto out;
+	}
 
 	if (cq->is_kernel) {
 		ret = mthca_alloc_resize_buf(dev, cq, entries);
 		if (ret)
-			return ret;
+			goto out;
 		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
 	} else {
-		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-			return -EFAULT;
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+			ret = -EFAULT;
+			goto out;
+		}
 		lkey = ucmd.lkey;
 	}
 
@@ -811,7 +827,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 			cq->resize_buf = NULL;
 			spin_unlock_irq(&cq->lock);
 		}
-		return ret;
+		goto out;
 	}
 
 	if (cq->is_kernel) {
@@ -838,7 +854,10 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
 	} else
 		ibcq->cqe = entries - 1;
 
-	return 0;
+out:
+	mutex_unlock(&cq->mutex);
+
+	return ret;
 }
 
 static int mthca_destroy_cq(struct ib_cq *cq)
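
The FMR limit added to mthca_query_device() above is derived from the MPT index bits left unused by the key byte: as a rough worked example with a hypothetical MPT table of 2^17 entries, long_log2() gives 17, leaving 32 - 17 = 15 spare bits and therefore max_map_per_fmr = 2^15 - 1 = 32767, while the Sinai memory-key optimization only ever varies the 8-bit key portion, hence the fixed 255. The same arithmetic as a standalone check (illustrative values only, not real hardware limits):

#include <stdio.h>

int main(void)
{
	unsigned int num_mpts = 1 << 17;	/* hypothetical MPT table size */
	int log2_mpts = 0;

	while ((1u << log2_mpts) < num_mpts)	/* stand-in for the kernel's long_log2() */
		++log2_mpts;

	/* prints 32767 for the values above */
	printf("max_map_per_fmr = %d\n", (1 << (32 - log2_mpts)) - 1);
	return 0;
}
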
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 179a8f610d0f..8de2887ba15c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -214,6 +214,7 @@ struct mthca_cq {
 	int arm_sn;
 
 	wait_queue_head_t wait;
+	struct mutex mutex;
 };
 
 struct mthca_srq {
@@ -237,6 +238,7 @@ struct mthca_srq {
 	struct mthca_mr mr;
 
 	wait_queue_head_t wait;
+	struct mutex mutex;
 };
 
 struct mthca_wq {
@@ -278,6 +280,7 @@ struct mthca_qp {
 	union mthca_buf queue;
 
 	wait_queue_head_t wait;
+	struct mutex mutex;
 };
 
 struct mthca_sqp {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 07c13be07a4a..16c387d8170c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -534,7 +534,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	struct mthca_qp_context *qp_context;
 	u32 sqd_event = 0;
 	u8 status;
-	int err;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
 
 	if (attr_mask & IB_QP_CUR_STATE) {
 		cur_state = attr->cur_qp_state;
@@ -553,39 +555,41 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 			  "%d->%d with attr 0x%08x\n",
 			  qp->transport, cur_state, new_state,
 			  attr_mask);
-		return -EINVAL;
+		goto out;
 	}
 
 	if ((attr_mask & IB_QP_PKEY_INDEX) &&
 	    attr->pkey_index >= dev->limits.pkey_table_len) {
 		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
 			  attr->pkey_index, dev->limits.pkey_table_len-1);
-		return -EINVAL;
+		goto out;
 	}
 
 	if ((attr_mask & IB_QP_PORT) &&
 	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
 		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-		return -EINVAL;
+		goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
 		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
 			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-		return -EINVAL;
+		goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
 	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
 		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
 			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-		return -EINVAL;
+		goto out;
 	}
 
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto out;
+	}
 	qp_param = mailbox->buf;
 	qp_context = &qp_param->context;
 	memset(qp_param, 0, sizeof *qp_param);
@@ -618,7 +622,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
 			mthca_dbg(dev, "path MTU (%u) is invalid\n",
 				  attr->path_mtu);
-			return -EINVAL;
+			goto out_mailbox;
 		}
 		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
 	}
@@ -672,7 +676,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (attr_mask & IB_QP_AV) {
 		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
 				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
-			return -EINVAL;
+			goto out_mailbox;
 
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
@@ -686,18 +690,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
 			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
 				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
-			return -EINVAL;
+			goto out_mailbox;
 		}
 
 		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
 			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
 				  attr->alt_port_num);
-			return -EINVAL;
+			goto out_mailbox;
 		}
 
 		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
 				   attr->alt_ah_attr.port_num))
-			return -EINVAL;
+			goto out_mailbox;
 
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
 							      attr->alt_port_num << 24);
@@ -793,12 +797,12 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
 			      mailbox, sqd_event, &status);
 	if (err)
-		goto out;
+		goto out_mailbox;
 	if (status) {
 		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
 			   cur_state, new_state, status);
 		err = -EINVAL;
-		goto out;
+		goto out_mailbox;
 	}
 
 	qp->state = new_state;
@@ -853,8 +857,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		}
 	}
 
-out:
+out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
+
+out:
+	mutex_unlock(&qp->mutex);
 	return err;
 }
 
@@ -1100,6 +1107,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
+	mutex_init(&qp->mutex);
 	qp->state = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
 	qp->resp_depth = 0;
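
The mthca_modify_qp() changes above follow the usual kernel unwind shape: take the per-QP mutex up front, turn every early return into a goto, and release everything on shared exit labels, with a second label for paths that also own the command mailbox. A minimal sketch of that shape with hypothetical helpers (demo_*, params_are_valid(), alloc_mailbox(), issue_command() and apply_result() are stand-ins, not mthca APIs):

static int demo_modify(struct demo_qp *qp)
{
	struct demo_mailbox *mailbox;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	if (!params_are_valid(qp))		/* hypothetical validation */
		goto out;			/* nothing else to release yet */

	mailbox = alloc_mailbox();		/* hypothetical allocation */
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = issue_command(qp, mailbox);	/* hypothetical firmware command */
	if (err)
		goto out_mailbox;

	err = apply_result(qp);			/* hypothetical follow-up work */

out_mailbox:
	free_mailbox(mailbox);			/* freed on success and failure alike */
out:
	mutex_unlock(&qp->mutex);
	return err;
}
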
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index df5e494a9d38..f4fddd5327f5 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -49,6 +49,12 @@ int mthca_reset(struct mthca_dev *mdev)
 	u32 *hca_header = NULL;
 	u32 *bridge_header = NULL;
 	struct pci_dev *bridge = NULL;
+	int bridge_pcix_cap = 0;
+	int hca_pcie_cap = 0;
+	int hca_pcix_cap = 0;
+
+	u16 devctl;
+	u16 linkctl;
 
 #define MTHCA_RESET_OFFSET 0xf0010
 #define MTHCA_RESET_VALUE  swab32(1)
@@ -110,6 +116,9 @@ int mthca_reset(struct mthca_dev *mdev)
 		}
 	}
 
+	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
+	hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
+
 	if (bridge) {
 		bridge_header = kmalloc(256, GFP_KERNEL);
 		if (!bridge_header) {
@@ -129,6 +138,13 @@ int mthca_reset(struct mthca_dev *mdev)
 				goto out;
 			}
 		}
+		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+		if (!bridge_pcix_cap) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't locate HCA bridge "
+				  "PCI-X capability, aborting.\n");
+			goto out;
+		}
 	}
 
 	/* actually hit reset */
@@ -178,6 +194,20 @@ int mthca_reset(struct mthca_dev *mdev)
 good:
 	/* Now restore the PCI headers */
 	if (bridge) {
+		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
+				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
+				  "split transaction control, aborting.\n");
+			goto out;
+		}
+		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
+				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
+				  "split transaction control, aborting.\n");
+			goto out;
+		}
 		/*
 		 * Bridge control register is at 0x3e, so we'll
 		 * naturally restore it last in this loop.
@@ -203,6 +233,35 @@ good:
 		}
 	}
 
+	if (hca_pcix_cap) {
+		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
+				 hca_header[hca_pcix_cap / 4])) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't restore HCA PCI-X "
+				  "command register, aborting.\n");
+			goto out;
+		}
+	}
+
+	if (hca_pcie_cap) {
+		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
+		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
+					  devctl)) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't restore HCA PCI Express "
+				  "Device Control register, aborting.\n");
+			goto out;
+		}
+		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
+		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
+					  linkctl)) {
+			err = -ENODEV;
+			mthca_err(mdev, "Couldn't restore HCA PCI Express "
+				  "Link control register, aborting.\n");
+			goto out;
+		}
+	}
+
 	for (i = 0; i < 16; ++i) {
 		if (i * 4 == PCI_COMMAND)
 			continue;
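
The restore code above treats hca_header and bridge_header as saved copies of the 256-byte PCI configuration space, indexed in 32-bit words, so a capability register at byte offset off is found at header[(cap + off) / 4]. A rough sketch of saving a header and picking out the PCI Express Device Control word the same way (illustrative only; demo_save_pcie_devctl() is not a driver function):

#include <linux/pci.h>

static int demo_save_pcie_devctl(struct pci_dev *pdev, u32 header[64], u16 *devctl)
{
	int pcie_cap, i;

	/* snapshot the 256-byte config header, one dword at a time */
	for (i = 0; i < 64; ++i)
		if (pci_read_config_dword(pdev, i * 4, &header[i]))
			return -ENODEV;

	pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pcie_cap)
		return -ENODEV;		/* not a PCI Express device */

	/* Device Control sits PCI_EXP_DEVCTL bytes into the capability;
	 * the low halfword of that saved dword is the 16-bit register. */
	*devctl = header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
	return 0;
}
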
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b292fefa3b41..fab417c5cf43 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -243,6 +243,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	spin_lock_init(&srq->lock);
 	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);
+	mutex_init(&srq->mutex);
 
 	if (mthca_is_memfree(dev))
 		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
@@ -371,7 +372,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	if (attr_mask & IB_SRQ_LIMIT) {
 		if (attr->srq_limit > srq->max)
 			return -EINVAL;
+
+		mutex_lock(&srq->mutex);
 		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+		mutex_unlock(&srq->mutex);
+
 		if (ret)
 			return ret;
 		if (status)