author    | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-06-10 14:47:26 -0400
committer | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2006-06-10 14:47:26 -0400
commit    | f0cd91a68acdc9b49d7f6738b514a426da627649 (patch)
tree      | 8ad73564015794197583b094217ae0a71e71e753 /drivers/infiniband
parent    | 60eef25701d25e99c991dd0f4a9f3832a0c3ad3e (diff)
parent    | 128e6ced247cda88f96fa9f2e4ba8b2c4a681560 (diff)
Merge ../linux-2.6
Diffstat (limited to 'drivers/infiniband')
41 files changed, 653 insertions, 539 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 7cfedb8d9bcd..86fee43502cd 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -34,6 +34,8 @@ | |||
34 | * | 34 | * |
35 | * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $ | 35 | * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $ |
36 | */ | 36 | */ |
37 | |||
38 | #include <linux/completion.h> | ||
37 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
38 | #include <linux/err.h> | 40 | #include <linux/err.h> |
39 | #include <linux/idr.h> | 41 | #include <linux/idr.h> |
@@ -122,7 +124,7 @@ struct cm_id_private { | |||
122 | struct rb_node service_node; | 124 | struct rb_node service_node; |
123 | struct rb_node sidr_id_node; | 125 | struct rb_node sidr_id_node; |
124 | spinlock_t lock; /* Do not acquire inside cm.lock */ | 126 | spinlock_t lock; /* Do not acquire inside cm.lock */ |
125 | wait_queue_head_t wait; | 127 | struct completion comp; |
126 | atomic_t refcount; | 128 | atomic_t refcount; |
127 | 129 | ||
128 | struct ib_mad_send_buf *msg; | 130 | struct ib_mad_send_buf *msg; |
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data); | |||
159 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) | 161 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) |
160 | { | 162 | { |
161 | if (atomic_dec_and_test(&cm_id_priv->refcount)) | 163 | if (atomic_dec_and_test(&cm_id_priv->refcount)) |
162 | wake_up(&cm_id_priv->wait); | 164 | complete(&cm_id_priv->comp); |
163 | } | 165 | } |
164 | 166 | ||
165 | static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | 167 | static int cm_alloc_msg(struct cm_id_private *cm_id_priv, |
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, | |||
559 | goto error; | 561 | goto error; |
560 | 562 | ||
561 | spin_lock_init(&cm_id_priv->lock); | 563 | spin_lock_init(&cm_id_priv->lock); |
562 | init_waitqueue_head(&cm_id_priv->wait); | 564 | init_completion(&cm_id_priv->comp); |
563 | INIT_LIST_HEAD(&cm_id_priv->work_list); | 565 | INIT_LIST_HEAD(&cm_id_priv->work_list); |
564 | atomic_set(&cm_id_priv->work_count, -1); | 566 | atomic_set(&cm_id_priv->work_count, -1); |
565 | atomic_set(&cm_id_priv->refcount, 1); | 567 | atomic_set(&cm_id_priv->refcount, 1); |
@@ -724,8 +726,8 @@ retest: | |||
724 | } | 726 | } |
725 | 727 | ||
726 | cm_free_id(cm_id->local_id); | 728 | cm_free_id(cm_id->local_id); |
727 | atomic_dec(&cm_id_priv->refcount); | 729 | cm_deref_id(cm_id_priv); |
728 | wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount)); | 730 | wait_for_completion(&cm_id_priv->comp); |
729 | while ((work = cm_dequeue_work(cm_id_priv)) != NULL) | 731 | while ((work = cm_dequeue_work(cm_id_priv)) != NULL) |
730 | cm_free_work(work); | 732 | cm_free_work(work); |
731 | if (cm_id_priv->private_data && cm_id_priv->private_data_len) | 733 | if (cm_id_priv->private_data && cm_id_priv->private_data_len) |
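The cm.c hunks above replace an open-coded refcount plus wait queue with a struct completion; mad.c, mad_rmpp.c and ucm.c below make the same conversion. A minimal sketch of the teardown pattern, using hypothetical names rather than the driver's own structures:

#include <linux/completion.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcount;		/* references held by outstanding work */
	struct completion comp;		/* signalled when the last reference drops */
};

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		complete(&o->comp);
}

static void obj_destroy(struct obj *o)
{
	obj_put(o);				/* drop the creation reference */
	wait_for_completion(&o->comp);		/* sleeps until the final obj_put() */
	kfree(o);
}

The usual motivation for this conversion is a teardown race: with wait_event(o->wait, !atomic_read(&o->refcount)), the destroyer can observe the counter hit zero and free the object while another thread is still inside wake_up() touching the wait queue head. A completion's internal locking means wait_for_completion() cannot return until complete() has finished with the structure.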
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 3a702da83e41..5ad41a64314c 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -228,10 +228,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
228 | goto error1; | 228 | goto error1; |
229 | } | 229 | } |
230 | /* Make sure class supplied is consistent with RMPP */ | 230 | /* Make sure class supplied is consistent with RMPP */ |
231 | if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { | 231 | if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { |
232 | if (!rmpp_version) | ||
233 | goto error1; | ||
234 | } else { | ||
235 | if (rmpp_version) | 232 | if (rmpp_version) |
236 | goto error1; | 233 | goto error1; |
237 | } | 234 | } |
@@ -355,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
355 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 352 | INIT_WORK(&mad_agent_priv->local_work, local_completions, |
356 | mad_agent_priv); | 353 | mad_agent_priv); |
357 | atomic_set(&mad_agent_priv->refcount, 1); | 354 | atomic_set(&mad_agent_priv->refcount, 1); |
358 | init_waitqueue_head(&mad_agent_priv->wait); | 355 | init_completion(&mad_agent_priv->comp); |
359 | 356 | ||
360 | return &mad_agent_priv->agent; | 357 | return &mad_agent_priv->agent; |
361 | 358 | ||
@@ -470,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, | |||
470 | mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; | 467 | mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; |
471 | mad_snoop_priv->agent.port_num = port_num; | 468 | mad_snoop_priv->agent.port_num = port_num; |
472 | mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; | 469 | mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; |
473 | init_waitqueue_head(&mad_snoop_priv->wait); | 470 | init_completion(&mad_snoop_priv->comp); |
474 | mad_snoop_priv->snoop_index = register_snoop_agent( | 471 | mad_snoop_priv->snoop_index = register_snoop_agent( |
475 | &port_priv->qp_info[qpn], | 472 | &port_priv->qp_info[qpn], |
476 | mad_snoop_priv); | 473 | mad_snoop_priv); |
@@ -489,6 +486,18 @@ error1: | |||
489 | } | 486 | } |
490 | EXPORT_SYMBOL(ib_register_mad_snoop); | 487 | EXPORT_SYMBOL(ib_register_mad_snoop); |
491 | 488 | ||
489 | static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | ||
490 | { | ||
491 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
492 | complete(&mad_agent_priv->comp); | ||
493 | } | ||
494 | |||
495 | static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) | ||
496 | { | ||
497 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | ||
498 | complete(&mad_snoop_priv->comp); | ||
499 | } | ||
500 | |||
492 | static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | 501 | static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) |
493 | { | 502 | { |
494 | struct ib_mad_port_private *port_priv; | 503 | struct ib_mad_port_private *port_priv; |
@@ -512,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | |||
512 | flush_workqueue(port_priv->wq); | 521 | flush_workqueue(port_priv->wq); |
513 | ib_cancel_rmpp_recvs(mad_agent_priv); | 522 | ib_cancel_rmpp_recvs(mad_agent_priv); |
514 | 523 | ||
515 | atomic_dec(&mad_agent_priv->refcount); | 524 | deref_mad_agent(mad_agent_priv); |
516 | wait_event(mad_agent_priv->wait, | 525 | wait_for_completion(&mad_agent_priv->comp); |
517 | !atomic_read(&mad_agent_priv->refcount)); | ||
518 | 526 | ||
519 | kfree(mad_agent_priv->reg_req); | 527 | kfree(mad_agent_priv->reg_req); |
520 | ib_dereg_mr(mad_agent_priv->agent.mr); | 528 | ib_dereg_mr(mad_agent_priv->agent.mr); |
@@ -532,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) | |||
532 | atomic_dec(&qp_info->snoop_count); | 540 | atomic_dec(&qp_info->snoop_count); |
533 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 541 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
534 | 542 | ||
535 | atomic_dec(&mad_snoop_priv->refcount); | 543 | deref_snoop_agent(mad_snoop_priv); |
536 | wait_event(mad_snoop_priv->wait, | 544 | wait_for_completion(&mad_snoop_priv->comp); |
537 | !atomic_read(&mad_snoop_priv->refcount)); | ||
538 | 545 | ||
539 | kfree(mad_snoop_priv); | 546 | kfree(mad_snoop_priv); |
540 | } | 547 | } |
@@ -603,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info, | |||
603 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 610 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
604 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, | 611 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, |
605 | send_buf, mad_send_wc); | 612 | send_buf, mad_send_wc); |
606 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | 613 | deref_snoop_agent(mad_snoop_priv); |
607 | wake_up(&mad_snoop_priv->wait); | ||
608 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | 614 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
609 | } | 615 | } |
610 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 616 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
@@ -629,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info, | |||
629 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 635 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
630 | mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, | 636 | mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, |
631 | mad_recv_wc); | 637 | mad_recv_wc); |
632 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | 638 | deref_snoop_agent(mad_snoop_priv); |
633 | wake_up(&mad_snoop_priv->wait); | ||
634 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | 639 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
635 | } | 640 | } |
636 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 641 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
@@ -971,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | |||
971 | 976 | ||
972 | free_send_rmpp_list(mad_send_wr); | 977 | free_send_rmpp_list(mad_send_wr); |
973 | kfree(send_buf->mad); | 978 | kfree(send_buf->mad); |
974 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 979 | deref_mad_agent(mad_agent_priv); |
975 | wake_up(&mad_agent_priv->wait); | ||
976 | } | 980 | } |
977 | EXPORT_SYMBOL(ib_free_send_mad); | 981 | EXPORT_SYMBOL(ib_free_send_mad); |
978 | 982 | ||
@@ -1760,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1760 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | 1764 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
1761 | mad_recv_wc); | 1765 | mad_recv_wc); |
1762 | if (!mad_recv_wc) { | 1766 | if (!mad_recv_wc) { |
1763 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1767 | deref_mad_agent(mad_agent_priv); |
1764 | wake_up(&mad_agent_priv->wait); | ||
1765 | return; | 1768 | return; |
1766 | } | 1769 | } |
1767 | } | 1770 | } |
@@ -1773,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1773 | if (!mad_send_wr) { | 1776 | if (!mad_send_wr) { |
1774 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1777 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1775 | ib_free_recv_mad(mad_recv_wc); | 1778 | ib_free_recv_mad(mad_recv_wc); |
1776 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1779 | deref_mad_agent(mad_agent_priv); |
1777 | wake_up(&mad_agent_priv->wait); | ||
1778 | return; | 1780 | return; |
1779 | } | 1781 | } |
1780 | ib_mark_mad_done(mad_send_wr); | 1782 | ib_mark_mad_done(mad_send_wr); |
@@ -1793,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1793 | } else { | 1795 | } else { |
1794 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 1796 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1795 | mad_recv_wc); | 1797 | mad_recv_wc); |
1796 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1798 | deref_mad_agent(mad_agent_priv); |
1797 | wake_up(&mad_agent_priv->wait); | ||
1798 | } | 1799 | } |
1799 | } | 1800 | } |
1800 | 1801 | ||
@@ -2024,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
2024 | mad_send_wc); | 2025 | mad_send_wc); |
2025 | 2026 | ||
2026 | /* Release reference on agent taken when sending */ | 2027 | /* Release reference on agent taken when sending */ |
2027 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 2028 | deref_mad_agent(mad_agent_priv); |
2028 | wake_up(&mad_agent_priv->wait); | ||
2029 | return; | 2029 | return; |
2030 | done: | 2030 | done: |
2031 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2031 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 6c9c133d71ef..b4fa28d3160f 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #ifndef __IB_MAD_PRIV_H__ | 37 | #ifndef __IB_MAD_PRIV_H__ |
38 | #define __IB_MAD_PRIV_H__ | 38 | #define __IB_MAD_PRIV_H__ |
39 | 39 | ||
40 | #include <linux/completion.h> | ||
40 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
41 | #include <linux/kthread.h> | 42 | #include <linux/kthread.h> |
42 | #include <linux/workqueue.h> | 43 | #include <linux/workqueue.h> |
@@ -108,7 +109,7 @@ struct ib_mad_agent_private { | |||
108 | struct list_head rmpp_list; | 109 | struct list_head rmpp_list; |
109 | 110 | ||
110 | atomic_t refcount; | 111 | atomic_t refcount; |
111 | wait_queue_head_t wait; | 112 | struct completion comp; |
112 | }; | 113 | }; |
113 | 114 | ||
114 | struct ib_mad_snoop_private { | 115 | struct ib_mad_snoop_private { |
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private { | |||
117 | int snoop_index; | 118 | int snoop_index; |
118 | int mad_snoop_flags; | 119 | int mad_snoop_flags; |
119 | atomic_t refcount; | 120 | atomic_t refcount; |
120 | wait_queue_head_t wait; | 121 | struct completion comp; |
121 | }; | 122 | }; |
122 | 123 | ||
123 | struct ib_mad_send_wr_private { | 124 | struct ib_mad_send_wr_private { |
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index dfd4e588ce03..d4704e054e30 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -49,7 +49,7 @@ struct mad_rmpp_recv { | |||
49 | struct list_head list; | 49 | struct list_head list; |
50 | struct work_struct timeout_work; | 50 | struct work_struct timeout_work; |
51 | struct work_struct cleanup_work; | 51 | struct work_struct cleanup_work; |
52 | wait_queue_head_t wait; | 52 | struct completion comp; |
53 | enum rmpp_state state; | 53 | enum rmpp_state state; |
54 | spinlock_t lock; | 54 | spinlock_t lock; |
55 | atomic_t refcount; | 55 | atomic_t refcount; |
@@ -69,10 +69,16 @@ struct mad_rmpp_recv { | |||
69 | u8 method; | 69 | u8 method; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) | ||
73 | { | ||
74 | if (atomic_dec_and_test(&rmpp_recv->refcount)) | ||
75 | complete(&rmpp_recv->comp); | ||
76 | } | ||
77 | |||
72 | static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) | 78 | static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) |
73 | { | 79 | { |
74 | atomic_dec(&rmpp_recv->refcount); | 80 | deref_rmpp_recv(rmpp_recv); |
75 | wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount)); | 81 | wait_for_completion(&rmpp_recv->comp); |
76 | ib_destroy_ah(rmpp_recv->ah); | 82 | ib_destroy_ah(rmpp_recv->ah); |
77 | kfree(rmpp_recv); | 83 | kfree(rmpp_recv); |
78 | } | 84 | } |
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, | |||
253 | goto error; | 259 | goto error; |
254 | 260 | ||
255 | rmpp_recv->agent = agent; | 261 | rmpp_recv->agent = agent; |
256 | init_waitqueue_head(&rmpp_recv->wait); | 262 | init_completion(&rmpp_recv->comp); |
257 | INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); | 263 | INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); |
258 | INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); | 264 | INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); |
259 | spin_lock_init(&rmpp_recv->lock); | 265 | spin_lock_init(&rmpp_recv->lock); |
@@ -279,12 +285,6 @@ error: kfree(rmpp_recv); | |||
279 | return NULL; | 285 | return NULL; |
280 | } | 286 | } |
281 | 287 | ||
282 | static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) | ||
283 | { | ||
284 | if (atomic_dec_and_test(&rmpp_recv->refcount)) | ||
285 | wake_up(&rmpp_recv->wait); | ||
286 | } | ||
287 | |||
288 | static struct mad_rmpp_recv * | 288 | static struct mad_rmpp_recv * |
289 | find_rmpp_recv(struct ib_mad_agent_private *agent, | 289 | find_rmpp_recv(struct ib_mad_agent_private *agent, |
290 | struct ib_mad_recv_wc *mad_recv_wc) | 290 | struct ib_mad_recv_wc *mad_recv_wc) |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 15121cb5a1f6..21f9282c1b25 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, | |||
336 | switch (width) { | 336 | switch (width) { |
337 | case 4: | 337 | case 4: |
338 | ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> | 338 | ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> |
339 | (offset % 4)) & 0xf); | 339 | (4 - (offset % 8))) & 0xf); |
340 | break; | 340 | break; |
341 | case 8: | 341 | case 8: |
342 | ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); | 342 | ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); |
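The sysfs.c fix above corrects the shift used for 4-bit PortCounters fields: counters are addressed by big-endian bit offset, so a nibble with offset % 8 == 0 sits in the high half of its byte and one with offset % 8 == 4 in the low half. The old "offset % 4" shift was always zero for nibble-aligned offsets and so always read the low nibble. A condensed sketch of the corrected extraction, with a hypothetical helper name:

#include <linux/types.h>

static u8 pma_read_nibble(const u8 *data, unsigned int offset)
{
	/* 40 matches the byte offset used by show_pma_counter() above */
	return (data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
}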
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index f6a05965a4e8..9164a09b6ccd 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -32,6 +32,8 @@ | |||
32 | * | 32 | * |
33 | * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $ | 33 | * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $ |
34 | */ | 34 | */ |
35 | |||
36 | #include <linux/completion.h> | ||
35 | #include <linux/init.h> | 37 | #include <linux/init.h> |
36 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
37 | #include <linux/module.h> | 39 | #include <linux/module.h> |
@@ -72,7 +74,7 @@ struct ib_ucm_file { | |||
72 | 74 | ||
73 | struct ib_ucm_context { | 75 | struct ib_ucm_context { |
74 | int id; | 76 | int id; |
75 | wait_queue_head_t wait; | 77 | struct completion comp; |
76 | atomic_t ref; | 78 | atomic_t ref; |
77 | int events_reported; | 79 | int events_reported; |
78 | 80 | ||
@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) | |||
138 | static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) | 140 | static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) |
139 | { | 141 | { |
140 | if (atomic_dec_and_test(&ctx->ref)) | 142 | if (atomic_dec_and_test(&ctx->ref)) |
141 | wake_up(&ctx->wait); | 143 | complete(&ctx->comp); |
142 | } | 144 | } |
143 | 145 | ||
144 | static inline int ib_ucm_new_cm_id(int event) | 146 | static inline int ib_ucm_new_cm_id(int event) |
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) | |||
178 | return NULL; | 180 | return NULL; |
179 | 181 | ||
180 | atomic_set(&ctx->ref, 1); | 182 | atomic_set(&ctx->ref, 1); |
181 | init_waitqueue_head(&ctx->wait); | 183 | init_completion(&ctx->comp); |
182 | ctx->file = file; | 184 | ctx->file = file; |
183 | INIT_LIST_HEAD(&ctx->events); | 185 | INIT_LIST_HEAD(&ctx->events); |
184 | 186 | ||
@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file, | |||
586 | if (IS_ERR(ctx)) | 588 | if (IS_ERR(ctx)) |
587 | return PTR_ERR(ctx); | 589 | return PTR_ERR(ctx); |
588 | 590 | ||
589 | atomic_dec(&ctx->ref); | 591 | ib_ucm_ctx_put(ctx); |
590 | wait_event(ctx->wait, !atomic_read(&ctx->ref)); | 592 | wait_for_completion(&ctx->comp); |
591 | 593 | ||
592 | /* No new events will be generated after destroying the cm_id. */ | 594 | /* No new events will be generated after destroying the cm_id. */ |
593 | ib_destroy_cm_id(ctx->cm_id); | 595 | ib_destroy_cm_id(ctx->cm_id); |
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index 36a32c315668..efe147dbeb42 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c | |||
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
211 | */ | 211 | */ |
212 | 212 | ||
213 | work = kmalloc(sizeof *work, GFP_KERNEL); | 213 | work = kmalloc(sizeof *work, GFP_KERNEL); |
214 | if (!work) | 214 | if (!work) { |
215 | mmput(mm); | ||
215 | return; | 216 | return; |
217 | } | ||
216 | 218 | ||
217 | INIT_WORK(&work->work, ib_umem_account, work); | 219 | INIT_WORK(&work->work, ib_umem_account, work); |
218 | work->mm = mm; | 220 | work->mm = mm; |
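The uverbs_mem.c hunk plugs a reference leak: the mm_struct reference taken earlier in ib_umem_release_on_close() was not released when the deferred-work allocation failed. A minimal sketch of the balanced error path, with hypothetical names for the work item and the hand-off:

#include <linux/sched.h>
#include <linux/slab.h>

struct umem_work {			/* hypothetical deferred-accounting item */
	struct mm_struct *mm;
	unsigned long diff;
};

static void queue_accounting_work(struct umem_work *work);	/* hypothetical */

static void defer_vm_accounting(struct mm_struct *mm, unsigned long diff)
{
	struct umem_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work) {
		mmput(mm);		/* must still drop the mm reference */
		return;
	}
	work->mm = mm;			/* the work handler calls mmput() later */
	work->diff = diff;
	queue_accounting_work(work);
}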
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h index 593e28969c69..46762387f5f8 100644 --- a/drivers/infiniband/hw/ipath/ipath_debug.h +++ b/drivers/infiniband/hw/ipath/ipath_debug.h | |||
@@ -60,11 +60,11 @@ | |||
60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ | 60 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ |
61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ | 61 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ |
62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ | 62 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ |
63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */ | 63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ |
64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */ | 64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ |
65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */ | 65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ |
66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */ | 66 | #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */ |
67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */ | 67 | #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */ |
68 | 68 | ||
69 | #else /* _IPATH_DEBUGGING */ | 69 | #else /* _IPATH_DEBUGGING */ |
70 | 70 | ||
@@ -79,11 +79,12 @@ | |||
79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ | 79 | #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ |
80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ | 80 | #define __IPATH_VERBDBG 0x0 /* very verbose debug */ |
81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ | 81 | #define __IPATH_PKTDBG 0x0 /* print packet data */ |
82 | #define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */ | 82 | #define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */ |
83 | /* print mmap/nopage stuff, not using VDBG any more */ | 83 | /* print mmap/nopage stuff, not using VDBG any more */ |
84 | #define __IPATH_MMDBG 0x0 | 84 | #define __IPATH_MMDBG 0x0 |
85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ | 85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ |
86 | #define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | 86 | #define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */ |
87 | #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | ||
87 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ | 88 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ |
88 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ | 89 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ |
89 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ | 90 | #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index cd533cf951c2..28ddceb260e8 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c | |||
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp) | |||
277 | 277 | ||
278 | bail: | 278 | bail: |
279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 279 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
280 | mutex_unlock(&ipath_mutex); | ||
281 | 280 | ||
282 | /* Only expose a way to reset the device if we | 281 | /* Only expose a way to reset the device if we |
283 | make it into diag mode. */ | 282 | make it into diag mode. */ |
284 | if (ret == 0) | 283 | if (ret == 0) |
285 | ipath_expose_reset(&dd->pcidev->dev); | 284 | ipath_expose_reset(&dd->pcidev->dev); |
286 | 285 | ||
286 | mutex_unlock(&ipath_mutex); | ||
287 | |||
287 | return ret; | 288 | return ret; |
288 | } | 289 | } |
289 | 290 | ||
@@ -365,15 +366,3 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data, | |||
365 | bail: | 366 | bail: |
366 | return ret; | 367 | return ret; |
367 | } | 368 | } |
368 | |||
369 | void ipath_diag_bringup_link(struct ipath_devdata *dd) | ||
370 | { | ||
371 | if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE)) | ||
372 | return; | ||
373 | |||
374 | diag_set_link = 1; | ||
375 | ipath_cdbg(VERBOSE, "Trying to set to set link active for " | ||
376 | "diag pkt\n"); | ||
377 | ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM); | ||
378 | ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE); | ||
379 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 58a94efb0070..dddcdae736ac 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *, | |||
116 | #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 | 116 | #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 |
117 | 117 | ||
118 | static const struct pci_device_id ipath_pci_tbl[] = { | 118 | static const struct pci_device_id ipath_pci_tbl[] = { |
119 | {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, | 119 | { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, |
120 | PCI_DEVICE_ID_INFINIPATH_HT)}, | 120 | { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, |
121 | {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, | 121 | { 0, } |
122 | PCI_DEVICE_ID_INFINIPATH_PE800)}, | ||
123 | }; | 122 | }; |
124 | 123 | ||
125 | MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); | 124 | MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); |
@@ -418,9 +417,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
418 | 417 | ||
419 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | 418 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); |
420 | if (ret) { | 419 | if (ret) { |
421 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | 420 | /* |
422 | "fails: %d\n", dd->ipath_unit, ret); | 421 | * if the 64 bit setup fails, try 32 bit. Some systems |
423 | goto bail_regions; | 422 | * do not setup 64 bit maps on systems with 2GB or less |
423 | * memory installed. | ||
424 | */ | ||
425 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
426 | if (ret) { | ||
427 | dev_info(&pdev->dev, "pci_set_dma_mask unit %u " | ||
428 | "fails: %d\n", dd->ipath_unit, ret); | ||
429 | goto bail_regions; | ||
430 | } | ||
431 | else | ||
432 | ipath_dbg("No 64bit DMA mask, used 32 bit mask\n"); | ||
424 | } | 433 | } |
425 | 434 | ||
426 | pci_set_master(pdev); | 435 | pci_set_master(pdev); |
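The ipath_init_one() change above adds the usual probe-time fallback from a 64-bit to a 32-bit DMA mask instead of failing outright. Condensed into a hypothetical helper:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int setup_dma_mask(struct pci_dev *pdev)		/* hypothetical helper */
{
	int ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);

	if (ret) {
		/* some chipsets will not set up 64-bit maps; try 32-bit */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret)
			dev_info(&pdev->dev, "no usable DMA mask: %d\n", ret);
	}
	return ret;
}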
@@ -1729,7 +1738,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq) | |||
1729 | } | 1738 | } |
1730 | } | 1739 | } |
1731 | 1740 | ||
1732 | int __init infinipath_init(void) | 1741 | static int __init infinipath_init(void) |
1733 | { | 1742 | { |
1734 | int ret; | 1743 | int ret; |
1735 | 1744 | ||
@@ -1896,19 +1905,19 @@ static void __exit infinipath_cleanup(void) | |||
1896 | } else | 1905 | } else |
1897 | ipath_dbg("irq is 0, not doing free_irq " | 1906 | ipath_dbg("irq is 0, not doing free_irq " |
1898 | "for unit %u\n", dd->ipath_unit); | 1907 | "for unit %u\n", dd->ipath_unit); |
1899 | dd->pcidev = NULL; | ||
1900 | } | ||
1901 | 1908 | ||
1902 | /* | 1909 | /* |
1903 | * we check for NULL here, because it's outside the kregbase | 1910 | * we check for NULL here, because it's outside |
1904 | * check, and we need to call it after the free_irq. Thus | 1911 | * the kregbase check, and we need to call it |
1905 | * it's possible that the function pointers were never | 1912 | * after the free_irq. Thus it's possible that |
1906 | * initialized. | 1913 | * the function pointers were never initialized. |
1907 | */ | 1914 | */ |
1908 | if (dd->ipath_f_cleanup) | 1915 | if (dd->ipath_f_cleanup) |
1909 | /* clean up chip-specific stuff */ | 1916 | /* clean up chip-specific stuff */ |
1910 | dd->ipath_f_cleanup(dd); | 1917 | dd->ipath_f_cleanup(dd); |
1911 | 1918 | ||
1919 | dd->pcidev = NULL; | ||
1920 | } | ||
1912 | spin_lock_irqsave(&ipath_devs_lock, flags); | 1921 | spin_lock_irqsave(&ipath_devs_lock, flags); |
1913 | } | 1922 | } |
1914 | 1923 | ||
@@ -1949,7 +1958,7 @@ int ipath_reset_device(int unit) | |||
1949 | } | 1958 | } |
1950 | 1959 | ||
1951 | if (dd->ipath_pd) | 1960 | if (dd->ipath_pd) |
1952 | for (i = 1; i < dd->ipath_portcnt; i++) { | 1961 | for (i = 1; i < dd->ipath_cfgports; i++) { |
1953 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { | 1962 | if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { |
1954 | ipath_dbg("unit %u port %d is in use " | 1963 | ipath_dbg("unit %u port %d is in use " |
1955 | "(PID %u cmd %s), can't reset\n", | 1964 | "(PID %u cmd %s), can't reset\n", |
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c index f11a900e8cd7..a2f1ceafcca9 100644 --- a/drivers/infiniband/hw/ipath/ipath_eeprom.c +++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c | |||
@@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust) | |||
505 | * ipath_get_guid - get the GUID from the i2c device | 505 | * ipath_get_guid - get the GUID from the i2c device |
506 | * @dd: the infinipath device | 506 | * @dd: the infinipath device |
507 | * | 507 | * |
508 | * When we add the multi-chip support, we will probably have to add | 508 | * We have the capability to use the ipath_nguid field, and get |
509 | * the ability to use the number of guids field, and get the guid from | 509 | * the guid from the first chip's flash, to use for all of them. |
510 | * the first chip's flash, to use for all of them. | ||
511 | */ | 510 | */ |
512 | void ipath_get_guid(struct ipath_devdata *dd) | 511 | void ipath_get_eeprom_info(struct ipath_devdata *dd) |
513 | { | 512 | { |
514 | void *buf; | 513 | void *buf; |
515 | struct ipath_flash *ifp; | 514 | struct ipath_flash *ifp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index c347191f02bf..ada267e41f6c 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c | |||
@@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd, | |||
139 | kinfo->spi_piosize = dd->ipath_ibmaxlen; | 139 | kinfo->spi_piosize = dd->ipath_ibmaxlen; |
140 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ | 140 | kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ |
141 | kinfo->spi_port = pd->port_port; | 141 | kinfo->spi_port = pd->port_port; |
142 | kinfo->spi_sw_version = IPATH_USER_SWVERSION; | 142 | kinfo->spi_sw_version = IPATH_KERN_SWVERSION; |
143 | kinfo->spi_hw_version = dd->ipath_revision; | 143 | kinfo->spi_hw_version = dd->ipath_revision; |
144 | 144 | ||
145 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) | 145 | if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) |
@@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp, | |||
1224 | 1224 | ||
1225 | if (tail == head) { | 1225 | if (tail == head) { |
1226 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); | 1226 | set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); |
1227 | if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ | ||
1228 | (void)ipath_write_ureg(dd, ur_rcvhdrhead, | ||
1229 | dd->ipath_rhdrhead_intr_off | ||
1230 | | head, pd->port_port); | ||
1227 | poll_wait(fp, &pd->port_wait, pt); | 1231 | poll_wait(fp, &pd->port_wait, pt); |
1228 | 1232 | ||
1229 | if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { | 1233 | if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { |
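The ipath_poll() addition arms the receive-header interrupt before calling poll_wait(), so a packet landing between the emptiness check and the sleep still produces a wakeup. The same ordering in a generic, entirely hypothetical character-device poll routine:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct mydev {				/* hypothetical device state */
	unsigned long flags;
#define MYDEV_WAITING	0
	wait_queue_head_t wait;
};

static int mydev_rx_empty(struct mydev *dev);		/* hypothetical */
static void mydev_arm_rx_intr(struct mydev *dev);	/* hypothetical */

static unsigned int mydev_poll(struct file *fp, poll_table *pt)
{
	struct mydev *dev = fp->private_data;
	unsigned int mask = 0;

	if (mydev_rx_empty(dev)) {
		set_bit(MYDEV_WAITING, &dev->flags);
		mydev_arm_rx_intr(dev);		/* arm first ... */
		poll_wait(fp, &dev->wait, pt);	/* ... then register the wait */
		if (!mydev_rx_empty(dev))
			mask = POLLIN | POLLRDNORM;
	} else
		mask = POLLIN | POLLRDNORM;

	return mask;
}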
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c index 4652435998f3..fac0a2b74de2 100644 --- a/drivers/infiniband/hw/ipath/ipath_ht400.c +++ b/drivers/infiniband/hw/ipath/ipath_ht400.c | |||
@@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
607 | case 4: /* Ponderosa is one of the bringup boards */ | 607 | case 4: /* Ponderosa is one of the bringup boards */ |
608 | n = "Ponderosa"; | 608 | n = "Ponderosa"; |
609 | break; | 609 | break; |
610 | case 5: /* HT-460 original production board */ | 610 | case 5: |
611 | /* | ||
612 | * HT-460 original production board; two production levels, with | ||
613 | * different serial number ranges. See ipath_ht_early_init() for | ||
614 | * case where we enable IPATH_GPIO_INTR for later serial # range. | ||
615 | */ | ||
611 | n = "InfiniPath_HT-460"; | 616 | n = "InfiniPath_HT-460"; |
612 | break; | 617 | break; |
613 | case 6: | 618 | case 6: |
@@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
642 | if (n) | 647 | if (n) |
643 | snprintf(name, namelen, "%s", n); | 648 | snprintf(name, namelen, "%s", n); |
644 | 649 | ||
645 | if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) { | 650 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { |
646 | /* | 651 | /* |
647 | * This version of the driver only supports the HT-400 | 652 | * This version of the driver only supports the HT-400 |
648 | * Rev 3.2 | 653 | * Rev 3.2 |
@@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1520 | */ | 1525 | */ |
1521 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 1526 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
1522 | INFINIPATH_S_ABORT); | 1527 | INFINIPATH_S_ABORT); |
1528 | |||
1529 | ipath_get_eeprom_info(dd); | ||
1530 | if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && | ||
1531 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { | ||
1532 | /* | ||
1533 | * Later production HT-460 has same changes as HT-465, so | ||
1534 | * can use GPIO interrupts. They have serial #'s starting | ||
1535 | * with 128, rather than 112. | ||
1536 | */ | ||
1537 | dd->ipath_flags |= IPATH_GPIO_INTR; | ||
1538 | dd->ipath_flags &= ~IPATH_POLL_RX_INTR; | ||
1539 | } | ||
1523 | return 0; | 1540 | return 0; |
1524 | } | 1541 | } |
1525 | 1542 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 2823ff9c0c62..dc83250d26a6 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); | |||
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Number of buffers reserved for driver (layered drivers and SMA | 55 | * Number of buffers reserved for driver (layered drivers and SMA |
56 | * send). Reserved at end of buffer list. | 56 | * send). Reserved at end of buffer list. Initialized based on |
57 | * number of PIO buffers if not set via module interface. | ||
58 | * The problem with this is that it's global, but we'll use different | ||
59 | * numbers for different chip types. So the default value is not | ||
60 | * very useful. I've redefined it for the 1.3 release so that it's | ||
61 | * zero unless set by the user to something else, in which case we | ||
62 | * try to respect it. | ||
57 | */ | 63 | */ |
58 | static ushort ipath_kpiobufs = 32; | 64 | static ushort ipath_kpiobufs; |
59 | 65 | ||
60 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); | 66 | static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); |
61 | 67 | ||
62 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint, | 68 | module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, |
63 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); | 69 | &ipath_kpiobufs, S_IWUSR | S_IRUGO); |
64 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); | 70 | MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); |
65 | 71 | ||
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
531 | * Don't clear ipath_flags as 8bit mode was set before | 537 | * Don't clear ipath_flags as 8bit mode was set before |
532 | * entering this func. However, we do set the linkstate to | 538 | * entering this func. However, we do set the linkstate to |
533 | * unknown, so we can watch for a transition. | 539 | * unknown, so we can watch for a transition. |
540 | * PRESENT is set because we want register reads to work, | ||
541 | * and the kernel infrastructure saw it in config space; | ||
542 | * We clear it if we have failures. | ||
534 | */ | 543 | */ |
535 | dd->ipath_flags |= IPATH_LINKUNK; | 544 | dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; |
536 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | | 545 | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | |
537 | IPATH_LINKDOWN | IPATH_LINKINIT); | 546 | IPATH_LINKDOWN | IPATH_LINKINIT); |
538 | 547 | ||
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd, | |||
560 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { | 569 | || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { |
561 | ipath_dev_err(dd, "Register read failures from chip, " | 570 | ipath_dev_err(dd, "Register read failures from chip, " |
562 | "giving up initialization\n"); | 571 | "giving up initialization\n"); |
572 | dd->ipath_flags &= ~IPATH_PRESENT; | ||
563 | ret = -ENODEV; | 573 | ret = -ENODEV; |
564 | goto done; | 574 | goto done; |
565 | } | 575 | } |
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
682 | */ | 692 | */ |
683 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) | 693 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) |
684 | / (sizeof(u64) * BITS_PER_BYTE / 2); | 694 | / (sizeof(u64) * BITS_PER_BYTE / 2); |
685 | if (!ipath_kpiobufs) /* have to have at least 1, for SMA */ | 695 | if (ipath_kpiobufs == 0) { |
686 | kpiobufs = ipath_kpiobufs = 1; | 696 | /* not set by user, or set explictly to default */ |
687 | else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) < | 697 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) |
688 | (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) { | 698 | kpiobufs = 32; |
689 | dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) " | 699 | else |
690 | "for %u ports to have %u each!\n", | 700 | kpiobufs = 16; |
691 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, | 701 | } |
692 | dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT); | 702 | else |
693 | kpiobufs = 1; /* reserve just the minimum for SMA/ether */ | ||
694 | } else | ||
695 | kpiobufs = ipath_kpiobufs; | 703 | kpiobufs = ipath_kpiobufs; |
696 | 704 | ||
697 | if (kpiobufs > | 705 | if (kpiobufs > |
@@ -871,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
871 | 879 | ||
872 | done: | 880 | done: |
873 | if (!ret) { | 881 | if (!ret) { |
874 | ipath_get_guid(dd); | ||
875 | *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; | 882 | *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; |
876 | if (!dd->ipath_f_intrsetup(dd)) { | 883 | if (!dd->ipath_f_intrsetup(dd)) { |
877 | /* now we can enable all interrupts from the chip */ | 884 | /* now we can enable all interrupts from the chip */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 60f5f4108069..3e72a1fe3d73 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -172,8 +172,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
172 | "was %s\n", dd->ipath_unit, | 172 | "was %s\n", dd->ipath_unit, |
173 | ib_linkstate(lstate), | 173 | ib_linkstate(lstate), |
174 | ib_linkstate((unsigned) | 174 | ib_linkstate((unsigned) |
175 | dd->ipath_lastibcstat | 175 | dd->ipath_lastibcstat |
176 | & IPATH_IBSTATE_MASK)); | 176 | & IPATH_IBSTATE_MASK)); |
177 | } | 177 | } |
178 | else { | 178 | else { |
179 | lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; | 179 | lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; |
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd) | |||
665 | 665 | ||
666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); | 666 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); |
667 | if (ret > 0) | 667 | if (ret > 0) |
668 | goto clear; | 668 | goto set; |
669 | 669 | ||
670 | ret = __ipath_verbs_piobufavail(dd); | 670 | ret = __ipath_verbs_piobufavail(dd); |
671 | if (ret > 0) | 671 | if (ret > 0) |
672 | goto clear; | 672 | goto set; |
673 | 673 | ||
674 | return; | 674 | return; |
675 | clear: | 675 | set: |
676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); | 676 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); |
677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 677 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
678 | dd->ipath_sendctrl); | 678 | dd->ipath_sendctrl); |
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat) | |||
719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) | 719 | irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) |
720 | { | 720 | { |
721 | struct ipath_devdata *dd = data; | 721 | struct ipath_devdata *dd = data; |
722 | u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | 722 | u32 istat; |
723 | ipath_err_t estat = 0; | 723 | ipath_err_t estat = 0; |
724 | static unsigned unexpected = 0; | 724 | static unsigned unexpected = 0; |
725 | irqreturn_t ret; | 725 | irqreturn_t ret; |
726 | 726 | ||
727 | if(!(dd->ipath_flags & IPATH_PRESENT)) { | ||
728 | /* this is mostly so we don't try to touch the chip while | ||
729 | * it is being reset */ | ||
730 | /* | ||
731 | * This return value is perhaps odd, but we do not want the | ||
732 | * interrupt core code to remove our interrupt handler | ||
733 | * because we don't appear to be handling an interrupt | ||
734 | * during a chip reset. | ||
735 | */ | ||
736 | return IRQ_HANDLED; | ||
737 | } | ||
738 | |||
739 | istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); | ||
727 | if (unlikely(!istat)) { | 740 | if (unlikely(!istat)) { |
728 | ipath_stats.sps_nullintr++; | 741 | ipath_stats.sps_nullintr++; |
729 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | 742 | ret = IRQ_NONE; /* not our interrupt, or already handled */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 159d0aed31a5..5d92d57b6f54 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -528,7 +528,6 @@ extern spinlock_t ipath_devs_lock; | |||
528 | extern struct ipath_devdata *ipath_lookup(int unit); | 528 | extern struct ipath_devdata *ipath_lookup(int unit); |
529 | 529 | ||
530 | extern u16 ipath_layer_rcv_opcode; | 530 | extern u16 ipath_layer_rcv_opcode; |
531 | extern int ipath_verbs_registered; | ||
532 | extern int __ipath_layer_intr(struct ipath_devdata *, u32); | 531 | extern int __ipath_layer_intr(struct ipath_devdata *, u32); |
533 | extern int ipath_layer_intr(struct ipath_devdata *, u32); | 532 | extern int ipath_layer_intr(struct ipath_devdata *, u32); |
534 | extern int __ipath_layer_rcv(struct ipath_devdata *, void *, | 533 | extern int __ipath_layer_rcv(struct ipath_devdata *, void *, |
@@ -651,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); | |||
651 | void ipath_init_pe800_funcs(struct ipath_devdata *); | 650 | void ipath_init_pe800_funcs(struct ipath_devdata *); |
652 | /* init HT-400-specific func */ | 651 | /* init HT-400-specific func */ |
653 | void ipath_init_ht400_funcs(struct ipath_devdata *); | 652 | void ipath_init_ht400_funcs(struct ipath_devdata *); |
654 | void ipath_get_guid(struct ipath_devdata *); | 653 | void ipath_get_eeprom_info(struct ipath_devdata *); |
655 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); | 654 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); |
656 | 655 | ||
657 | /* | 656 | /* |
@@ -732,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg, | |||
732 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, | 731 | static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, |
733 | ipath_ureg regno, int port) | 732 | ipath_ureg regno, int port) |
734 | { | 733 | { |
735 | if (!dd->ipath_kregbase) | 734 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
736 | return 0; | 735 | return 0; |
737 | 736 | ||
738 | return readl(regno + (u64 __iomem *) | 737 | return readl(regno + (u64 __iomem *) |
@@ -763,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd, | |||
763 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | 762 | static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, |
764 | ipath_kreg regno) | 763 | ipath_kreg regno) |
765 | { | 764 | { |
766 | if (!dd->ipath_kregbase) | 765 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
767 | return -1; | 766 | return -1; |
768 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); | 767 | return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); |
769 | } | 768 | } |
@@ -771,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, | |||
771 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, | 770 | static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, |
772 | ipath_kreg regno) | 771 | ipath_kreg regno) |
773 | { | 772 | { |
774 | if (!dd->ipath_kregbase) | 773 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
775 | return -1; | 774 | return -1; |
776 | 775 | ||
777 | return readq(&dd->ipath_kregbase[regno]); | 776 | return readq(&dd->ipath_kregbase[regno]); |
@@ -787,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd, | |||
787 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | 786 | static inline u64 ipath_read_creg(const struct ipath_devdata *dd, |
788 | ipath_sreg regno) | 787 | ipath_sreg regno) |
789 | { | 788 | { |
790 | if (!dd->ipath_kregbase) | 789 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
791 | return 0; | 790 | return 0; |
792 | 791 | ||
793 | return readq(regno + (u64 __iomem *) | 792 | return readq(regno + (u64 __iomem *) |
@@ -798,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd, | |||
798 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, | 797 | static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, |
799 | ipath_sreg regno) | 798 | ipath_sreg regno) |
800 | { | 799 | { |
801 | if (!dd->ipath_kregbase) | 800 | if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) |
802 | return 0; | 801 | return 0; |
803 | return readl(regno + (u64 __iomem *) | 802 | return readl(regno + (u64 __iomem *) |
804 | (dd->ipath_cregbase + | 803 | (dd->ipath_cregbase + |
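The register-read helpers above now also check IPATH_PRESENT, which init_housekeeping() sets early, ipath_intr() tests before touching any register, and ipath_setup_pe_reset() (further down) clears around the chip reset. A condensed sketch of the guard pattern, not the driver's exact code:

static int chip_reset(struct ipath_devdata *dd)		/* hypothetical condensation */
{
	/* make the chip invisible to register readers and the IRQ handler */
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);

	/* ... write INFINIPATH_C_RESET, wait, re-enable the PCI device ... */

	/* whether or not the reset worked, mark it present so reads behave again */
	dd->ipath_flags |= IPATH_PRESENT;
	return 0;
}

static inline u32 chip_read32(const struct ipath_devdata *dd, ipath_kreg regno)
{
	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
		return -1;		/* chip unreachable: read back as all-ones */
	return readl((u32 __iomem *) &dd->ipath_kregbase[regno]);
}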
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index aa33b0e9f2f6..5ae8761f9dd2 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge, | |||
136 | ret = 1; | 136 | ret = 1; |
137 | goto bail; | 137 | goto bail; |
138 | } | 138 | } |
139 | spin_lock(&rkt->lock); | ||
140 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; | 139 | mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; |
141 | spin_unlock(&rkt->lock); | ||
142 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { | 140 | if (unlikely(mr == NULL || mr->lkey != sge->lkey)) { |
143 | ret = 0; | 141 | ret = 0; |
144 | goto bail; | 142 | goto bail; |
@@ -184,8 +182,6 @@ bail: | |||
184 | * @acc: access flags | 182 | * @acc: access flags |
185 | * | 183 | * |
186 | * Return 1 if successful, otherwise 0. | 184 | * Return 1 if successful, otherwise 0. |
187 | * | ||
188 | * The QP r_rq.lock should be held. | ||
189 | */ | 185 | */ |
190 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | 186 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, |
191 | u32 len, u64 vaddr, u32 rkey, int acc) | 187 | u32 len, u64 vaddr, u32 rkey, int acc) |
@@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | |||
196 | size_t off; | 192 | size_t off; |
197 | int ret; | 193 | int ret; |
198 | 194 | ||
199 | spin_lock(&rkt->lock); | ||
200 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; | 195 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; |
201 | spin_unlock(&rkt->lock); | ||
202 | if (unlikely(mr == NULL || mr->lkey != rkey)) { | 196 | if (unlikely(mr == NULL || mr->lkey != rkey)) { |
203 | ret = 0; | 197 | ret = 0; |
204 | goto bail; | 198 | goto bail; |
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c index 2cabf6340572..9ec4ac77b87f 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.c +++ b/drivers/infiniband/hw/ipath/ipath_layer.c | |||
@@ -46,13 +46,15 @@ | |||
46 | /* Acquire before ipath_devs_lock. */ | 46 | /* Acquire before ipath_devs_lock. */ |
47 | static DEFINE_MUTEX(ipath_layer_mutex); | 47 | static DEFINE_MUTEX(ipath_layer_mutex); |
48 | 48 | ||
49 | static int ipath_verbs_registered; | ||
50 | |||
49 | u16 ipath_layer_rcv_opcode; | 51 | u16 ipath_layer_rcv_opcode; |
52 | |||
50 | static int (*layer_intr)(void *, u32); | 53 | static int (*layer_intr)(void *, u32); |
51 | static int (*layer_rcv)(void *, void *, struct sk_buff *); | 54 | static int (*layer_rcv)(void *, void *, struct sk_buff *); |
52 | static int (*layer_rcv_lid)(void *, void *); | 55 | static int (*layer_rcv_lid)(void *, void *); |
53 | static int (*verbs_piobufavail)(void *); | 56 | static int (*verbs_piobufavail)(void *); |
54 | static void (*verbs_rcv)(void *, void *, void *, u32); | 57 | static void (*verbs_rcv)(void *, void *, void *, u32); |
55 | int ipath_verbs_registered; | ||
56 | 58 | ||
57 | static void *(*layer_add_one)(int, struct ipath_devdata *); | 59 | static void *(*layer_add_one)(int, struct ipath_devdata *); |
58 | static void (*layer_remove_one)(void *); | 60 | static void (*layer_remove_one)(void *); |
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void) | |||
586 | verbs_rcv = NULL; | 588 | verbs_rcv = NULL; |
587 | verbs_timer_cb = NULL; | 589 | verbs_timer_cb = NULL; |
588 | 590 | ||
591 | ipath_verbs_registered = 0; | ||
592 | |||
589 | mutex_unlock(&ipath_layer_mutex); | 593 | mutex_unlock(&ipath_layer_mutex); |
590 | } | 594 | } |
591 | 595 | ||
@@ -868,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, | |||
868 | update_sge(ss, len); | 872 | update_sge(ss, len); |
869 | length -= len; | 873 | length -= len; |
870 | } | 874 | } |
875 | /* Update address before sending packet. */ | ||
876 | update_sge(ss, length); | ||
871 | /* must flush early everything before trigger word */ | 877 | /* must flush early everything before trigger word */ |
872 | ipath_flush_wc(); | 878 | ipath_flush_wc(); |
873 | __raw_writel(last, piobuf); | 879 | __raw_writel(last, piobuf); |
874 | /* be sure trigger word is written */ | 880 | /* be sure trigger word is written */ |
875 | ipath_flush_wc(); | 881 | ipath_flush_wc(); |
876 | update_sge(ss, length); | ||
877 | } | 882 | } |
878 | 883 | ||
879 | /** | 884 | /** |
@@ -939,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | |||
939 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && | 944 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && |
940 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { | 945 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { |
941 | u32 w; | 946 | u32 w; |
947 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
942 | 948 | ||
949 | /* Update address before sending packet. */ | ||
950 | update_sge(ss, len); | ||
943 | /* Need to round up for the last dword in the packet. */ | 951 | /* Need to round up for the last dword in the packet. */ |
944 | w = (len + 3) >> 2; | 952 | w = (len + 3) >> 2; |
945 | __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); | 953 | __iowrite32_copy(piobuf, addr, w - 1); |
946 | /* must flush early everything before trigger word */ | 954 | /* must flush early everything before trigger word */ |
947 | ipath_flush_wc(); | 955 | ipath_flush_wc(); |
948 | __raw_writel(((u32 *) ss->sge.vaddr)[w - 1], | 956 | __raw_writel(addr[w - 1], piobuf + w - 1); |
949 | piobuf + w - 1); | ||
950 | /* be sure trigger word is written */ | 957 | /* be sure trigger word is written */ |
951 | ipath_flush_wc(); | 958 | ipath_flush_wc(); |
952 | update_sge(ss, len); | ||
953 | ret = 0; | 959 | ret = 0; |
954 | goto bail; | 960 | goto bail; |
955 | } | 961 | } |
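The ipath_layer.c changes reorder the PIO send path: the source address is captured and update_sge() is called before the write-combining flush and the trigger word, not after the packet is already on the wire. A condensed sketch of the fixed ordering for the single-SGE fast path, assuming a PIO buffer has already been obtained inside ipath_verbs_send():

u32 *addr = (u32 *) ss->sge.vaddr;	/* capture before update_sge() advances it */
u32 w = (len + 3) >> 2;			/* dwords, rounding up the tail */

update_sge(ss, len);			/* advance SGE state before the send */
__iowrite32_copy(piobuf, addr, w - 1);	/* everything except the trigger word */
ipath_flush_wc();			/* flush before the trigger word ... */
__raw_writel(addr[w - 1], piobuf + w - 1);
ipath_flush_wc();			/* ... and make sure it is posted */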
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c index e693a7a82667..02e8c75b24f6 100644 --- a/drivers/infiniband/hw/ipath/ipath_pe800.c +++ b/drivers/infiniband/hw/ipath/ipath_pe800.c | |||
@@ -305,8 +305,8 @@ static const struct ipath_cregs ipath_pe_cregs = { | |||
305 | * we'll print them and continue. We reuse the same message buffer as | 305 | * we'll print them and continue. We reuse the same message buffer as |
306 | * ipath_handle_errors() to avoid excessive stack usage. | 306 | * ipath_handle_errors() to avoid excessive stack usage. |
307 | */ | 307 | */ |
308 | void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | 308 | static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, |
309 | size_t msgl) | 309 | size_t msgl) |
310 | { | 310 | { |
311 | ipath_err_t hwerrs; | 311 | ipath_err_t hwerrs; |
312 | u32 bits, ctrl; | 312 | u32 bits, ctrl; |
@@ -552,7 +552,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | |||
552 | * freeze mode), and enable hardware errors as errors (along with | 552 | * freeze mode), and enable hardware errors as errors (along with |
553 | * everything else) in errormask | 553 | * everything else) in errormask |
554 | */ | 554 | */ |
555 | void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | 555 | static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) |
556 | { | 556 | { |
557 | ipath_err_t val; | 557 | ipath_err_t val; |
558 | u64 extsval; | 558 | u64 extsval; |
@@ -577,7 +577,7 @@ void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | |||
577 | * ipath_pe_bringup_serdes - bring up the serdes | 577 | * ipath_pe_bringup_serdes - bring up the serdes |
578 | * @dd: the infinipath device | 578 | * @dd: the infinipath device |
579 | */ | 579 | */ |
580 | int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | 580 | static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) |
581 | { | 581 | { |
582 | u64 val, tmp, config1; | 582 | u64 val, tmp, config1; |
583 | int ret = 0, change = 0; | 583 | int ret = 0, change = 0; |
@@ -694,7 +694,7 @@ int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
694 | * @dd: the infinipath device | 694 | * @dd: the infinipath device |
695 | * Called when driver is being unloaded | 695 | * Called when driver is being unloaded |
696 | */ | 696 | */ |
697 | void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | 697 | static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) |
698 | { | 698 | { |
699 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | 699 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); |
700 | 700 | ||
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
972 | /* Use ERROR so it shows up in logs, etc. */ | 972 | /* Use ERROR so it shows up in logs, etc. */ |
973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", | 973 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", |
974 | dd->ipath_unit); | 974 | dd->ipath_unit); |
975 | /* keep chip from being accessed in a few places */ | ||
976 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); | ||
975 | val = dd->ipath_control | INFINIPATH_C_RESET; | 977 | val = dd->ipath_control | INFINIPATH_C_RESET; |
976 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); | 978 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); |
977 | mb(); | 979 | mb(); |
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
997 | if ((r = pci_enable_device(dd->pcidev))) | 999 | if ((r = pci_enable_device(dd->pcidev))) |
998 | ipath_dev_err(dd, "pci_enable_device failed after " | 1000 | ipath_dev_err(dd, "pci_enable_device failed after " |
999 | "reset: %d\n", r); | 1001 | "reset: %d\n", r); |
1002 | /* whether it worked or not, mark as present, again */ | ||
1003 | dd->ipath_flags |= IPATH_PRESENT; | ||
1000 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); | 1004 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); |
1001 | if (val == dd->ipath_revision) { | 1005 | if (val == dd->ipath_revision) { |
1002 | ipath_cdbg(VERBOSE, "Got matching revision " | 1006 | ipath_cdbg(VERBOSE, "Got matching revision " |
@@ -1176,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) | |||
1176 | */ | 1180 | */ |
1177 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; | 1181 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; |
1178 | 1182 | ||
1183 | ipath_get_eeprom_info(dd); | ||
1184 | |||
1179 | return 0; | 1185 | return 0; |
1180 | } | 1186 | } |
1181 | 1187 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 6058d70d7577..9f8855d970c8 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -188,8 +188,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn) | |||
188 | * Allocate the next available QPN and put the QP into the hash table. | 188 | * Allocate the next available QPN and put the QP into the hash table. |
189 | * The hash table holds a reference to the QP. | 189 | * The hash table holds a reference to the QP. |
190 | */ | 190 | */ |
191 | int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, | 191 | static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, |
192 | enum ib_qp_type type) | 192 | enum ib_qp_type type) |
193 | { | 193 | { |
194 | unsigned long flags; | 194 | unsigned long flags; |
195 | u32 qpn; | 195 | u32 qpn; |
@@ -232,7 +232,7 @@ bail: | |||
232 | * Remove the QP from the table so it can't be found asynchronously by | 232 | * Remove the QP from the table so it can't be found asynchronously by |
233 | * the receive interrupt routine. | 233 | * the receive interrupt routine. |
234 | */ | 234 | */ |
235 | void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) | 235 | static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) |
236 | { | 236 | { |
237 | struct ipath_qp *q, **qpp; | 237 | struct ipath_qp *q, **qpp; |
238 | unsigned long flags; | 238 | unsigned long flags; |
@@ -358,6 +358,65 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
358 | } | 358 | } |
359 | 359 | ||
360 | /** | 360 | /** |
361 | * ipath_error_qp - put a QP into an error state | ||
362 | * @qp: the QP to put into an error state | ||
363 | * | ||
364 | * Flushes both send and receive work queues. | ||
365 | * QP r_rq.lock and s_lock should be held. | ||
366 | */ | ||
367 | |||
368 | static void ipath_error_qp(struct ipath_qp *qp) | ||
369 | { | ||
370 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
371 | struct ib_wc wc; | ||
372 | |||
373 | _VERBS_INFO("QP%d/%d in error state\n", | ||
374 | qp->ibqp.qp_num, qp->remote_qpn); | ||
375 | |||
376 | spin_lock(&dev->pending_lock); | ||
377 | /* XXX What if its already removed by the timeout code? */ | ||
378 | if (!list_empty(&qp->timerwait)) | ||
379 | list_del_init(&qp->timerwait); | ||
380 | if (!list_empty(&qp->piowait)) | ||
381 | list_del_init(&qp->piowait); | ||
382 | spin_unlock(&dev->pending_lock); | ||
383 | |||
384 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
385 | wc.vendor_err = 0; | ||
386 | wc.byte_len = 0; | ||
387 | wc.imm_data = 0; | ||
388 | wc.qp_num = qp->ibqp.qp_num; | ||
389 | wc.src_qp = 0; | ||
390 | wc.wc_flags = 0; | ||
391 | wc.pkey_index = 0; | ||
392 | wc.slid = 0; | ||
393 | wc.sl = 0; | ||
394 | wc.dlid_path_bits = 0; | ||
395 | wc.port_num = 0; | ||
396 | |||
397 | while (qp->s_last != qp->s_head) { | ||
398 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | ||
399 | |||
400 | wc.wr_id = wqe->wr.wr_id; | ||
401 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | ||
402 | if (++qp->s_last >= qp->s_size) | ||
403 | qp->s_last = 0; | ||
404 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | ||
405 | } | ||
406 | qp->s_cur = qp->s_tail = qp->s_head; | ||
407 | qp->s_hdrwords = 0; | ||
408 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
409 | |||
410 | wc.opcode = IB_WC_RECV; | ||
411 | while (qp->r_rq.tail != qp->r_rq.head) { | ||
412 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; | ||
413 | if (++qp->r_rq.tail >= qp->r_rq.size) | ||
414 | qp->r_rq.tail = 0; | ||
415 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /** | ||
361 | * ipath_modify_qp - modify the attributes of a queue pair | 420 | * ipath_modify_qp - modify the attributes of a queue pair |
362 | * @ibqp: the queue pair who's attributes we're modifying | 421 | * @ibqp: the queue pair who's attributes we're modifying |
363 | * @attr: the new attributes | 422 | * @attr: the new attributes |
@@ -368,6 +427,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
368 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 427 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
369 | int attr_mask) | 428 | int attr_mask) |
370 | { | 429 | { |
430 | struct ipath_ibdev *dev = to_idev(ibqp->device); | ||
371 | struct ipath_qp *qp = to_iqp(ibqp); | 431 | struct ipath_qp *qp = to_iqp(ibqp); |
372 | enum ib_qp_state cur_state, new_state; | 432 | enum ib_qp_state cur_state, new_state; |
373 | unsigned long flags; | 433 | unsigned long flags; |
@@ -384,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
384 | attr_mask)) | 444 | attr_mask)) |
385 | goto inval; | 445 | goto inval; |
386 | 446 | ||
447 | if (attr_mask & IB_QP_AV) | ||
448 | if (attr->ah_attr.dlid == 0 || | ||
449 | attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) | ||
450 | goto inval; | ||
451 | |||
452 | if (attr_mask & IB_QP_PKEY_INDEX) | ||
453 | if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) | ||
454 | goto inval; | ||
455 | |||
456 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | ||
457 | if (attr->min_rnr_timer > 31) | ||
458 | goto inval; | ||
459 | |||
387 | switch (new_state) { | 460 | switch (new_state) { |
388 | case IB_QPS_RESET: | 461 | case IB_QPS_RESET: |
389 | ipath_reset_qp(qp); | 462 | ipath_reset_qp(qp); |
@@ -398,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
398 | 471 | ||
399 | } | 472 | } |
400 | 473 | ||
401 | if (attr_mask & IB_QP_PKEY_INDEX) { | 474 | if (attr_mask & IB_QP_PKEY_INDEX) |
402 | struct ipath_ibdev *dev = to_idev(ibqp->device); | ||
403 | |||
404 | if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) | ||
405 | goto inval; | ||
406 | qp->s_pkey_index = attr->pkey_index; | 475 | qp->s_pkey_index = attr->pkey_index; |
407 | } | ||
408 | 476 | ||
409 | if (attr_mask & IB_QP_DEST_QPN) | 477 | if (attr_mask & IB_QP_DEST_QPN) |
410 | qp->remote_qpn = attr->dest_qp_num; | 478 | qp->remote_qpn = attr->dest_qp_num; |
@@ -420,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
420 | if (attr_mask & IB_QP_ACCESS_FLAGS) | 488 | if (attr_mask & IB_QP_ACCESS_FLAGS) |
421 | qp->qp_access_flags = attr->qp_access_flags; | 489 | qp->qp_access_flags = attr->qp_access_flags; |
422 | 490 | ||
423 | if (attr_mask & IB_QP_AV) { | 491 | if (attr_mask & IB_QP_AV) |
424 | if (attr->ah_attr.dlid == 0 || | ||
425 | attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE) | ||
426 | goto inval; | ||
427 | qp->remote_ah_attr = attr->ah_attr; | 492 | qp->remote_ah_attr = attr->ah_attr; |
428 | } | ||
429 | 493 | ||
430 | if (attr_mask & IB_QP_PATH_MTU) | 494 | if (attr_mask & IB_QP_PATH_MTU) |
431 | qp->path_mtu = attr->path_mtu; | 495 | qp->path_mtu = attr->path_mtu; |
@@ -440,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
440 | qp->s_rnr_retry_cnt = qp->s_rnr_retry; | 504 | qp->s_rnr_retry_cnt = qp->s_rnr_retry; |
441 | } | 505 | } |
442 | 506 | ||
443 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | 507 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
444 | if (attr->min_rnr_timer > 31) | ||
445 | goto inval; | ||
446 | qp->s_min_rnr_timer = attr->min_rnr_timer; | 508 | qp->s_min_rnr_timer = attr->min_rnr_timer; |
447 | } | ||
448 | 509 | ||
449 | if (attr_mask & IB_QP_QKEY) | 510 | if (attr_mask & IB_QP_QKEY) |
450 | qp->qkey = attr->qkey; | 511 | qp->qkey = attr->qkey; |
@@ -651,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
651 | init_attr->qp_type == IB_QPT_RC ? | 712 | init_attr->qp_type == IB_QPT_RC ? |
652 | ipath_do_rc_send : ipath_do_uc_send, | 713 | ipath_do_rc_send : ipath_do_uc_send, |
653 | (unsigned long)qp); | 714 | (unsigned long)qp); |
654 | qp->piowait.next = LIST_POISON1; | 715 | INIT_LIST_HEAD(&qp->piowait); |
655 | qp->piowait.prev = LIST_POISON2; | 716 | INIT_LIST_HEAD(&qp->timerwait); |
656 | qp->timerwait.next = LIST_POISON1; | ||
657 | qp->timerwait.prev = LIST_POISON2; | ||
658 | qp->state = IB_QPS_RESET; | 717 | qp->state = IB_QPS_RESET; |
659 | qp->s_wq = swq; | 718 | qp->s_wq = swq; |
660 | qp->s_size = init_attr->cap.max_send_wr + 1; | 719 | qp->s_size = init_attr->cap.max_send_wr + 1; |
@@ -675,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
675 | ipath_reset_qp(qp); | 734 | ipath_reset_qp(qp); |
676 | 735 | ||
677 | /* Tell the core driver that the kernel SMA is present. */ | 736 | /* Tell the core driver that the kernel SMA is present. */ |
678 | if (qp->ibqp.qp_type == IB_QPT_SMI) | 737 | if (init_attr->qp_type == IB_QPT_SMI) |
679 | ipath_layer_set_verbs_flags(dev->dd, | 738 | ipath_layer_set_verbs_flags(dev->dd, |
680 | IPATH_VERBS_KERNEL_SMA); | 739 | IPATH_VERBS_KERNEL_SMA); |
681 | break; | 740 | break; |
@@ -724,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp) | |||
724 | 783 | ||
725 | /* Make sure the QP isn't on the timeout list. */ | 784 | /* Make sure the QP isn't on the timeout list. */ |
726 | spin_lock_irqsave(&dev->pending_lock, flags); | 785 | spin_lock_irqsave(&dev->pending_lock, flags); |
727 | if (qp->timerwait.next != LIST_POISON1) | 786 | if (!list_empty(&qp->timerwait)) |
728 | list_del(&qp->timerwait); | 787 | list_del_init(&qp->timerwait); |
729 | if (qp->piowait.next != LIST_POISON1) | 788 | if (!list_empty(&qp->piowait)) |
730 | list_del(&qp->piowait); | 789 | list_del_init(&qp->piowait); |
731 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 790 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
732 | 791 | ||
733 | /* | 792 | /* |
@@ -796,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
796 | 855 | ||
797 | spin_lock(&dev->pending_lock); | 856 | spin_lock(&dev->pending_lock); |
798 | /* XXX What if its already removed by the timeout code? */ | 857 | /* XXX What if its already removed by the timeout code? */ |
799 | if (qp->timerwait.next != LIST_POISON1) | 858 | if (!list_empty(&qp->timerwait)) |
800 | list_del(&qp->timerwait); | 859 | list_del_init(&qp->timerwait); |
801 | if (qp->piowait.next != LIST_POISON1) | 860 | if (!list_empty(&qp->piowait)) |
802 | list_del(&qp->piowait); | 861 | list_del_init(&qp->piowait); |
803 | spin_unlock(&dev->pending_lock); | 862 | spin_unlock(&dev->pending_lock); |
804 | 863 | ||
805 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); | 864 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); |
@@ -821,65 +880,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
821 | } | 880 | } |
822 | 881 | ||
823 | /** | 882 | /** |
824 | * ipath_error_qp - put a QP into an error state | ||
825 | * @qp: the QP to put into an error state | ||
826 | * | ||
827 | * Flushes both send and receive work queues. | ||
828 | * QP r_rq.lock and s_lock should be held. | ||
829 | */ | ||
830 | |||
831 | void ipath_error_qp(struct ipath_qp *qp) | ||
832 | { | ||
833 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
834 | struct ib_wc wc; | ||
835 | |||
836 | _VERBS_INFO("QP%d/%d in error state\n", | ||
837 | qp->ibqp.qp_num, qp->remote_qpn); | ||
838 | |||
839 | spin_lock(&dev->pending_lock); | ||
840 | /* XXX What if its already removed by the timeout code? */ | ||
841 | if (qp->timerwait.next != LIST_POISON1) | ||
842 | list_del(&qp->timerwait); | ||
843 | if (qp->piowait.next != LIST_POISON1) | ||
844 | list_del(&qp->piowait); | ||
845 | spin_unlock(&dev->pending_lock); | ||
846 | |||
847 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
848 | wc.vendor_err = 0; | ||
849 | wc.byte_len = 0; | ||
850 | wc.imm_data = 0; | ||
851 | wc.qp_num = qp->ibqp.qp_num; | ||
852 | wc.src_qp = 0; | ||
853 | wc.wc_flags = 0; | ||
854 | wc.pkey_index = 0; | ||
855 | wc.slid = 0; | ||
856 | wc.sl = 0; | ||
857 | wc.dlid_path_bits = 0; | ||
858 | wc.port_num = 0; | ||
859 | |||
860 | while (qp->s_last != qp->s_head) { | ||
861 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | ||
862 | |||
863 | wc.wr_id = wqe->wr.wr_id; | ||
864 | wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; | ||
865 | if (++qp->s_last >= qp->s_size) | ||
866 | qp->s_last = 0; | ||
867 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | ||
868 | } | ||
869 | qp->s_cur = qp->s_tail = qp->s_head; | ||
870 | qp->s_hdrwords = 0; | ||
871 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
872 | |||
873 | wc.opcode = IB_WC_RECV; | ||
874 | while (qp->r_rq.tail != qp->r_rq.head) { | ||
875 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; | ||
876 | if (++qp->r_rq.tail >= qp->r_rq.size) | ||
877 | qp->r_rq.tail = 0; | ||
878 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
879 | } | ||
880 | } | ||
881 | |||
882 | /** | ||
883 | * ipath_get_credit - flush the send work queue of a QP | 883 | * ipath_get_credit - flush the send work queue of a QP |
884 | * @qp: the qp who's send work queue to flush | 884 | * @qp: the qp who's send work queue to flush |
885 | * @aeth: the Acknowledge Extended Transport Header | 885 | * @aeth: the Acknowledge Extended Transport Header |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index a4055ca00614..493b1821a934 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) | |||
57 | qp->s_len = wqe->length - len; | 57 | qp->s_len = wqe->length - len; |
58 | dev = to_idev(qp->ibqp.device); | 58 | dev = to_idev(qp->ibqp.device); |
59 | spin_lock(&dev->pending_lock); | 59 | spin_lock(&dev->pending_lock); |
60 | if (qp->timerwait.next == LIST_POISON1) | 60 | if (list_empty(&qp->timerwait)) |
61 | list_add_tail(&qp->timerwait, | 61 | list_add_tail(&qp->timerwait, |
62 | &dev->pending[dev->pending_index]); | 62 | &dev->pending[dev->pending_index]); |
63 | spin_unlock(&dev->pending_lock); | 63 | spin_unlock(&dev->pending_lock); |
@@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp, | |||
356 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) | 356 | if ((int)(qp->s_psn - qp->s_next_psn) > 0) |
357 | qp->s_next_psn = qp->s_psn; | 357 | qp->s_next_psn = qp->s_psn; |
358 | spin_lock(&dev->pending_lock); | 358 | spin_lock(&dev->pending_lock); |
359 | if (qp->timerwait.next == LIST_POISON1) | 359 | if (list_empty(&qp->timerwait)) |
360 | list_add_tail(&qp->timerwait, | 360 | list_add_tail(&qp->timerwait, |
361 | &dev->pending[dev->pending_index]); | 361 | &dev->pending[dev->pending_index]); |
362 | spin_unlock(&dev->pending_lock); | 362 | spin_unlock(&dev->pending_lock); |
@@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc) | |||
726 | */ | 726 | */ |
727 | dev = to_idev(qp->ibqp.device); | 727 | dev = to_idev(qp->ibqp.device); |
728 | spin_lock(&dev->pending_lock); | 728 | spin_lock(&dev->pending_lock); |
729 | if (qp->timerwait.next != LIST_POISON1) | 729 | if (!list_empty(&qp->timerwait)) |
730 | list_del(&qp->timerwait); | 730 | list_del_init(&qp->timerwait); |
731 | spin_unlock(&dev->pending_lock); | 731 | spin_unlock(&dev->pending_lock); |
732 | 732 | ||
733 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | 733 | if (wqe->wr.opcode == IB_WR_RDMA_READ) |
@@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode) | |||
886 | * just won't find anything to restart if we ACK everything. | 886 | * just won't find anything to restart if we ACK everything. |
887 | */ | 887 | */ |
888 | spin_lock(&dev->pending_lock); | 888 | spin_lock(&dev->pending_lock); |
889 | if (qp->timerwait.next != LIST_POISON1) | 889 | if (!list_empty(&qp->timerwait)) |
890 | list_del(&qp->timerwait); | 890 | list_del_init(&qp->timerwait); |
891 | spin_unlock(&dev->pending_lock); | 891 | spin_unlock(&dev->pending_lock); |
892 | 892 | ||
893 | /* | 893 | /* |
@@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1194 | IB_WR_RDMA_READ)) | 1194 | IB_WR_RDMA_READ)) |
1195 | goto ack_done; | 1195 | goto ack_done; |
1196 | spin_lock(&dev->pending_lock); | 1196 | spin_lock(&dev->pending_lock); |
1197 | if (qp->s_rnr_timeout == 0 && | 1197 | if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) |
1198 | qp->timerwait.next != LIST_POISON1) | ||
1199 | list_move_tail(&qp->timerwait, | 1198 | list_move_tail(&qp->timerwait, |
1200 | &dev->pending[dev->pending_index]); | 1199 | &dev->pending[dev->pending_index]); |
1201 | spin_unlock(&dev->pending_lock); | 1200 | spin_unlock(&dev->pending_lock); |
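Editor's note: the ipath_qp.c and ipath_rc.c hunks above all apply one pattern: the QP's piowait/timerwait entries are initialized with INIT_LIST_HEAD() at create time, membership is tested with list_empty(), and removal uses list_del_init() so the node can be tested or removed again safely, instead of inspecting the LIST_POISON1 value left behind by a plain list_del(). Below is a small userspace model of that idiom with a hand-rolled list (since <linux/list.h> is kernel-only); it is a sketch, not the driver code.

/*
 * A node initialized to point at itself, and re-initialized on every
 * removal, can always be asked "are you on a list?" without ever
 * dereferencing a poison pointer.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_head(struct list_head *h) { h->next = h->prev = h; }
static int  empty(const struct list_head *h) { return h->next == h; }

static void add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

static void del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        init_head(n);              /* safe to test or delete again */
}

int main(void)
{
        struct list_head pending, timerwait;

        init_head(&pending);
        init_head(&timerwait);     /* QP starts off-list, not poisoned */

        if (empty(&timerwait))     /* mirrors list_empty(&qp->timerwait) */
                add_tail(&timerwait, &pending);

        del_init(&timerwait);      /* mirrors list_del_init() */
        printf("on list afterwards? %s\n", empty(&timerwait) ? "no" : "yes");
        return 0;
}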
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index 1e59750c5f63..402126eb79c9 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h | |||
@@ -34,8 +34,9 @@ | |||
34 | #define _IPATH_REGISTERS_H | 34 | #define _IPATH_REGISTERS_H |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * This file should only be included by kernel source, and by the diags. | 37 | * This file should only be included by kernel source, and by the diags. It |
38 | * It defines the registers, and their contents, for the InfiniPath HT-400 chip | 38 | * defines the registers, and their contents, for the InfiniPath HT-400 |
39 | * chip. | ||
39 | */ | 40 | */ |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -156,8 +157,10 @@ | |||
156 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 | 157 | #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 |
157 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL | 158 | #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL |
158 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 | 159 | #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 |
159 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */ | 160 | /* cycle through TS1/TS2 till OK */ |
160 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */ | 161 | #define INFINIPATH_IBCC_LINKINITCMD_POLL 2 |
162 | /* wait for TS1, then go on */ | ||
163 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 | ||
161 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 | 164 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 |
162 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL | 165 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL |
163 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ | 166 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ |
@@ -182,7 +185,8 @@ | |||
182 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 | 185 | #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 |
183 | #define INFINIPATH_IBCS_TXREADY 0x40000000 | 186 | #define INFINIPATH_IBCS_TXREADY 0x40000000 |
184 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 | 187 | #define INFINIPATH_IBCS_TXCREDITOK 0x80000000 |
185 | /* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | 188 | /* link training states (shift by |
189 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ | ||
186 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 | 190 | #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 |
187 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 | 191 | #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 |
188 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 | 192 | #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 |
@@ -267,10 +271,12 @@ | |||
267 | /* kr_serdesconfig0 bits */ | 271 | /* kr_serdesconfig0 bits */ |
268 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */ | 272 | #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */ |
269 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ | 273 | #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ |
270 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */ | 274 | /* tx idle enables (per lane) */ |
271 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */ | 275 | #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL |
272 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT, | 276 | /* rx detect enables (per lane) */ |
273 | Otherwise not used on IB side */ | 277 | #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL |
278 | /* L1 Power down; use with RXDETECT, Otherwise not used on IB side */ | ||
279 | #define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL | ||
274 | 280 | ||
275 | /* kr_xgxsconfig bits */ | 281 | /* kr_xgxsconfig bits */ |
276 | #define INFINIPATH_XGXS_RESET 0x7ULL | 282 | #define INFINIPATH_XGXS_RESET 0x7ULL |
@@ -390,12 +396,13 @@ struct ipath_kregs { | |||
390 | ipath_kreg kr_txintmemsize; | 396 | ipath_kreg kr_txintmemsize; |
391 | ipath_kreg kr_xgxsconfig; | 397 | ipath_kreg kr_xgxsconfig; |
392 | ipath_kreg kr_ibpllcfg; | 398 | ipath_kreg kr_ibpllcfg; |
393 | /* use these two (and the following N ports) only with ipath_k*_kreg64_port(); | 399 | /* use these two (and the following N ports) only with |
394 | * not *kreg64() */ | 400 | * ipath_k*_kreg64_port(); not *kreg64() */ |
395 | ipath_kreg kr_rcvhdraddr; | 401 | ipath_kreg kr_rcvhdraddr; |
396 | ipath_kreg kr_rcvhdrtailaddr; | 402 | ipath_kreg kr_rcvhdrtailaddr; |
397 | 403 | ||
398 | /* remaining registers are not present on all types of infinipath chips */ | 404 | /* remaining registers are not present on all types of infinipath |
405 | chips */ | ||
399 | ipath_kreg kr_rcvpktledcnt; | 406 | ipath_kreg kr_rcvpktledcnt; |
400 | ipath_kreg kr_pcierbuftestreg0; | 407 | ipath_kreg kr_pcierbuftestreg0; |
401 | ipath_kreg kr_pcierbuftestreg1; | 408 | ipath_kreg kr_pcierbuftestreg1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index f232e77b78ee..d38f4f3cfd1d 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | |||
435 | unsigned long flags; | 435 | unsigned long flags; |
436 | 436 | ||
437 | spin_lock_irqsave(&dev->pending_lock, flags); | 437 | spin_lock_irqsave(&dev->pending_lock, flags); |
438 | if (qp->piowait.next == LIST_POISON1) | 438 | if (list_empty(&qp->piowait)) |
439 | list_add_tail(&qp->piowait, &dev->piowait); | 439 | list_add_tail(&qp->piowait, &dev->piowait); |
440 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 440 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
441 | /* | 441 | /* |
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
531 | } | 531 | } |
532 | wqe->wr.num_sge = j; | 532 | wqe->wr.num_sge = j; |
533 | qp->s_head = next; | 533 | qp->s_head = next; |
534 | /* | ||
535 | * Wake up the send tasklet if the QP is not waiting | ||
536 | * for an RNR timeout. | ||
537 | */ | ||
538 | next = qp->s_rnr_timeout; | ||
539 | spin_unlock_irqrestore(&qp->s_lock, flags); | 534 | spin_unlock_irqrestore(&qp->s_lock, flags); |
540 | 535 | ||
541 | if (next == 0) { | 536 | if (qp->ibqp.qp_type == IB_QPT_UC) |
542 | if (qp->ibqp.qp_type == IB_QPT_UC) | 537 | ipath_do_uc_send((unsigned long) qp); |
543 | ipath_do_uc_send((unsigned long) qp); | 538 | else |
544 | else | 539 | ipath_do_rc_send((unsigned long) qp); |
545 | ipath_do_rc_send((unsigned long) qp); | ||
546 | } | ||
547 | 540 | ||
548 | ret = 0; | 541 | ret = 0; |
549 | 542 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index 32acd8048b49..f323791cc495 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c | |||
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = { | |||
711 | * enters diag mode. A device reset is quite likely to crash the | 711 | * enters diag mode. A device reset is quite likely to crash the |
712 | * machine entirely, so we don't want to normally make it | 712 | * machine entirely, so we don't want to normally make it |
713 | * available. | 713 | * available. |
714 | * | ||
715 | * Called with ipath_mutex held. | ||
714 | */ | 716 | */ |
715 | int ipath_expose_reset(struct device *dev) | 717 | int ipath_expose_reset(struct device *dev) |
716 | { | 718 | { |
717 | return device_create_file(dev, &dev_attr_reset); | 719 | static int exposed; |
720 | int ret; | ||
721 | |||
722 | if (!exposed) { | ||
723 | ret = device_create_file(dev, &dev_attr_reset); | ||
724 | exposed = 1; | ||
725 | } | ||
726 | else | ||
727 | ret = 0; | ||
728 | |||
729 | return ret; | ||
718 | } | 730 | } |
719 | 731 | ||
720 | int ipath_driver_create_group(struct device_driver *drv) | 732 | int ipath_driver_create_group(struct device_driver *drv) |
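Editor's note: the new ipath_expose_reset() body uses a function-local static flag so the reset attribute is created at most once, relying on the caller already holding ipath_mutex (per the added comment) to keep the test-and-set race free. A hedged sketch of the same create-once idea follows, with an explicit lock standing in for the mutex that lives outside the function in the driver; expose_reset() and create_file() are illustrative names, not the driver API.

/*
 * Create-once helper guarded by a caller-held mutex, mirroring the
 * "static int exposed" pattern added to ipath_expose_reset().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

static int create_file(void)
{
        printf("attribute created\n");   /* stands in for device_create_file() */
        return 0;
}

static int expose_reset(void)            /* caller must hold 'guard' */
{
        static int exposed;
        int ret = 0;

        if (!exposed) {
                ret = create_file();
                exposed = 1;
        }
        return ret;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                pthread_mutex_lock(&guard);
                expose_reset();          /* prints only on the first call */
                pthread_mutex_unlock(&guard);
        }
        return 0;
}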
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 5ff3de6128b2..e606daf83210 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -46,8 +46,10 @@ | |||
46 | * This is called from ipath_post_ud_send() to forward a WQE addressed | 46 | * This is called from ipath_post_ud_send() to forward a WQE addressed |
47 | * to the same HCA. | 47 | * to the same HCA. |
48 | */ | 48 | */ |
49 | void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, | 49 | static void ipath_ud_loopback(struct ipath_qp *sqp, |
50 | u32 length, struct ib_send_wr *wr, struct ib_wc *wc) | 50 | struct ipath_sge_state *ss, |
51 | u32 length, struct ib_send_wr *wr, | ||
52 | struct ib_wc *wc) | ||
51 | { | 53 | { |
52 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); | 54 | struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); |
53 | struct ipath_qp *qp; | 55 | struct ipath_qp *qp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 9f27fd35cdbb..28fdbdaa789d 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -41,7 +41,7 @@ | |||
41 | /* Not static, because we don't want the compiler removing it */ | 41 | /* Not static, because we don't want the compiler removing it */ |
42 | const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; | 42 | const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; |
43 | 43 | ||
44 | unsigned int ib_ipath_qp_table_size = 251; | 44 | static unsigned int ib_ipath_qp_table_size = 251; |
45 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); | 45 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); |
46 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | 46 | MODULE_PARM_DESC(qp_table_size, "QP table size"); |
47 | 47 | ||
@@ -87,7 +87,7 @@ const enum ib_wc_opcode ib_ipath_wc_opcode[] = { | |||
87 | /* | 87 | /* |
88 | * System image GUID. | 88 | * System image GUID. |
89 | */ | 89 | */ |
90 | __be64 sys_image_guid; | 90 | static __be64 sys_image_guid; |
91 | 91 | ||
92 | /** | 92 | /** |
93 | * ipath_copy_sge - copy data to SGE memory | 93 | * ipath_copy_sge - copy data to SGE memory |
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg) | |||
449 | { | 449 | { |
450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | 450 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; |
451 | struct ipath_qp *resend = NULL; | 451 | struct ipath_qp *resend = NULL; |
452 | struct ipath_qp *rnr = NULL; | ||
453 | struct list_head *last; | 452 | struct list_head *last; |
454 | struct ipath_qp *qp; | 453 | struct ipath_qp *qp; |
455 | unsigned long flags; | 454 | unsigned long flags; |
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg) | |||
465 | last = &dev->pending[dev->pending_index]; | 464 | last = &dev->pending[dev->pending_index]; |
466 | while (!list_empty(last)) { | 465 | while (!list_empty(last)) { |
467 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 466 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
468 | if (last->next == LIST_POISON1 || | 467 | list_del_init(&qp->timerwait); |
469 | last->next != &qp->timerwait || | 468 | qp->timer_next = resend; |
470 | qp->timerwait.prev != last) { | 469 | resend = qp; |
471 | INIT_LIST_HEAD(last); | 470 | atomic_inc(&qp->refcount); |
472 | } else { | ||
473 | list_del(&qp->timerwait); | ||
474 | qp->timerwait.prev = (struct list_head *) resend; | ||
475 | resend = qp; | ||
476 | atomic_inc(&qp->refcount); | ||
477 | } | ||
478 | } | 471 | } |
479 | last = &dev->rnrwait; | 472 | last = &dev->rnrwait; |
480 | if (!list_empty(last)) { | 473 | if (!list_empty(last)) { |
481 | qp = list_entry(last->next, struct ipath_qp, timerwait); | 474 | qp = list_entry(last->next, struct ipath_qp, timerwait); |
482 | if (--qp->s_rnr_timeout == 0) { | 475 | if (--qp->s_rnr_timeout == 0) { |
483 | do { | 476 | do { |
484 | if (last->next == LIST_POISON1 || | 477 | list_del_init(&qp->timerwait); |
485 | last->next != &qp->timerwait || | 478 | tasklet_hi_schedule(&qp->s_task); |
486 | qp->timerwait.prev != last) { | ||
487 | INIT_LIST_HEAD(last); | ||
488 | break; | ||
489 | } | ||
490 | list_del(&qp->timerwait); | ||
491 | qp->timerwait.prev = | ||
492 | (struct list_head *) rnr; | ||
493 | rnr = qp; | ||
494 | if (list_empty(last)) | 479 | if (list_empty(last)) |
495 | break; | 480 | break; |
496 | qp = list_entry(last->next, struct ipath_qp, | 481 | qp = list_entry(last->next, struct ipath_qp, |
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg) | |||
530 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 515 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
531 | 516 | ||
532 | /* XXX What if timer fires again while this is running? */ | 517 | /* XXX What if timer fires again while this is running? */ |
533 | for (qp = resend; qp != NULL; | 518 | for (qp = resend; qp != NULL; qp = qp->timer_next) { |
534 | qp = (struct ipath_qp *) qp->timerwait.prev) { | ||
535 | struct ib_wc wc; | 519 | struct ib_wc wc; |
536 | 520 | ||
537 | spin_lock_irqsave(&qp->s_lock, flags); | 521 | spin_lock_irqsave(&qp->s_lock, flags); |
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg) | |||
545 | if (atomic_dec_and_test(&qp->refcount)) | 529 | if (atomic_dec_and_test(&qp->refcount)) |
546 | wake_up(&qp->wait); | 530 | wake_up(&qp->wait); |
547 | } | 531 | } |
548 | for (qp = rnr; qp != NULL; | ||
549 | qp = (struct ipath_qp *) qp->timerwait.prev) | ||
550 | tasklet_hi_schedule(&qp->s_task); | ||
551 | } | 532 | } |
552 | 533 | ||
553 | /** | 534 | /** |
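Editor's note: the ipath_ib_timer() rework above stops smuggling the resend chain through qp->timerwait.prev and instead links QPs through the new timer_next field (added to struct ipath_qp later in this diff). The pending list can then use ordinary list_del_init(), while the timer builds a private singly linked work list under the lock and walks it after dropping the lock. A compact sketch of that two-phase pattern follows; the names and data are illustrative only.

/*
 * "Collect under the lock, process after unlocking": each item carries
 * a dedicated next pointer for the private work chain, so the shared
 * pending list's linkage is never reused for anything else.
 */
#include <pthread.h>
#include <stdio.h>

struct qp {
        int id;
        int pending;            /* stands in for being on dev->pending */
        struct qp *timer_next;  /* private chain for the timer walk */
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp qps[3] = { { 1, 1, NULL }, { 2, 0, NULL }, { 3, 1, NULL } };

int main(void)
{
        struct qp *resend = NULL;

        pthread_mutex_lock(&pending_lock);
        for (int i = 0; i < 3; i++) {
                if (!qps[i].pending)
                        continue;
                qps[i].pending = 0;          /* like list_del_init() */
                qps[i].timer_next = resend;  /* push onto private chain */
                resend = &qps[i];
        }
        pthread_mutex_unlock(&pending_lock);

        /* Walk the private chain without holding the lock. */
        for (struct qp *qp = resend; qp; qp = qp->timer_next)
                printf("restart QP %d\n", qp->id);
        return 0;
}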
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg) | |||
556 | * | 537 | * |
557 | * This is called from ipath_intr() at interrupt level when a PIO buffer is | 538 | * This is called from ipath_intr() at interrupt level when a PIO buffer is |
558 | * available after ipath_verbs_send() returned an error that no buffers were | 539 | * available after ipath_verbs_send() returned an error that no buffers were |
559 | * available. Return 0 if we consumed all the PIO buffers and we still have | 540 | * available. Return 1 if we consumed all the PIO buffers and we still have |
560 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and | 541 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and |
561 | * return one). | 542 | * return zero). |
562 | */ | 543 | */ |
563 | static int ipath_ib_piobufavail(void *arg) | 544 | static int ipath_ib_piobufavail(void *arg) |
564 | { | 545 | { |
@@ -573,13 +554,13 @@ static int ipath_ib_piobufavail(void *arg) | |||
573 | while (!list_empty(&dev->piowait)) { | 554 | while (!list_empty(&dev->piowait)) { |
574 | qp = list_entry(dev->piowait.next, struct ipath_qp, | 555 | qp = list_entry(dev->piowait.next, struct ipath_qp, |
575 | piowait); | 556 | piowait); |
576 | list_del(&qp->piowait); | 557 | list_del_init(&qp->piowait); |
577 | tasklet_hi_schedule(&qp->s_task); | 558 | tasklet_hi_schedule(&qp->s_task); |
578 | } | 559 | } |
579 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 560 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
580 | 561 | ||
581 | bail: | 562 | bail: |
582 | return 1; | 563 | return 0; |
583 | } | 564 | } |
584 | 565 | ||
585 | static int ipath_query_device(struct ib_device *ibdev, | 566 | static int ipath_query_device(struct ib_device *ibdev, |
@@ -970,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
970 | idev->dd = dd; | 951 | idev->dd = dd; |
971 | 952 | ||
972 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); | 953 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); |
954 | dev->owner = THIS_MODULE; | ||
973 | dev->node_guid = ipath_layer_get_guid(dd); | 955 | dev->node_guid = ipath_layer_get_guid(dd); |
974 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; | 956 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; |
975 | dev->uverbs_cmd_mask = | 957 | dev->uverbs_cmd_mask = |
@@ -1110,7 +1092,7 @@ static void ipath_unregister_ib_device(void *arg) | |||
1110 | ib_dealloc_device(ibdev); | 1092 | ib_dealloc_device(ibdev); |
1111 | } | 1093 | } |
1112 | 1094 | ||
1113 | int __init ipath_verbs_init(void) | 1095 | static int __init ipath_verbs_init(void) |
1114 | { | 1096 | { |
1115 | return ipath_verbs_register(ipath_register_ib_device, | 1097 | return ipath_verbs_register(ipath_register_ib_device, |
1116 | ipath_unregister_ib_device, | 1098 | ipath_unregister_ib_device, |
@@ -1118,33 +1100,33 @@ int __init ipath_verbs_init(void) | |||
1118 | ipath_ib_timer); | 1100 | ipath_ib_timer); |
1119 | } | 1101 | } |
1120 | 1102 | ||
1121 | void __exit ipath_verbs_cleanup(void) | 1103 | static void __exit ipath_verbs_cleanup(void) |
1122 | { | 1104 | { |
1123 | ipath_verbs_unregister(); | 1105 | ipath_verbs_unregister(); |
1124 | } | 1106 | } |
1125 | 1107 | ||
1126 | static ssize_t show_rev(struct class_device *cdev, char *buf) | 1108 | static ssize_t show_rev(struct class_device *cdev, char *buf) |
1127 | { | 1109 | { |
1128 | struct ipath_ibdev *dev = | 1110 | struct ipath_ibdev *dev = |
1129 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1111 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1130 | int vendor, boardrev, majrev, minrev; | 1112 | int vendor, boardrev, majrev, minrev; |
1131 | 1113 | ||
1132 | ipath_layer_query_device(dev->dd, &vendor, &boardrev, | 1114 | ipath_layer_query_device(dev->dd, &vendor, &boardrev, |
1133 | &majrev, &minrev); | 1115 | &majrev, &minrev); |
1134 | return sprintf(buf, "%d.%d\n", majrev, minrev); | 1116 | return sprintf(buf, "%d.%d\n", majrev, minrev); |
1135 | } | 1117 | } |
1136 | 1118 | ||
1137 | static ssize_t show_hca(struct class_device *cdev, char *buf) | 1119 | static ssize_t show_hca(struct class_device *cdev, char *buf) |
1138 | { | 1120 | { |
1139 | struct ipath_ibdev *dev = | 1121 | struct ipath_ibdev *dev = |
1140 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1122 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1141 | int ret; | 1123 | int ret; |
1142 | 1124 | ||
1143 | ret = ipath_layer_get_boardname(dev->dd, buf, 128); | 1125 | ret = ipath_layer_get_boardname(dev->dd, buf, 128); |
1144 | if (ret < 0) | 1126 | if (ret < 0) |
1145 | goto bail; | 1127 | goto bail; |
1146 | strcat(buf, "\n"); | 1128 | strcat(buf, "\n"); |
1147 | ret = strlen(buf); | 1129 | ret = strlen(buf); |
1148 | 1130 | ||
1149 | bail: | 1131 | bail: |
1150 | return ret; | 1132 | return ret; |
@@ -1152,40 +1134,40 @@ bail: | |||
1152 | 1134 | ||
1153 | static ssize_t show_stats(struct class_device *cdev, char *buf) | 1135 | static ssize_t show_stats(struct class_device *cdev, char *buf) |
1154 | { | 1136 | { |
1155 | struct ipath_ibdev *dev = | 1137 | struct ipath_ibdev *dev = |
1156 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1138 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1157 | int i; | 1139 | int i; |
1158 | int len; | 1140 | int len; |
1159 | 1141 | ||
1160 | len = sprintf(buf, | 1142 | len = sprintf(buf, |
1161 | "RC resends %d\n" | 1143 | "RC resends %d\n" |
1162 | "RC QACKs %d\n" | 1144 | "RC no QACK %d\n" |
1163 | "RC ACKs %d\n" | 1145 | "RC ACKs %d\n" |
1164 | "RC SEQ NAKs %d\n" | 1146 | "RC SEQ NAKs %d\n" |
1165 | "RC RDMA seq %d\n" | 1147 | "RC RDMA seq %d\n" |
1166 | "RC RNR NAKs %d\n" | 1148 | "RC RNR NAKs %d\n" |
1167 | "RC OTH NAKs %d\n" | 1149 | "RC OTH NAKs %d\n" |
1168 | "RC timeouts %d\n" | 1150 | "RC timeouts %d\n" |
1169 | "RC RDMA dup %d\n" | 1151 | "RC RDMA dup %d\n" |
1170 | "piobuf wait %d\n" | 1152 | "piobuf wait %d\n" |
1171 | "no piobuf %d\n" | 1153 | "no piobuf %d\n" |
1172 | "PKT drops %d\n" | 1154 | "PKT drops %d\n" |
1173 | "WQE errs %d\n", | 1155 | "WQE errs %d\n", |
1174 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, | 1156 | dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, |
1175 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, | 1157 | dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, |
1176 | dev->n_other_naks, dev->n_timeouts, | 1158 | dev->n_other_naks, dev->n_timeouts, |
1177 | dev->n_rdma_dup_busy, dev->n_piowait, | 1159 | dev->n_rdma_dup_busy, dev->n_piowait, |
1178 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); | 1160 | dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); |
1179 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { | 1161 | for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { |
1180 | const struct ipath_opcode_stats *si = &dev->opstats[i]; | 1162 | const struct ipath_opcode_stats *si = &dev->opstats[i]; |
1181 | 1163 | ||
1182 | if (!si->n_packets && !si->n_bytes) | 1164 | if (!si->n_packets && !si->n_bytes) |
1183 | continue; | 1165 | continue; |
1184 | len += sprintf(buf + len, "%02x %llu/%llu\n", i, | 1166 | len += sprintf(buf + len, "%02x %llu/%llu\n", i, |
1185 | (unsigned long long) si->n_packets, | 1167 | (unsigned long long) si->n_packets, |
1186 | (unsigned long long) si->n_bytes); | 1168 | (unsigned long long) si->n_bytes); |
1187 | } | 1169 | } |
1188 | return len; | 1170 | return len; |
1189 | } | 1171 | } |
1190 | 1172 | ||
1191 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 1173 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
@@ -1194,25 +1176,25 @@ static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); | |||
1194 | static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); | 1176 | static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); |
1195 | 1177 | ||
1196 | static struct class_device_attribute *ipath_class_attributes[] = { | 1178 | static struct class_device_attribute *ipath_class_attributes[] = { |
1197 | &class_device_attr_hw_rev, | 1179 | &class_device_attr_hw_rev, |
1198 | &class_device_attr_hca_type, | 1180 | &class_device_attr_hca_type, |
1199 | &class_device_attr_board_id, | 1181 | &class_device_attr_board_id, |
1200 | &class_device_attr_stats | 1182 | &class_device_attr_stats |
1201 | }; | 1183 | }; |
1202 | 1184 | ||
1203 | static int ipath_verbs_register_sysfs(struct ib_device *dev) | 1185 | static int ipath_verbs_register_sysfs(struct ib_device *dev) |
1204 | { | 1186 | { |
1205 | int i; | 1187 | int i; |
1206 | int ret; | 1188 | int ret; |
1207 | 1189 | ||
1208 | for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) | 1190 | for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) |
1209 | if (class_device_create_file(&dev->class_dev, | 1191 | if (class_device_create_file(&dev->class_dev, |
1210 | ipath_class_attributes[i])) { | 1192 | ipath_class_attributes[i])) { |
1211 | ret = 1; | 1193 | ret = 1; |
1212 | goto bail; | 1194 | goto bail; |
1213 | } | 1195 | } |
1214 | 1196 | ||
1215 | ret = 0; | 1197 | ret = 0; |
1216 | 1198 | ||
1217 | bail: | 1199 | bail: |
1218 | return ret; | 1200 | return ret; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index b824632b2a8c..4f8d59300e9b 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -282,7 +282,8 @@ struct ipath_srq { | |||
282 | */ | 282 | */ |
283 | struct ipath_qp { | 283 | struct ipath_qp { |
284 | struct ib_qp ibqp; | 284 | struct ib_qp ibqp; |
285 | struct ipath_qp *next; /* link list for QPN hash table */ | 285 | struct ipath_qp *next; /* link list for QPN hash table */ |
286 | struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */ | ||
286 | struct list_head piowait; /* link for wait PIO buf */ | 287 | struct list_head piowait; /* link for wait PIO buf */ |
287 | struct list_head timerwait; /* link for waiting for timeouts */ | 288 | struct list_head timerwait; /* link for waiting for timeouts */ |
288 | struct ib_ah_attr remote_ah_attr; | 289 | struct ib_ah_attr remote_ah_attr; |
@@ -577,8 +578,6 @@ int ipath_init_qp_table(struct ipath_ibdev *idev, int size); | |||
577 | 578 | ||
578 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); | 579 | void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); |
579 | 580 | ||
580 | void ipath_error_qp(struct ipath_qp *qp); | ||
581 | |||
582 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); | 581 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); |
583 | 582 | ||
584 | void ipath_do_rc_send(unsigned long data); | 583 | void ipath_do_rc_send(unsigned long data); |
@@ -607,9 +606,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
607 | 606 | ||
608 | void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc); | 607 | void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc); |
609 | 608 | ||
610 | void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, | ||
611 | u32 length, struct ib_send_wr *wr, struct ib_wc *wc); | ||
612 | |||
613 | int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); | 609 | int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); |
614 | 610 | ||
615 | void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | 611 | void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, |
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h index 410a764dfcef..ab7cbbbfd03a 100644 --- a/drivers/infiniband/hw/ipath/ips_common.h +++ b/drivers/infiniband/hw/ipath/ips_common.h | |||
@@ -95,7 +95,7 @@ struct ether_header { | |||
95 | __u8 seq_num; | 95 | __u8 seq_num; |
96 | __le32 len; | 96 | __le32 len; |
97 | /* MUST be of word size due to PIO write requirements */ | 97 | /* MUST be of word size due to PIO write requirements */ |
98 | __u32 csum; | 98 | __le32 csum; |
99 | __le16 csum_offset; | 99 | __le16 csum_offset; |
100 | __le16 flags; | 100 | __le16 flags; |
101 | __u16 first_2_bytes; | 101 | __u16 first_2_bytes; |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1985b5dfa481..798e13e14faf 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -182,7 +182,7 @@ struct mthca_cmd_context { | |||
182 | u8 status; | 182 | u8 status; |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static int fw_cmd_doorbell = 1; | 185 | static int fw_cmd_doorbell = 0; |
186 | module_param(fw_cmd_doorbell, int, 0644); | 186 | module_param(fw_cmd_doorbell, int, 0644); |
187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " | 187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " |
188 | "(and supported by FW)"); | 188 | "(and supported by FW)"); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 312cf90731ea..205854e9c662 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c | |||
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | |||
238 | spin_lock(&dev->cq_table.lock); | 238 | spin_lock(&dev->cq_table.lock); |
239 | 239 | ||
240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | 240 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); |
241 | |||
242 | if (cq) | 241 | if (cq) |
243 | atomic_inc(&cq->refcount); | 242 | ++cq->refcount; |
243 | |||
244 | spin_unlock(&dev->cq_table.lock); | 244 | spin_unlock(&dev->cq_table.lock); |
245 | 245 | ||
246 | if (!cq) { | 246 | if (!cq) { |
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | |||
254 | if (cq->ibcq.event_handler) | 254 | if (cq->ibcq.event_handler) |
255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); | 255 | cq->ibcq.event_handler(&event, cq->ibcq.cq_context); |
256 | 256 | ||
257 | if (atomic_dec_and_test(&cq->refcount)) | 257 | spin_lock(&dev->cq_table.lock); |
258 | if (!--cq->refcount) | ||
258 | wake_up(&cq->wait); | 259 | wake_up(&cq->wait); |
260 | spin_unlock(&dev->cq_table.lock); | ||
259 | } | 261 | } |
260 | 262 | ||
261 | static inline int is_recv_cqe(struct mthca_cqe *cqe) | 263 | static inline int is_recv_cqe(struct mthca_cqe *cqe) |
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe) | |||
267 | return !(cqe->is_send & 0x80); | 269 | return !(cqe->is_send & 0x80); |
268 | } | 270 | } |
269 | 271 | ||
270 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 272 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
271 | struct mthca_srq *srq) | 273 | struct mthca_srq *srq) |
272 | { | 274 | { |
273 | struct mthca_cq *cq; | ||
274 | struct mthca_cqe *cqe; | 275 | struct mthca_cqe *cqe; |
275 | u32 prod_index; | 276 | u32 prod_index; |
276 | int nfreed = 0; | 277 | int nfreed = 0; |
277 | 278 | ||
278 | spin_lock_irq(&dev->cq_table.lock); | ||
279 | cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); | ||
280 | if (cq) | ||
281 | atomic_inc(&cq->refcount); | ||
282 | spin_unlock_irq(&dev->cq_table.lock); | ||
283 | |||
284 | if (!cq) | ||
285 | return; | ||
286 | |||
287 | spin_lock_irq(&cq->lock); | 279 | spin_lock_irq(&cq->lock); |
288 | 280 | ||
289 | /* | 281 | /* |
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
301 | 293 | ||
302 | if (0) | 294 | if (0) |
303 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", | 295 | mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", |
304 | qpn, cqn, cq->cons_index, prod_index); | 296 | qpn, cq->cqn, cq->cons_index, prod_index); |
305 | 297 | ||
306 | /* | 298 | /* |
307 | * Now sweep backwards through the CQ, removing CQ entries | 299 | * Now sweep backwards through the CQ, removing CQ entries |
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | |||
325 | } | 317 | } |
326 | 318 | ||
327 | spin_unlock_irq(&cq->lock); | 319 | spin_unlock_irq(&cq->lock); |
328 | if (atomic_dec_and_test(&cq->refcount)) | ||
329 | wake_up(&cq->wait); | ||
330 | } | 320 | } |
331 | 321 | ||
332 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) | 322 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) |
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, | |||
821 | } | 811 | } |
822 | 812 | ||
823 | spin_lock_init(&cq->lock); | 813 | spin_lock_init(&cq->lock); |
824 | atomic_set(&cq->refcount, 1); | 814 | cq->refcount = 1; |
825 | init_waitqueue_head(&cq->wait); | 815 | init_waitqueue_head(&cq->wait); |
826 | 816 | ||
827 | memset(cq_context, 0, sizeof *cq_context); | 817 | memset(cq_context, 0, sizeof *cq_context); |
@@ -896,6 +886,17 @@ err_out: | |||
896 | return err; | 886 | return err; |
897 | } | 887 | } |
898 | 888 | ||
889 | static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) | ||
890 | { | ||
891 | int c; | ||
892 | |||
893 | spin_lock_irq(&dev->cq_table.lock); | ||
894 | c = cq->refcount; | ||
895 | spin_unlock_irq(&dev->cq_table.lock); | ||
896 | |||
897 | return c; | ||
898 | } | ||
899 | |||
899 | void mthca_free_cq(struct mthca_dev *dev, | 900 | void mthca_free_cq(struct mthca_dev *dev, |
900 | struct mthca_cq *cq) | 901 | struct mthca_cq *cq) |
901 | { | 902 | { |
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
929 | spin_lock_irq(&dev->cq_table.lock); | 930 | spin_lock_irq(&dev->cq_table.lock); |
930 | mthca_array_clear(&dev->cq_table.cq, | 931 | mthca_array_clear(&dev->cq_table.cq, |
931 | cq->cqn & (dev->limits.num_cqs - 1)); | 932 | cq->cqn & (dev->limits.num_cqs - 1)); |
933 | --cq->refcount; | ||
932 | spin_unlock_irq(&dev->cq_table.lock); | 934 | spin_unlock_irq(&dev->cq_table.lock); |
933 | 935 | ||
934 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) | 936 | if (dev->mthca_flags & MTHCA_FLAG_MSI_X) |
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
936 | else | 938 | else |
937 | synchronize_irq(dev->pdev->irq); | 939 | synchronize_irq(dev->pdev->irq); |
938 | 940 | ||
939 | atomic_dec(&cq->refcount); | 941 | wait_event(cq->wait, !get_cq_refcount(dev, cq)); |
940 | wait_event(cq->wait, !atomic_read(&cq->refcount)); | ||
941 | 942 | ||
942 | if (cq->is_kernel) { | 943 | if (cq->is_kernel) { |
943 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); | 944 | mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); |
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 4c1dcb4c1822..f8160b8de090 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h | |||
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev, | |||
496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); | 496 | void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); |
497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, | 497 | void mthca_cq_event(struct mthca_dev *dev, u32 cqn, |
498 | enum ib_event_type event_type); | 498 | enum ib_event_type event_type); |
499 | void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, | 499 | void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, |
500 | struct mthca_srq *srq); | 500 | struct mthca_srq *srq); |
501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); | 501 | void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); |
502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); | 502 | int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); |
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index f235c7ea42f0..4730863ece9a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
@@ -49,7 +49,7 @@ enum { | |||
49 | MTHCA_VENDOR_CLASS2 = 0xa | 49 | MTHCA_VENDOR_CLASS2 = 0xa |
50 | }; | 50 | }; |
51 | 51 | ||
52 | int mthca_update_rate(struct mthca_dev *dev, u8 port_num) | 52 | static int mthca_update_rate(struct mthca_dev *dev, u8 port_num) |
53 | { | 53 | { |
54 | struct ib_port_attr *tprops = NULL; | 54 | struct ib_port_attr *tprops = NULL; |
55 | int ret; | 55 | int ret; |
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 25e1c1db9a40..a486dec1707e 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c | |||
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) | |||
761 | 761 | ||
762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) | 762 | int __devinit mthca_init_mr_table(struct mthca_dev *dev) |
763 | { | 763 | { |
764 | unsigned long addr; | ||
764 | int err, i; | 765 | int err, i; |
765 | 766 | ||
766 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, | 767 | err = mthca_alloc_init(&dev->mr_table.mpt_alloc, |
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
796 | goto err_fmr_mpt; | 797 | goto err_fmr_mpt; |
797 | } | 798 | } |
798 | 799 | ||
800 | addr = pci_resource_start(dev->pdev, 4) + | ||
801 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
802 | dev->mr_table.mpt_base); | ||
803 | |||
799 | dev->mr_table.tavor_fmr.mpt_base = | 804 | dev->mr_table.tavor_fmr.mpt_base = |
800 | ioremap(dev->mr_table.mpt_base, | 805 | ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry)); |
801 | (1 << i) * sizeof (struct mthca_mpt_entry)); | ||
802 | 806 | ||
803 | if (!dev->mr_table.tavor_fmr.mpt_base) { | 807 | if (!dev->mr_table.tavor_fmr.mpt_base) { |
804 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); | 808 | mthca_warn(dev, "MPT ioremap for FMR failed.\n"); |
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) | |||
806 | goto err_fmr_mpt; | 810 | goto err_fmr_mpt; |
807 | } | 811 | } |
808 | 812 | ||
813 | addr = pci_resource_start(dev->pdev, 4) + | ||
814 | ((pci_resource_len(dev->pdev, 4) - 1) & | ||
815 | dev->mr_table.mtt_base); | ||
816 | |||
809 | dev->mr_table.tavor_fmr.mtt_base = | 817 | dev->mr_table.tavor_fmr.mtt_base = |
810 | ioremap(dev->mr_table.mtt_base, | 818 | ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE); |
811 | (1 << i) * MTHCA_MTT_SEG_SIZE); | ||
812 | if (!dev->mr_table.tavor_fmr.mtt_base) { | 819 | if (!dev->mr_table.tavor_fmr.mtt_base) { |
813 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); | 820 | mthca_warn(dev, "MTT ioremap for FMR failed.\n"); |
814 | err = -ENOMEM; | 821 | err = -ENOMEM; |
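Editor's note: the mthca_init_mr_table() hunks above change the FMR ioremap calls to map a real physical address instead of the device-internal MPT/MTT base. Because a PCI BAR's length is a power of two, (len - 1) works as a mask that reduces the device address to an offset within BAR 4, which is then rebased onto the BAR's physical start. The arithmetic-only sketch below uses made-up numbers and is not the driver code.

/*
 * addr = pci_resource_start(pdev, 4) +
 *        ((pci_resource_len(pdev, 4) - 1) & mpt_base), with fake values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long bar_start = 0xf8000000UL;  /* pci_resource_start(pdev, 4) */
        unsigned long bar_len   = 0x00800000UL;  /* 8 MB BAR, a power of two    */
        unsigned long mpt_base  = 0x01120000UL;  /* device-internal address     */

        unsigned long addr = bar_start + ((bar_len - 1) & mpt_base);

        printf("offset in BAR: 0x%lx\n", (bar_len - 1) & mpt_base);
        printf("address to ioremap: 0x%lx\n", addr);
        return 0;
}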
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 565a24b1756f..a2eae8a30167 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port, | |||
306 | goto out; | 306 | goto out; |
307 | } | 307 | } |
308 | 308 | ||
309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); | 309 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); |
310 | 310 | ||
311 | out: | 311 | out: |
312 | kfree(in_mad); | 312 | kfree(in_mad); |
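Editor's note: the one-line mthca_query_gid() fix above changes the stride used to index the queried GUID data. A GID is 16 bytes (8-byte subnet prefix plus 8-byte GUID), but the GUIDInfo query appears to return a block of eight 8-byte GUIDs, so the GUID for table index i lives at offset (i % 8) * 8 within that block rather than (i % 8) * 16. The sketch below only illustrates that offset arithmetic with dummy data; it is not the MAD-handling code.

/*
 * Copy the 8-byte GUID for a given table index into the upper half of
 * a 16-byte GID, using an 8-byte stride into the returned block.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char guid_block[64];   /* eight 8-byte GUIDs from the query */
        unsigned char gid[16] = { 0 };  /* prefix (8) + GUID (8)             */
        int index = 3;

        for (int i = 0; i < 64; i++)
                guid_block[i] = (unsigned char)i;

        memcpy(gid + 8, guid_block + (index % 8) * 8, 8);

        printf("GUID bytes start at block offset %d, first byte 0x%02x\n",
               (index % 8) * 8, gid[8]);
        return 0;
}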
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 6676a786d690..179a8f610d0f 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h | |||
@@ -139,11 +139,12 @@ struct mthca_ah { | |||
139 | * a qp may be locked, with the send cq locked first. No other | 139 | * a qp may be locked, with the send cq locked first. No other |
140 | * nesting should be done. | 140 | * nesting should be done. |
141 | * | 141 | * |
142 | * Each struct mthca_cq/qp also has an atomic_t ref count. The | 142 | * Each struct mthca_cq/qp also has an ref count, protected by the |
143 | * pointer from the cq/qp_table to the struct counts as one reference. | 143 | * corresponding table lock. The pointer from the cq/qp_table to the |
144 | * This reference also is good for access through the consumer API, so | 144 | * struct counts as one reference. This reference also is good for |
145 | * modifying the CQ/QP etc doesn't need to take another reference. | 145 | * access through the consumer API, so modifying the CQ/QP etc doesn't |
146 | * Access because of a completion being polled does need a reference. | 146 | * need to take another reference. Access to a QP because of a |
147 | * completion being polled does not need a reference either. | ||
147 | * | 148 | * |
148 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the | 149 | * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the |
149 | * destroy function to sleep on. | 150 | * destroy function to sleep on. |
@@ -159,8 +160,9 @@ struct mthca_ah { | |||
159 | * - decrement ref count; if zero, wake up waiters | 160 | * - decrement ref count; if zero, wake up waiters |
160 | * | 161 | * |
161 | * To destroy a CQ/QP, we can do the following: | 162 | * To destroy a CQ/QP, we can do the following: |
162 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | 163 | * - lock cq/qp_table |
163 | * - decrement ref count | 164 | * - remove pointer and decrement ref count |
165 | * - unlock cq/qp_table lock | ||
164 | * - wait_event until ref count is zero | 166 | * - wait_event until ref count is zero |
165 | * | 167 | * |
166 | * It is the consumer's responsibilty to make sure that no QP | 168 | * It is the consumer's responsibilty to make sure that no QP |
@@ -197,7 +199,7 @@ struct mthca_cq_resize { | |||
197 | struct mthca_cq { | 199 | struct mthca_cq { |
198 | struct ib_cq ibcq; | 200 | struct ib_cq ibcq; |
199 | spinlock_t lock; | 201 | spinlock_t lock; |
200 | atomic_t refcount; | 202 | int refcount; |
201 | int cqn; | 203 | int cqn; |
202 | u32 cons_index; | 204 | u32 cons_index; |
203 | struct mthca_cq_buf buf; | 205 | struct mthca_cq_buf buf; |
@@ -217,7 +219,7 @@ struct mthca_cq { | |||
217 | struct mthca_srq { | 219 | struct mthca_srq { |
218 | struct ib_srq ibsrq; | 220 | struct ib_srq ibsrq; |
219 | spinlock_t lock; | 221 | spinlock_t lock; |
220 | atomic_t refcount; | 222 | int refcount; |
221 | int srqn; | 223 | int srqn; |
222 | int max; | 224 | int max; |
223 | int max_gs; | 225 | int max_gs; |
@@ -254,7 +256,7 @@ struct mthca_wq { | |||
254 | 256 | ||
255 | struct mthca_qp { | 257 | struct mthca_qp { |
256 | struct ib_qp ibqp; | 258 | struct ib_qp ibqp; |
257 | atomic_t refcount; | 259 | int refcount; |
258 | u32 qpn; | 260 | u32 qpn; |
259 | int is_direct; | 261 | int is_direct; |
260 | u8 port; /* for SQP and memfree use only */ | 262 | u8 port; /* for SQP and memfree use only */ |
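The updated header comment above describes the new scheme in prose; here is a minimal sketch of it (simplified, not the driver code itself): the ref count becomes a plain int protected by the object table's spinlock instead of an atomic_t, so table membership and the count change under the same lock, closing the window where a lookup from the event path could race with destroy. IRQ-disabling lock variants are chosen per call site in the real driver and are condensed here.

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct obj {
		int refcount;			/* protected by table->lock */
		wait_queue_head_t wait;
	};

	struct obj_table {
		spinlock_t lock;
		/* lookup structure omitted */
	};

	static int obj_refcount(struct obj_table *table, struct obj *obj)
	{
		int c;

		spin_lock_irq(&table->lock);
		c = obj->refcount;
		spin_unlock_irq(&table->lock);
		return c;
	}

	static void obj_event(struct obj_table *table, struct obj *obj)
	{
		/* ... handle the event with the reference taken at lookup ... */
		spin_lock(&table->lock);
		if (!--obj->refcount)
			wake_up(&obj->wait);
		spin_unlock(&table->lock);
	}

	static void obj_destroy(struct obj_table *table, struct obj *obj)
	{
		spin_lock_irq(&table->lock);
		/* remove obj from the lookup table here, then ... */
		--obj->refcount;		/* drop the table's reference */
		spin_unlock_irq(&table->lock);

		/* sleep until every event handler has dropped its reference */
		wait_event(obj->wait, !obj_refcount(table, obj));
	}
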
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f37b0e367323..07c13be07a4a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
240 | spin_lock(&dev->qp_table.lock); | 240 | spin_lock(&dev->qp_table.lock); |
241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); | 241 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); |
242 | if (qp) | 242 | if (qp) |
243 | atomic_inc(&qp->refcount); | 243 | ++qp->refcount; |
244 | spin_unlock(&dev->qp_table.lock); | 244 | spin_unlock(&dev->qp_table.lock); |
245 | 245 | ||
246 | if (!qp) { | 246 | if (!qp) { |
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
257 | if (qp->ibqp.event_handler) | 257 | if (qp->ibqp.event_handler) |
258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); | 258 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); |
259 | 259 | ||
260 | if (atomic_dec_and_test(&qp->refcount)) | 260 | spin_lock(&dev->qp_table.lock); |
261 | if (!--qp->refcount) | ||
261 | wake_up(&qp->wait); | 262 | wake_up(&qp->wait); |
263 | spin_unlock(&dev->qp_table.lock); | ||
262 | } | 264 | } |
263 | 265 | ||
264 | static int to_mthca_state(enum ib_qp_state ib_state) | 266 | static int to_mthca_state(enum ib_qp_state ib_state) |
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | |||
833 | * entries and reinitialize the QP. | 835 | * entries and reinitialize the QP. |
834 | */ | 836 | */ |
835 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { | 837 | if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { |
836 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 838 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
837 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 839 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
838 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 840 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
839 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 841 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
840 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 842 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
841 | 843 | ||
842 | mthca_wq_init(&qp->sq); | 844 | mthca_wq_init(&qp->sq); |
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, | |||
1096 | int ret; | 1098 | int ret; |
1097 | int i; | 1099 | int i; |
1098 | 1100 | ||
1099 | atomic_set(&qp->refcount, 1); | 1101 | qp->refcount = 1; |
1100 | init_waitqueue_head(&qp->wait); | 1102 | init_waitqueue_head(&qp->wait); |
1101 | qp->state = IB_QPS_RESET; | 1103 | qp->state = IB_QPS_RESET; |
1102 | qp->atomic_rd_en = 0; | 1104 | qp->atomic_rd_en = 0; |
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev, | |||
1318 | return err; | 1320 | return err; |
1319 | } | 1321 | } |
1320 | 1322 | ||
1323 | static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) | ||
1324 | { | ||
1325 | int c; | ||
1326 | |||
1327 | spin_lock_irq(&dev->qp_table.lock); | ||
1328 | c = qp->refcount; | ||
1329 | spin_unlock_irq(&dev->qp_table.lock); | ||
1330 | |||
1331 | return c; | ||
1332 | } | ||
1333 | |||
1321 | void mthca_free_qp(struct mthca_dev *dev, | 1334 | void mthca_free_qp(struct mthca_dev *dev, |
1322 | struct mthca_qp *qp) | 1335 | struct mthca_qp *qp) |
1323 | { | 1336 | { |
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1339 | spin_lock(&dev->qp_table.lock); | 1352 | spin_lock(&dev->qp_table.lock); |
1340 | mthca_array_clear(&dev->qp_table.qp, | 1353 | mthca_array_clear(&dev->qp_table.qp, |
1341 | qp->qpn & (dev->limits.num_qps - 1)); | 1354 | qp->qpn & (dev->limits.num_qps - 1)); |
1355 | --qp->refcount; | ||
1342 | spin_unlock(&dev->qp_table.lock); | 1356 | spin_unlock(&dev->qp_table.lock); |
1343 | 1357 | ||
1344 | if (send_cq != recv_cq) | 1358 | if (send_cq != recv_cq) |
1345 | spin_unlock(&recv_cq->lock); | 1359 | spin_unlock(&recv_cq->lock); |
1346 | spin_unlock_irq(&send_cq->lock); | 1360 | spin_unlock_irq(&send_cq->lock); |
1347 | 1361 | ||
1348 | atomic_dec(&qp->refcount); | 1362 | wait_event(qp->wait, !get_qp_refcount(dev, qp)); |
1349 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
1350 | 1363 | ||
1351 | if (qp->state != IB_QPS_RESET) | 1364 | if (qp->state != IB_QPS_RESET) |
1352 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, | 1365 | mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, |
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1358 | * unref the mem-free tables and free the QPN in our table. | 1371 | * unref the mem-free tables and free the QPN in our table. |
1359 | */ | 1372 | */ |
1360 | if (!qp->ibqp.uobject) { | 1373 | if (!qp->ibqp.uobject) { |
1361 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, | 1374 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, |
1362 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1375 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1363 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) | 1376 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
1364 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, | 1377 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, |
1365 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | 1378 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); |
1366 | 1379 | ||
1367 | mthca_free_memfree(dev, qp); | 1380 | mthca_free_memfree(dev, qp); |
@@ -1714,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1714 | 1727 | ||
1715 | ind = qp->rq.next_ind; | 1728 | ind = qp->rq.next_ind; |
1716 | 1729 | ||
1717 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1730 | for (nreq = 0; wr; wr = wr->next) { |
1718 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1719 | nreq = 0; | ||
1720 | |||
1721 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1722 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1723 | |||
1724 | wmb(); | ||
1725 | |||
1726 | mthca_write64(doorbell, | ||
1727 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1728 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1729 | |||
1730 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1731 | size0 = 0; | ||
1732 | } | ||
1733 | |||
1734 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | 1731 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1735 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | 1732 | mthca_err(dev, "RQ %06x full (%u head, %u tail," |
1736 | " %d max, %d nreq)\n", qp->qpn, | 1733 | " %d max, %d nreq)\n", qp->qpn, |
@@ -1784,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1784 | ++ind; | 1781 | ++ind; |
1785 | if (unlikely(ind >= qp->rq.max)) | 1782 | if (unlikely(ind >= qp->rq.max)) |
1786 | ind -= qp->rq.max; | 1783 | ind -= qp->rq.max; |
1784 | |||
1785 | ++nreq; | ||
1786 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1787 | nreq = 0; | ||
1788 | |||
1789 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1790 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1791 | |||
1792 | wmb(); | ||
1793 | |||
1794 | mthca_write64(doorbell, | ||
1795 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1796 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1797 | |||
1798 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1799 | size0 = 0; | ||
1800 | } | ||
1787 | } | 1801 | } |
1788 | 1802 | ||
1789 | out: | 1803 | out: |
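The two mthca_tavor_post_receive() hunks above reorder the per-doorbell batching; a condensed sketch of the new loop shape (mthca identifiers, WQE construction elided): the batch counter is bumped and the intermediate doorbell rung only after a request has actually been posted, so a list that ends exactly on a batch boundary still gets its doorbell instead of relying on a next iteration that never comes. The same reordering is applied to mthca_tavor_post_srq_recv() further below.

	for (nreq = 0; wr; wr = wr->next) {
		/* ... overflow check, WQE write-out, ++ind ... */

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/* descriptors must be visible before the doorbell */
			wmb();

			/* ring the receive doorbell for this full batch
			 * (doorbell[] setup as in the hunk above) */
			mthca_write64(doorbell, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}
	}
	/* any remaining requests are rung in one final doorbell after the
	 * loop, as before */
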
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index adcaf85355ae..b292fefa3b41 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, | |||
241 | goto err_out_mailbox; | 241 | goto err_out_mailbox; |
242 | 242 | ||
243 | spin_lock_init(&srq->lock); | 243 | spin_lock_init(&srq->lock); |
244 | atomic_set(&srq->refcount, 1); | 244 | srq->refcount = 1; |
245 | init_waitqueue_head(&srq->wait); | 245 | init_waitqueue_head(&srq->wait); |
246 | 246 | ||
247 | if (mthca_is_memfree(dev)) | 247 | if (mthca_is_memfree(dev)) |
@@ -308,6 +308,17 @@ err_out: | |||
308 | return err; | 308 | return err; |
309 | } | 309 | } |
310 | 310 | ||
311 | static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) | ||
312 | { | ||
313 | int c; | ||
314 | |||
315 | spin_lock_irq(&dev->srq_table.lock); | ||
316 | c = srq->refcount; | ||
317 | spin_unlock_irq(&dev->srq_table.lock); | ||
318 | |||
319 | return c; | ||
320 | } | ||
321 | |||
311 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | 322 | void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) |
312 | { | 323 | { |
313 | struct mthca_mailbox *mailbox; | 324 | struct mthca_mailbox *mailbox; |
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) | |||
329 | spin_lock_irq(&dev->srq_table.lock); | 340 | spin_lock_irq(&dev->srq_table.lock); |
330 | mthca_array_clear(&dev->srq_table.srq, | 341 | mthca_array_clear(&dev->srq_table.srq, |
331 | srq->srqn & (dev->limits.num_srqs - 1)); | 342 | srq->srqn & (dev->limits.num_srqs - 1)); |
343 | --srq->refcount; | ||
332 | spin_unlock_irq(&dev->srq_table.lock); | 344 | spin_unlock_irq(&dev->srq_table.lock); |
333 | 345 | ||
334 | atomic_dec(&srq->refcount); | 346 | wait_event(srq->wait, !get_srq_refcount(dev, srq)); |
335 | wait_event(srq->wait, !atomic_read(&srq->refcount)); | ||
336 | 347 | ||
337 | if (!srq->ibsrq.uobject) { | 348 | if (!srq->ibsrq.uobject) { |
338 | mthca_free_srq_buf(dev, srq); | 349 | mthca_free_srq_buf(dev, srq); |
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
414 | spin_lock(&dev->srq_table.lock); | 425 | spin_lock(&dev->srq_table.lock); |
415 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); | 426 | srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); |
416 | if (srq) | 427 | if (srq) |
417 | atomic_inc(&srq->refcount); | 428 | ++srq->refcount; |
418 | spin_unlock(&dev->srq_table.lock); | 429 | spin_unlock(&dev->srq_table.lock); |
419 | 430 | ||
420 | if (!srq) { | 431 | if (!srq) { |
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, | |||
431 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); | 442 | srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); |
432 | 443 | ||
433 | out: | 444 | out: |
434 | if (atomic_dec_and_test(&srq->refcount)) | 445 | spin_lock(&dev->srq_table.lock); |
446 | if (!--srq->refcount) | ||
435 | wake_up(&srq->wait); | 447 | wake_up(&srq->wait); |
448 | spin_unlock(&dev->srq_table.lock); | ||
436 | } | 449 | } |
437 | 450 | ||
438 | /* | 451 | /* |
@@ -477,26 +490,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
477 | 490 | ||
478 | first_ind = srq->first_free; | 491 | first_ind = srq->first_free; |
479 | 492 | ||
480 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 493 | for (nreq = 0; wr; wr = wr->next) { |
481 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
482 | nreq = 0; | ||
483 | |||
484 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | ||
485 | doorbell[1] = cpu_to_be32(srq->srqn << 8); | ||
486 | |||
487 | /* | ||
488 | * Make sure that descriptors are written | ||
489 | * before doorbell is rung. | ||
490 | */ | ||
491 | wmb(); | ||
492 | |||
493 | mthca_write64(doorbell, | ||
494 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
495 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
496 | |||
497 | first_ind = srq->first_free; | ||
498 | } | ||
499 | |||
500 | ind = srq->first_free; | 494 | ind = srq->first_free; |
501 | 495 | ||
502 | if (ind < 0) { | 496 | if (ind < 0) { |
@@ -556,6 +550,26 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
556 | 550 | ||
557 | srq->wrid[ind] = wr->wr_id; | 551 | srq->wrid[ind] = wr->wr_id; |
558 | srq->first_free = next_ind; | 552 | srq->first_free = next_ind; |
553 | |||
554 | ++nreq; | ||
555 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
556 | nreq = 0; | ||
557 | |||
558 | doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); | ||
559 | doorbell[1] = cpu_to_be32(srq->srqn << 8); | ||
560 | |||
561 | /* | ||
562 | * Make sure that descriptors are written | ||
563 | * before doorbell is rung. | ||
564 | */ | ||
565 | wmb(); | ||
566 | |||
567 | mthca_write64(doorbell, | ||
568 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
569 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
570 | |||
571 | first_ind = srq->first_free; | ||
572 | } | ||
559 | } | 573 | } |
560 | 574 | ||
561 | if (likely(nreq)) { | 575 | if (likely(nreq)) { |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a54da42849ae..8406839b91cf 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -275,6 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev, | |||
275 | spin_lock_irqsave(&priv->tx_lock, flags); | 275 | spin_lock_irqsave(&priv->tx_lock, flags); |
276 | ++priv->tx_tail; | 276 | ++priv->tx_tail; |
277 | if (netif_queue_stopped(dev) && | 277 | if (netif_queue_stopped(dev) && |
278 | test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && | ||
278 | priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) | 279 | priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) |
279 | netif_wake_queue(dev); | 280 | netif_wake_queue(dev); |
280 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 281 | spin_unlock_irqrestore(&priv->tx_lock, flags); |
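One line of context on the ipoib_ib.c change above, with the completion-handler tail sketched for reference (names as in the driver): gating the wake-up on IPOIB_FLAG_ADMIN_UP presumably keeps a late TX completion from re-enabling a queue that the interface-down path has just stopped.

	/* Sketch: restart the queue only while the interface is still
	 * administratively up and at least half the send queue has drained.
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);
	++priv->tx_tail;
	if (netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
		netif_wake_queue(dev);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
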
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 4ca175553f9f..f887780e8093 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
158 | if (priv->pkey == pkey) { | 158 | if (priv->pkey == pkey) { |
159 | unregister_netdev(priv->dev); | 159 | unregister_netdev(priv->dev); |
160 | ipoib_dev_cleanup(priv->dev); | 160 | ipoib_dev_cleanup(priv->dev); |
161 | |||
162 | list_del(&priv->list); | 161 | list_del(&priv->list); |
163 | 162 | free_netdev(priv->dev); | |
164 | kfree(priv); | ||
165 | 163 | ||
166 | ret = 0; | 164 | ret = 0; |
167 | break; | 165 | break; |
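The ipoib_vlan.c hunk swaps kfree(priv) for free_netdev(priv->dev). A small illustrative sketch of why (hypothetical names; the child device is assumed to be allocated the usual way, with the private struct embedded in the net_device): the pointer returned by netdev_priv() is not itself a separate allocation, so the only correct release is free_netdev() on the device, which frees the private area along with it.

	#include <linux/list.h>
	#include <linux/netdevice.h>

	struct child_priv {
		struct list_head list;
	};

	static void child_destroy(struct net_device *dev)
	{
		struct child_priv *priv = netdev_priv(dev);

		list_del(&priv->list);	/* unlink from the parent's child list */
		free_netdev(dev);	/* frees dev and priv together */
	}
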
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 5f2b3f6e4c47..9cbdffa08dc2 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
340 | /* XXX should send SRP_I_LOGOUT request */ | 340 | /* XXX should send SRP_I_LOGOUT request */ |
341 | 341 | ||
342 | init_completion(&target->done); | 342 | init_completion(&target->done); |
343 | ib_send_cm_dreq(target->cm_id, NULL, 0); | 343 | if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { |
344 | printk(KERN_DEBUG PFX "Sending CM DREQ failed\n"); | ||
345 | return; | ||
346 | } | ||
344 | wait_for_completion(&target->done); | 347 | wait_for_completion(&target->done); |
345 | } | 348 | } |
346 | 349 | ||
@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr) | |||
351 | spin_lock_irq(target->scsi_host->host_lock); | 354 | spin_lock_irq(target->scsi_host->host_lock); |
352 | if (target->state != SRP_TARGET_DEAD) { | 355 | if (target->state != SRP_TARGET_DEAD) { |
353 | spin_unlock_irq(target->scsi_host->host_lock); | 356 | spin_unlock_irq(target->scsi_host->host_lock); |
354 | scsi_host_put(target->scsi_host); | ||
355 | return; | 357 | return; |
356 | } | 358 | } |
357 | target->state = SRP_TARGET_REMOVED; | 359 | target->state = SRP_TARGET_REMOVED; |
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr) | |||
365 | ib_destroy_cm_id(target->cm_id); | 367 | ib_destroy_cm_id(target->cm_id); |
366 | srp_free_target_ib(target); | 368 | srp_free_target_ib(target); |
367 | scsi_host_put(target->scsi_host); | 369 | scsi_host_put(target->scsi_host); |
368 | /* And another put to really free the target port... */ | ||
369 | scsi_host_put(target->scsi_host); | ||
370 | } | 370 | } |
371 | 371 | ||
372 | static int srp_connect_target(struct srp_target_port *target) | 372 | static int srp_connect_target(struct srp_target_port *target) |
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target) | |||
409 | } | 409 | } |
410 | } | 410 | } |
411 | 411 | ||
412 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | ||
413 | struct srp_target_port *target, | ||
414 | struct srp_request *req) | ||
415 | { | ||
416 | struct scatterlist *scat; | ||
417 | int nents; | ||
418 | |||
419 | if (!scmnd->request_buffer || | ||
420 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | ||
421 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | ||
422 | return; | ||
423 | |||
424 | /* | ||
425 | * This handling of non-SG commands can be killed when the | ||
426 | * SCSI midlayer no longer generates non-SG commands. | ||
427 | */ | ||
428 | if (likely(scmnd->use_sg)) { | ||
429 | nents = scmnd->use_sg; | ||
430 | scat = scmnd->request_buffer; | ||
431 | } else { | ||
432 | nents = 1; | ||
433 | scat = &req->fake_sg; | ||
434 | } | ||
435 | |||
436 | dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, | ||
437 | scmnd->sc_data_direction); | ||
438 | } | ||
439 | |||
412 | static int srp_reconnect_target(struct srp_target_port *target) | 440 | static int srp_reconnect_target(struct srp_target_port *target) |
413 | { | 441 | { |
414 | struct ib_cm_id *new_cm_id; | 442 | struct ib_cm_id *new_cm_id; |
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
455 | list_for_each_entry(req, &target->req_queue, list) { | 483 | list_for_each_entry(req, &target->req_queue, list) { |
456 | req->scmnd->result = DID_RESET << 16; | 484 | req->scmnd->result = DID_RESET << 16; |
457 | req->scmnd->scsi_done(req->scmnd); | 485 | req->scmnd->scsi_done(req->scmnd); |
486 | srp_unmap_data(req->scmnd, target, req); | ||
458 | } | 487 | } |
459 | 488 | ||
460 | target->rx_head = 0; | 489 | target->rx_head = 0; |
461 | target->tx_head = 0; | 490 | target->tx_head = 0; |
462 | target->tx_tail = 0; | 491 | target->tx_tail = 0; |
463 | target->req_head = 0; | 492 | INIT_LIST_HEAD(&target->free_reqs); |
464 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | ||
465 | target->req_ring[i].next = i + 1; | ||
466 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | ||
467 | INIT_LIST_HEAD(&target->req_queue); | 493 | INIT_LIST_HEAD(&target->req_queue); |
494 | for (i = 0; i < SRP_SQ_SIZE; ++i) | ||
495 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | ||
468 | 496 | ||
469 | ret = srp_connect_target(target); | 497 | ret = srp_connect_target(target); |
470 | if (ret) | 498 | if (ret) |
@@ -589,32 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
589 | return len; | 617 | return len; |
590 | } | 618 | } |
591 | 619 | ||
592 | static void srp_unmap_data(struct scsi_cmnd *scmnd, | 620 | static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) |
593 | struct srp_target_port *target, | ||
594 | struct srp_request *req) | ||
595 | { | 621 | { |
596 | struct scatterlist *scat; | 622 | srp_unmap_data(req->scmnd, target, req); |
597 | int nents; | 623 | list_move_tail(&req->list, &target->free_reqs); |
598 | |||
599 | if (!scmnd->request_buffer || | ||
600 | (scmnd->sc_data_direction != DMA_TO_DEVICE && | ||
601 | scmnd->sc_data_direction != DMA_FROM_DEVICE)) | ||
602 | return; | ||
603 | |||
604 | /* | ||
605 | * This handling of non-SG commands can be killed when the | ||
606 | * SCSI midlayer no longer generates non-SG commands. | ||
607 | */ | ||
608 | if (likely(scmnd->use_sg)) { | ||
609 | nents = scmnd->use_sg; | ||
610 | scat = scmnd->request_buffer; | ||
611 | } else { | ||
612 | nents = 1; | ||
613 | scat = &req->fake_sg; | ||
614 | } | ||
615 | |||
616 | dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, | ||
617 | scmnd->sc_data_direction); | ||
618 | } | 624 | } |
619 | 625 | ||
620 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | 626 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) |
@@ -639,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
639 | req->tsk_status = rsp->data[3]; | 645 | req->tsk_status = rsp->data[3]; |
640 | complete(&req->done); | 646 | complete(&req->done); |
641 | } else { | 647 | } else { |
642 | scmnd = req->scmnd; | 648 | scmnd = req->scmnd; |
643 | if (!scmnd) | 649 | if (!scmnd) |
644 | printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", | 650 | printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", |
645 | (unsigned long long) rsp->tag); | 651 | (unsigned long long) rsp->tag); |
@@ -657,16 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
657 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) | 663 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) |
658 | scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); | 664 | scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); |
659 | 665 | ||
660 | srp_unmap_data(scmnd, target, req); | ||
661 | |||
662 | if (!req->tsk_mgmt) { | 666 | if (!req->tsk_mgmt) { |
663 | req->scmnd = NULL; | ||
664 | scmnd->host_scribble = (void *) -1L; | 667 | scmnd->host_scribble = (void *) -1L; |
665 | scmnd->scsi_done(scmnd); | 668 | scmnd->scsi_done(scmnd); |
666 | 669 | ||
667 | list_del(&req->list); | 670 | srp_remove_req(target, req); |
668 | req->next = target->req_head; | ||
669 | target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT; | ||
670 | } else | 671 | } else |
671 | req->cmd_done = 1; | 672 | req->cmd_done = 1; |
672 | } | 673 | } |
@@ -853,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
853 | struct srp_request *req; | 854 | struct srp_request *req; |
854 | struct srp_iu *iu; | 855 | struct srp_iu *iu; |
855 | struct srp_cmd *cmd; | 856 | struct srp_cmd *cmd; |
856 | long req_index; | ||
857 | int len; | 857 | int len; |
858 | 858 | ||
859 | if (target->state == SRP_TARGET_CONNECTING) | 859 | if (target->state == SRP_TARGET_CONNECTING) |
@@ -873,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
873 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, | 873 | dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, |
874 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); | 874 | SRP_MAX_IU_LEN, DMA_TO_DEVICE); |
875 | 875 | ||
876 | req_index = target->req_head; | 876 | req = list_entry(target->free_reqs.next, struct srp_request, list); |
877 | 877 | ||
878 | scmnd->scsi_done = done; | 878 | scmnd->scsi_done = done; |
879 | scmnd->result = 0; | 879 | scmnd->result = 0; |
880 | scmnd->host_scribble = (void *) req_index; | 880 | scmnd->host_scribble = (void *) (long) req->index; |
881 | 881 | ||
882 | cmd = iu->buf; | 882 | cmd = iu->buf; |
883 | memset(cmd, 0, sizeof *cmd); | 883 | memset(cmd, 0, sizeof *cmd); |
884 | 884 | ||
885 | cmd->opcode = SRP_CMD; | 885 | cmd->opcode = SRP_CMD; |
886 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | 886 | cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); |
887 | cmd->tag = req_index; | 887 | cmd->tag = req->index; |
888 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); | 888 | memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); |
889 | 889 | ||
890 | req = &target->req_ring[req_index]; | ||
891 | |||
892 | req->scmnd = scmnd; | 890 | req->scmnd = scmnd; |
893 | req->cmd = iu; | 891 | req->cmd = iu; |
894 | req->cmd_done = 0; | 892 | req->cmd_done = 0; |
@@ -913,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
913 | goto err_unmap; | 911 | goto err_unmap; |
914 | } | 912 | } |
915 | 913 | ||
916 | target->req_head = req->next; | 914 | list_move_tail(&req->list, &target->req_queue); |
917 | list_add_tail(&req->list, &target->req_queue); | ||
918 | 915 | ||
919 | return 0; | 916 | return 0; |
920 | 917 | ||
@@ -1137,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1137 | return 0; | 1134 | return 0; |
1138 | } | 1135 | } |
1139 | 1136 | ||
1140 | static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | 1137 | static int srp_send_tsk_mgmt(struct srp_target_port *target, |
1138 | struct srp_request *req, u8 func) | ||
1141 | { | 1139 | { |
1142 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1143 | struct srp_request *req; | ||
1144 | struct srp_iu *iu; | 1140 | struct srp_iu *iu; |
1145 | struct srp_tsk_mgmt *tsk_mgmt; | 1141 | struct srp_tsk_mgmt *tsk_mgmt; |
1146 | int req_index; | ||
1147 | int ret = FAILED; | ||
1148 | 1142 | ||
1149 | spin_lock_irq(target->scsi_host->host_lock); | 1143 | spin_lock_irq(target->scsi_host->host_lock); |
1150 | 1144 | ||
1151 | if (target->state == SRP_TARGET_DEAD || | 1145 | if (target->state == SRP_TARGET_DEAD || |
1152 | target->state == SRP_TARGET_REMOVED) { | 1146 | target->state == SRP_TARGET_REMOVED) { |
1153 | scmnd->result = DID_BAD_TARGET << 16; | 1147 | req->scmnd->result = DID_BAD_TARGET << 16; |
1154 | goto out; | 1148 | goto out; |
1155 | } | 1149 | } |
1156 | 1150 | ||
1157 | if (scmnd->host_scribble == (void *) -1L) | ||
1158 | goto out; | ||
1159 | |||
1160 | req_index = (long) scmnd->host_scribble; | ||
1161 | printk(KERN_ERR "Abort for req_index %d\n", req_index); | ||
1162 | |||
1163 | req = &target->req_ring[req_index]; | ||
1164 | init_completion(&req->done); | 1151 | init_completion(&req->done); |
1165 | 1152 | ||
1166 | iu = __srp_get_tx_iu(target); | 1153 | iu = __srp_get_tx_iu(target); |
@@ -1171,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | |||
1171 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); | 1158 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); |
1172 | 1159 | ||
1173 | tsk_mgmt->opcode = SRP_TSK_MGMT; | 1160 | tsk_mgmt->opcode = SRP_TSK_MGMT; |
1174 | tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); | 1161 | tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); |
1175 | tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; | 1162 | tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; |
1176 | tsk_mgmt->tsk_mgmt_func = func; | 1163 | tsk_mgmt->tsk_mgmt_func = func; |
1177 | tsk_mgmt->task_tag = req_index; | 1164 | tsk_mgmt->task_tag = req->index; |
1178 | 1165 | ||
1179 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) | 1166 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) |
1180 | goto out; | 1167 | goto out; |
@@ -1182,39 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) | |||
1182 | req->tsk_mgmt = iu; | 1169 | req->tsk_mgmt = iu; |
1183 | 1170 | ||
1184 | spin_unlock_irq(target->scsi_host->host_lock); | 1171 | spin_unlock_irq(target->scsi_host->host_lock); |
1172 | |||
1185 | if (!wait_for_completion_timeout(&req->done, | 1173 | if (!wait_for_completion_timeout(&req->done, |
1186 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) | 1174 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) |
1187 | return FAILED; | 1175 | return -1; |
1188 | spin_lock_irq(target->scsi_host->host_lock); | ||
1189 | 1176 | ||
1190 | if (req->cmd_done) { | 1177 | return 0; |
1191 | list_del(&req->list); | ||
1192 | req->next = target->req_head; | ||
1193 | target->req_head = req_index; | ||
1194 | |||
1195 | scmnd->scsi_done(scmnd); | ||
1196 | } else if (!req->tsk_status) { | ||
1197 | scmnd->result = DID_ABORT << 16; | ||
1198 | ret = SUCCESS; | ||
1199 | } | ||
1200 | 1178 | ||
1201 | out: | 1179 | out: |
1202 | spin_unlock_irq(target->scsi_host->host_lock); | 1180 | spin_unlock_irq(target->scsi_host->host_lock); |
1203 | return ret; | 1181 | return -1; |
1182 | } | ||
1183 | |||
1184 | static int srp_find_req(struct srp_target_port *target, | ||
1185 | struct scsi_cmnd *scmnd, | ||
1186 | struct srp_request **req) | ||
1187 | { | ||
1188 | if (scmnd->host_scribble == (void *) -1L) | ||
1189 | return -1; | ||
1190 | |||
1191 | *req = &target->req_ring[(long) scmnd->host_scribble]; | ||
1192 | |||
1193 | return 0; | ||
1204 | } | 1194 | } |
1205 | 1195 | ||
1206 | static int srp_abort(struct scsi_cmnd *scmnd) | 1196 | static int srp_abort(struct scsi_cmnd *scmnd) |
1207 | { | 1197 | { |
1198 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1199 | struct srp_request *req; | ||
1200 | int ret = SUCCESS; | ||
1201 | |||
1208 | printk(KERN_ERR "SRP abort called\n"); | 1202 | printk(KERN_ERR "SRP abort called\n"); |
1209 | 1203 | ||
1210 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); | 1204 | if (srp_find_req(target, scmnd, &req)) |
1205 | return FAILED; | ||
1206 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) | ||
1207 | return FAILED; | ||
1208 | |||
1209 | spin_lock_irq(target->scsi_host->host_lock); | ||
1210 | |||
1211 | if (req->cmd_done) { | ||
1212 | srp_remove_req(target, req); | ||
1213 | scmnd->scsi_done(scmnd); | ||
1214 | } else if (!req->tsk_status) { | ||
1215 | srp_remove_req(target, req); | ||
1216 | scmnd->result = DID_ABORT << 16; | ||
1217 | } else | ||
1218 | ret = FAILED; | ||
1219 | |||
1220 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1221 | |||
1222 | return ret; | ||
1211 | } | 1223 | } |
1212 | 1224 | ||
1213 | static int srp_reset_device(struct scsi_cmnd *scmnd) | 1225 | static int srp_reset_device(struct scsi_cmnd *scmnd) |
1214 | { | 1226 | { |
1227 | struct srp_target_port *target = host_to_target(scmnd->device->host); | ||
1228 | struct srp_request *req, *tmp; | ||
1229 | |||
1215 | printk(KERN_ERR "SRP reset_device called\n"); | 1230 | printk(KERN_ERR "SRP reset_device called\n"); |
1216 | 1231 | ||
1217 | return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); | 1232 | if (srp_find_req(target, scmnd, &req)) |
1233 | return FAILED; | ||
1234 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) | ||
1235 | return FAILED; | ||
1236 | if (req->tsk_status) | ||
1237 | return FAILED; | ||
1238 | |||
1239 | spin_lock_irq(target->scsi_host->host_lock); | ||
1240 | |||
1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | ||
1242 | if (req->scmnd->device == scmnd->device) { | ||
1243 | req->scmnd->result = DID_RESET << 16; | ||
1244 | req->scmnd->scsi_done(req->scmnd); | ||
1245 | srp_remove_req(target, req); | ||
1246 | } | ||
1247 | |||
1248 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1249 | |||
1250 | return SUCCESS; | ||
1218 | } | 1251 | } |
1219 | 1252 | ||
1220 | static int srp_reset_host(struct scsi_cmnd *scmnd) | 1253 | static int srp_reset_host(struct scsi_cmnd *scmnd) |
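In the restructured error handlers above, srp_send_tsk_mgmt() no longer interprets the outcome or recycles the request; it only posts the task-management IU and waits, returning nonzero on failure or timeout, while srp_find_req() maps the SCSI command back to its slot via host_scribble. A condensed sketch of how srp_abort() now ties these together (essentially the new code with comments added; the name srp_abort_sketch only marks it as a sketch):

	static int srp_abort_sketch(struct scsi_cmnd *scmnd)
	{
		struct srp_target_port *target = host_to_target(scmnd->device->host);
		struct srp_request *req;
		int ret = SUCCESS;

		if (srp_find_req(target, scmnd, &req))	/* already completed */
			return FAILED;
		if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
			return FAILED;			/* send failed or timed out */

		spin_lock_irq(target->scsi_host->host_lock);
		if (req->cmd_done) {
			/* command finished while the abort was in flight */
			srp_remove_req(target, req);
			scmnd->scsi_done(scmnd);
		} else if (!req->tsk_status) {
			/* target confirmed the abort */
			srp_remove_req(target, req);
			scmnd->result = DID_ABORT << 16;
		} else
			ret = FAILED;
		spin_unlock_irq(target->scsi_host->host_lock);

		return ret;
	}
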
@@ -1514,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev, | |||
1514 | 1547 | ||
1515 | INIT_WORK(&target->work, srp_reconnect_work, target); | 1548 | INIT_WORK(&target->work, srp_reconnect_work, target); |
1516 | 1549 | ||
1517 | for (i = 0; i < SRP_SQ_SIZE - 1; ++i) | 1550 | INIT_LIST_HEAD(&target->free_reqs); |
1518 | target->req_ring[i].next = i + 1; | ||
1519 | target->req_ring[SRP_SQ_SIZE - 1].next = -1; | ||
1520 | INIT_LIST_HEAD(&target->req_queue); | 1551 | INIT_LIST_HEAD(&target->req_queue); |
1552 | for (i = 0; i < SRP_SQ_SIZE; ++i) { | ||
1553 | target->req_ring[i].index = i; | ||
1554 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | ||
1555 | } | ||
1521 | 1556 | ||
1522 | ret = srp_parse_options(buf, target); | 1557 | ret = srp_parse_options(buf, target); |
1523 | if (ret) | 1558 | if (ret) |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index bd7f7c3115de..c5cd43aae860 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -101,7 +101,7 @@ struct srp_request { | |||
101 | */ | 101 | */ |
102 | struct scatterlist fake_sg; | 102 | struct scatterlist fake_sg; |
103 | struct completion done; | 103 | struct completion done; |
104 | short next; | 104 | short index; |
105 | u8 cmd_done; | 105 | u8 cmd_done; |
106 | u8 tsk_status; | 106 | u8 tsk_status; |
107 | }; | 107 | }; |
@@ -133,7 +133,7 @@ struct srp_target_port { | |||
133 | unsigned tx_tail; | 133 | unsigned tx_tail; |
134 | struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; | 134 | struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; |
135 | 135 | ||
136 | int req_head; | 136 | struct list_head free_reqs; |
137 | struct list_head req_queue; | 137 | struct list_head req_queue; |
138 | struct srp_request req_ring[SRP_SQ_SIZE]; | 138 | struct srp_request req_ring[SRP_SQ_SIZE]; |
139 | 139 | ||
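Taken together, the ib_srp.c and ib_srp.h hunks above replace the hand-rolled 'next' index chain with a list-based request pool. A condensed sketch of the request life cycle under the new scheme; the helper names srp_init_req_pool, srp_get_req and srp_put_req are hypothetical groupings of code from the hunks above, and serialization through host_lock is assumed as in the driver.

	#include <linux/list.h>

	static void srp_init_req_pool(struct srp_target_port *target)
	{
		int i;

		INIT_LIST_HEAD(&target->free_reqs);
		INIT_LIST_HEAD(&target->req_queue);
		for (i = 0; i < SRP_SQ_SIZE; ++i) {
			/* the slot index doubles as the SRP tag and the
			 * value stored in scmnd->host_scribble */
			target->req_ring[i].index = i;
			list_add_tail(&target->req_ring[i].list, &target->free_reqs);
		}
	}

	static struct srp_request *srp_get_req(struct srp_target_port *target)
	{
		/* queuecommand: take the first free slot; it moves to
		 * req_queue once the SRP_CMD has been posted */
		return list_entry(target->free_reqs.next, struct srp_request, list);
	}

	static void srp_put_req(struct srp_target_port *target, struct srp_request *req)
	{
		/* srp_remove_req() in the patch: unmap, then back to the pool */
		srp_unmap_data(req->scmnd, target, req);
		list_move_tail(&req->list, &target->free_reqs);
	}
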