 drivers/infiniband/core/sysfs.c               |   2
 drivers/infiniband/hw/mthca/mthca_cq.c        |  41
 drivers/infiniband/hw/mthca/mthca_dev.h       |   2
 drivers/infiniband/hw/mthca/mthca_mr.c        |  15
 drivers/infiniband/hw/mthca/mthca_provider.h  |  22
 drivers/infiniband/hw/mthca/mthca_qp.c        |  31
 drivers/infiniband/hw/mthca/mthca_srq.c       |  23
 drivers/infiniband/ulp/ipoib/ipoib_vlan.c     |   4
 drivers/infiniband/ulp/srp/ib_srp.c           | 195
 drivers/infiniband/ulp/srp/ib_srp.h           |   4
 10 files changed, 202 insertions(+), 137 deletions(-)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
 	switch (width) {
 	case 4:
 		ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
-					    (offset % 4)) & 0xf);
+					    (4 - (offset % 8))) & 0xf);
 		break;
 	case 8:
 		ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
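Aside (not part of the patch above): the 4-bit PMA counters read in this hunk are packed two per byte, most significant nibble first, and their bit offsets are nibble-aligned, so the old shift of offset % 4 was always zero and always selected the low nibble. A hypothetical helper, illustrative only, showing the corrected extraction:

#include <linux/types.h>

/*
 * Illustrative helper only -- not a kernel symbol.  40 is the byte offset
 * of the counter block within the MAD data buffer, as in the code above.
 */
static unsigned int pma_read_nibble(const u8 *data, unsigned int offset)
{
	/* offset is the counter's bit offset and is always a multiple of 4 */
	return (data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
}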
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	spin_lock(&dev->cq_table.lock);
 
 	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
 	if (cq)
-		atomic_inc(&cq->refcount);
+		++cq->refcount;
+
 	spin_unlock(&dev->cq_table.lock);
 
 	if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	if (cq->ibcq.event_handler)
 		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-	if (atomic_dec_and_test(&cq->refcount))
+	spin_lock(&dev->cq_table.lock);
+	if (!--cq->refcount)
 		wake_up(&cq->wait);
+	spin_unlock(&dev->cq_table.lock);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
 	return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq)
 {
-	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
 	u32 prod_index;
 	int nfreed = 0;
 
-	spin_lock_irq(&dev->cq_table.lock);
-	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-	spin_unlock_irq(&dev->cq_table.lock);
-
-	if (!cq)
-		return;
-
 	spin_lock_irq(&cq->lock);
 
 	/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 
 	if (0)
 		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-			  qpn, cqn, cq->cons_index, prod_index);
+			  qpn, cq->cqn, cq->cons_index, prod_index);
 
 	/*
 	 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	}
 
 	spin_unlock_irq(&cq->lock);
-	if (atomic_dec_and_test(&cq->refcount))
-		wake_up(&cq->wait);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	}
 
 	spin_lock_init(&cq->lock);
-	atomic_set(&cq->refcount, 1);
+	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);
 
 	memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
 	return err;
 }
 
+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+	int c;
+
+	spin_lock_irq(&dev->cq_table.lock);
+	c = cq->refcount;
+	spin_unlock_irq(&dev->cq_table.lock);
+
+	return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq)
 {
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	spin_lock_irq(&dev->cq_table.lock);
 	mthca_array_clear(&dev->cq_table.cq,
 			  cq->cqn & (dev->limits.num_cqs - 1));
+	--cq->refcount;
 	spin_unlock_irq(&dev->cq_table.lock);
 
 	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	else
 		synchronize_irq(dev->pdev->irq);
 
-	atomic_dec(&cq->refcount);
-	wait_event(cq->wait, !atomic_read(&cq->refcount));
+	wait_event(cq->wait, !get_cq_refcount(dev, cq));
 
 	if (cq->is_kernel) {
 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 
 int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 {
+	unsigned long addr;
 	int err, i;
 
 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}
 
+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mpt_base);
+
 	dev->mr_table.tavor_fmr.mpt_base =
-		ioremap(dev->mr_table.mpt_base,
-			(1 << i) * sizeof (struct mthca_mpt_entry));
+		ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
 
 	if (!dev->mr_table.tavor_fmr.mpt_base) {
 		mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}
 
+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mtt_base);
+
 	dev->mr_table.tavor_fmr.mtt_base =
-		ioremap(dev->mr_table.mtt_base,
-			(1 << i) * MTHCA_MTT_SEG_SIZE);
+		ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
 	if (!dev->mr_table.tavor_fmr.mtt_base) {
 		mthca_warn(dev, "MTT ioremap for FMR failed.\n");
 		err = -ENOMEM;
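Aside (not part of the patch): the two hunks above repeat the same address computation. A hypothetical helper, shown only to make the intent easier to read, reduces the table base to an offset within BAR 4 by masking with the BAR length and rebases it onto the BAR's start address before it is handed to ioremap():

#include <linux/pci.h>

/* Illustrative only -- this helper does not exist in the driver. */
static unsigned long fmr_table_addr(struct pci_dev *pdev, u64 table_base)
{
	return pci_resource_start(pdev, 4) +
		((pci_resource_len(pdev, 4) - 1) & table_base);
}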
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
  * a qp may be locked, with the send cq locked first.  No other
  * nesting should be done.
  *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has an ref count, protected by the
+ * corresponding table lock.  The pointer from the cq/qp_table to the
+ * struct counts as one reference.  This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference.  Access to a QP because of a
+ * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibilty to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
 struct mthca_cq {
 	struct ib_cq		ibcq;
 	spinlock_t		lock;
-	atomic_t		refcount;
+	int			refcount;
 	int			cqn;
 	u32			cons_index;
 	struct mthca_cq_buf	buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
 struct mthca_srq {
 	struct ib_srq		ibsrq;
 	spinlock_t		lock;
-	atomic_t		refcount;
+	int			refcount;
 	int			srqn;
 	int			max;
 	int			max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
 
 struct mthca_qp {
 	struct ib_qp		ibqp;
-	atomic_t		refcount;
+	int			refcount;
 	u32			qpn;
 	int			is_direct;
 	u8			port; /* for SQP and memfree use only */
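The comment block changed above describes the new reference counting scheme in prose; the sketch below restates it as standalone code. All names here (obj, table_lock, the helper functions) are illustrative, not driver symbols, but the shape mirrors the mthca_cq.c, mthca_qp.c and mthca_srq.c hunks in this patch: the event path takes and drops references under the plain table lock, while the destroy path drops the table's own reference and sleeps until the count reaches zero.

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(table_lock);		/* stands in for dev->cq/qp_table.lock */

struct obj {
	int			refcount;	/* protected by table_lock */
	wait_queue_head_t	wait;
};

static void obj_init(struct obj *o)
{
	o->refcount = 1;			/* the lookup table's reference */
	init_waitqueue_head(&o->wait);
}

/* Event path: runs in interrupt context, like mthca_cq_event(). */
static void obj_get(struct obj *o)
{
	spin_lock(&table_lock);
	++o->refcount;
	spin_unlock(&table_lock);
}

static void obj_put(struct obj *o)
{
	spin_lock(&table_lock);
	if (!--o->refcount)
		wake_up(&o->wait);
	spin_unlock(&table_lock);
}

/* Destroy path: process context, like mthca_free_cq(). */
static int obj_refcount(struct obj *o)
{
	int c;

	spin_lock_irq(&table_lock);
	c = o->refcount;
	spin_unlock_irq(&table_lock);

	return c;
}

static void obj_destroy(struct obj *o)
{
	spin_lock_irq(&table_lock);
	/* ... remove o from the lookup table here ... */
	--o->refcount;				/* drop the table's reference */
	spin_unlock_irq(&table_lock);

	wait_event(o->wait, !obj_refcount(o));
	/* no event handler can still hold a reference; o can be freed */
}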
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	spin_lock(&dev->qp_table.lock);
 	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
 	if (qp)
-		atomic_inc(&qp->refcount);
+		++qp->refcount;
 	spin_unlock(&dev->qp_table.lock);
 
 	if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	if (qp->ibqp.event_handler)
 		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
 
-	if (atomic_dec_and_test(&qp->refcount))
+	spin_lock(&dev->qp_table.lock);
+	if (!--qp->refcount)
 		wake_up(&qp->wait);
+	spin_unlock(&dev->qp_table.lock);
 }
 
 static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	int ret;
 	int i;
 
-	atomic_set(&qp->refcount, 1);
+	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
 	qp->state = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	return err;
 }
 
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+	int c;
+
+	spin_lock_irq(&dev->qp_table.lock);
+	c = qp->refcount;
+	spin_unlock_irq(&dev->qp_table.lock);
+
+	return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
 		   struct mthca_qp *qp)
 {
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
+	--qp->refcount;
 	spin_unlock(&dev->qp_table.lock);
 
 	if (send_cq != recv_cq)
 		spin_unlock(&recv_cq->lock);
 	spin_unlock_irq(&send_cq->lock);
 
-	atomic_dec(&qp->refcount);
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
+	wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
 	if (qp->state != IB_QPS_RESET)
 		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_free_memfree(dev, qp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		goto err_out_mailbox;
 
 	spin_lock_init(&srq->lock);
-	atomic_set(&srq->refcount, 1);
+	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);
 
 	if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
 	return err;
 }
 
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	int c;
+
+	spin_lock_irq(&dev->srq_table.lock);
+	c = srq->refcount;
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	return c;
+}
+
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
 	struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	spin_lock_irq(&dev->srq_table.lock);
 	mthca_array_clear(&dev->srq_table.srq,
 			  srq->srqn & (dev->limits.num_srqs - 1));
+	--srq->refcount;
 	spin_unlock_irq(&dev->srq_table.lock);
 
-	atomic_dec(&srq->refcount);
-	wait_event(srq->wait, !atomic_read(&srq->refcount));
+	wait_event(srq->wait, !get_srq_refcount(dev, srq));
 
 	if (!srq->ibsrq.uobject) {
 		mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 	spin_lock(&dev->srq_table.lock);
 	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
 	if (srq)
-		atomic_inc(&srq->refcount);
+		++srq->refcount;
 	spin_unlock(&dev->srq_table.lock);
 
 	if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out:
-	if (atomic_dec_and_test(&srq->refcount))
+	spin_lock(&dev->srq_table.lock);
+	if (!--srq->refcount)
 		wake_up(&srq->wait);
+	spin_unlock(&dev->srq_table.lock);
 }
 
 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4ca175553f9f..f887780e8093 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
 			ipoib_dev_cleanup(priv->dev);
-
 			list_del(&priv->list);
-
-			kfree(priv);
+			free_netdev(priv->dev);
 
 			ret = 0;
 			break;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5bb55742ada6..c32ce4348e1b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
 	}
 }
 
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+			   struct srp_target_port *target,
+			   struct srp_request *req)
+{
+	struct scatterlist *scat;
+	int nents;
+
+	if (!scmnd->request_buffer ||
+	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
+		return;
+
+	/*
+	 * This handling of non-SG commands can be killed when the
+	 * SCSI midlayer no longer generates non-SG commands.
+	 */
+	if (likely(scmnd->use_sg)) {
+		nents = scmnd->use_sg;
+		scat  = scmnd->request_buffer;
+	} else {
+		nents = 1;
+		scat  = &req->fake_sg;
+	}
+
+	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+		     scmnd->sc_data_direction);
+}
+
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	list_for_each_entry(req, &target->req_queue, list) {
 		req->scmnd->result = DID_RESET << 16;
 		req->scmnd->scsi_done(req->scmnd);
+		srp_unmap_data(req->scmnd, target, req);
 	}
 
 	target->rx_head = 0;
 	target->tx_head = 0;
 	target->tx_tail = 0;
-	target->req_head = 0;
-	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-		target->req_ring[i].next = i + 1;
-	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
 
 	ret = srp_connect_target(target);
 	if (ret)
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
-static void srp_unmap_data(struct scsi_cmnd *scmnd,
-			   struct srp_target_port *target,
-			   struct srp_request *req)
-{
-	struct scatterlist *scat;
-	int nents;
-
-	if (!scmnd->request_buffer ||
-	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
-	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
-		return;
-
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat  = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat  = &req->fake_sg;
-	}
-
-	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
-		     scmnd->sc_data_direction);
-}
-
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
-			   int index)
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
 {
-	list_del(&req->list);
-	req->next = target->req_head;
-	target->req_head = index;
+	srp_unmap_data(req->scmnd, target, req);
+	list_move_tail(&req->list, &target->free_reqs);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		req->tsk_status = rsp->data[3];
 		complete(&req->done);
 	} else {
-                scmnd = req->scmnd;
+		scmnd = req->scmnd;
 		if (!scmnd)
 			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
 			       (unsigned long long) rsp->tag);
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
 
-		srp_unmap_data(scmnd, target, req);
-
 		if (!req->tsk_mgmt) {
-			req->scmnd = NULL;
 			scmnd->host_scribble = (void *) -1L;
 			scmnd->scsi_done(scmnd);
 
-			srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT);
+			srp_remove_req(target, req);
 		} else
 			req->cmd_done = 1;
 	}
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
-	long req_index;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
 				SRP_MAX_IU_LEN, DMA_TO_DEVICE);
 
-	req_index = target->req_head;
+	req = list_entry(target->free_reqs.next, struct srp_request, list);
 
 	scmnd->scsi_done = done;
 	scmnd->result = 0;
-	scmnd->host_scribble = (void *) req_index;
+	scmnd->host_scribble = (void *) (long) req->index;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
 
 	cmd->opcode = SRP_CMD;
 	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
-	cmd->tag = req_index;
+	cmd->tag = req->index;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
 
-	req = &target->req_ring[req_index];
-
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
 	req->cmd_done = 0;
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err_unmap;
 	}
 
-	target->req_head = req->next;
-	list_add_tail(&req->list, &target->req_queue);
+	list_move_tail(&req->list, &target->req_queue);
 
 	return 0;
 
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	return 0;
 }
 
-static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+static int srp_send_tsk_mgmt(struct srp_target_port *target,
+			     struct srp_request *req, u8 func)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
-	int req_index;
-	int ret = FAILED;
 
 	spin_lock_irq(target->scsi_host->host_lock);
 
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
-		scmnd->result = DID_BAD_TARGET << 16;
+		req->scmnd->result = DID_BAD_TARGET << 16;
 		goto out;
 	}
 
-	if (scmnd->host_scribble == (void *) -1L)
-		goto out;
-
-	req_index = (long) scmnd->host_scribble;
-	printk(KERN_ERR "Abort for req_index %d\n", req_index);
-
-	req = &target->req_ring[req_index];
 	init_completion(&req->done);
 
 	iu = __srp_get_tx_iu(target);
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
-	tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
+	tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag = req_index;
+	tsk_mgmt->task_tag = req->index;
 
 	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
 		goto out;
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
 	req->tsk_mgmt = iu;
 
 	spin_unlock_irq(target->scsi_host->host_lock);
+
 	if (!wait_for_completion_timeout(&req->done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
-		return FAILED;
-	spin_lock_irq(target->scsi_host->host_lock);
+		return -1;
 
-	if (req->cmd_done) {
-		srp_remove_req(target, req, req_index);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req, req_index);
-		scmnd->result = DID_ABORT << 16;
-		ret = SUCCESS;
-	}
+	return 0;
 
 out:
 	spin_unlock_irq(target->scsi_host->host_lock);
-	return ret;
+	return -1;
+}
+
+static int srp_find_req(struct srp_target_port *target,
+			struct scsi_cmnd *scmnd,
+			struct srp_request **req)
+{
+	if (scmnd->host_scribble == (void *) -1L)
+		return -1;
+
+	*req = &target->req_ring[(long) scmnd->host_scribble];
+
+	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
+	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_request *req;
+	int ret = SUCCESS;
+
 	printk(KERN_ERR "SRP abort called\n");
 
-	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+	if (srp_find_req(target, scmnd, &req))
+		return FAILED;
+	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+		return FAILED;
+
+	spin_lock_irq(target->scsi_host->host_lock);
+
+	if (req->cmd_done) {
+		srp_remove_req(target, req);
+		scmnd->scsi_done(scmnd);
+	} else if (!req->tsk_status) {
+		srp_remove_req(target, req);
+		scmnd->result = DID_ABORT << 16;
+	} else
+		ret = FAILED;
+
+	spin_unlock_irq(target->scsi_host->host_lock);
+
+	return ret;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
+	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_request *req, *tmp;
+
 	printk(KERN_ERR "SRP reset_device called\n");
 
-	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+	if (srp_find_req(target, scmnd, &req))
+		return FAILED;
+	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+		return FAILED;
+	if (req->tsk_status)
+		return FAILED;
+
+	spin_lock_irq(target->scsi_host->host_lock);
+
+	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+		if (req->scmnd->device == scmnd->device) {
+			req->scmnd->result = DID_RESET << 16;
+			scmnd->scsi_done(scmnd);
+			srp_remove_req(target, req);
+		}
+
+	spin_unlock_irq(target->scsi_host->host_lock);
+
+	return SUCCESS;
 }
 
 static int srp_reset_host(struct scsi_cmnd *scmnd)
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 
 	INIT_WORK(&target->work, srp_reconnect_work, target);
 
-	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-		target->req_ring[i].next = i + 1;
-	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
+		target->req_ring[i].index = i;
+		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+	}
 
 	ret = srp_parse_options(buf, target);
 	if (ret)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index bd7f7c3115de..c5cd43aae860 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -101,7 +101,7 @@ struct srp_request {
 	 */
 	struct scatterlist	fake_sg;
 	struct completion	done;
-	short			next;
+	short			index;
 	u8			cmd_done;
 	u8			tsk_status;
 };
@@ -133,7 +133,7 @@ struct srp_target_port {
 	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE + 1];
 
-	int			req_head;
+	struct list_head	free_reqs;
 	struct list_head	req_queue;
 	struct srp_request	req_ring[SRP_SQ_SIZE];
 
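For reference, a condensed sketch of how the free-list scheme introduced by this patch is used. The list and field names (free_reqs, req_queue, index) follow the patch; the surrounding structure and helpers are simplified stand-ins, not the real driver code:

#include <linux/list.h>

struct sketch_request {
	struct list_head list;
	short		 index;
};

static LIST_HEAD(free_reqs);	/* requests available for new commands */
static LIST_HEAD(req_queue);	/* requests currently outstanding */

static void init_req_ring(struct sketch_request *ring, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		ring[i].index = i;
		list_add_tail(&ring[i].list, &free_reqs);
	}
}

/* queuecommand side: take a free request and mark it outstanding */
static struct sketch_request *get_req(void)
{
	struct sketch_request *req;

	if (list_empty(&free_reqs))
		return NULL;
	req = list_entry(free_reqs.next, struct sketch_request, list);
	list_move_tail(&req->list, &req_queue);
	return req;
}

/* completion/error side: return the request to the free list */
static void put_req(struct sketch_request *req)
{
	list_move_tail(&req->list, &free_reqs);
}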
