Diffstat (limited to 'drivers/infiniband/hw')

-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      |  33
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h      |   3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       | 132
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h      |   5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 127
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h |  53
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h     |   7

7 files changed, 308 insertions, 52 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 2825615ce81c..7780e974cfb5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -1514,6 +1514,37 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 			    CMD_TIME_CLASS_A, status);
 }
 
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
+		    u8 *status)
+{
+	struct mthca_mailbox *mailbox;
+	__be32 *inbox;
+	int err;
+
+#define RESIZE_CQ_IN_SIZE		0x40
+#define RESIZE_CQ_LOG_SIZE_OFFSET	0x0c
+#define RESIZE_CQ_LKEY_OFFSET		0x1c
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	inbox = mailbox->buf;
+
+	memset(inbox, 0, RESIZE_CQ_IN_SIZE);
+	/*
+	 * Leave start address fields zeroed out -- mthca assumes that
+	 * MRs for CQs always start at virtual address 0.
+	 */
+	MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET);
+	MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);
+
+	err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
+			CMD_TIME_CLASS_B, status);
+
+	mthca_free_mailbox(dev, mailbox);
+	return err;
+}
+
 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    int srq_num, u8 *status)
 {
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 18175bec84c2..3473b8235ee0 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -298,6 +299,8 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int cq_num, u8 *status);
 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int cq_num, u8 *status);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
+		    u8 *status);
 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    int srq_num, u8 *status);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 78d9cc119f33..76aabc5bf371 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
@@ -150,24 +150,29 @@ struct mthca_err_cqe {
 #define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24)
 #define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
 
-static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
+						 int entry)
 {
-	if (cq->is_direct)
-		return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
+	if (buf->is_direct)
+		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
 	else
-		return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
+		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
 			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
 }
 
-static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
+static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+{
+	return get_cqe_from_buf(&cq->buf, entry);
+}
+
+static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
 {
-	struct mthca_cqe *cqe = get_cqe(cq, i);
 	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
 }
 
 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
 {
-	return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
+	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
 }
 
 static inline void set_cqe_hw(struct mthca_cqe *cqe)
@@ -289,7 +294,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	 * from our QP and therefore don't need to be checked.
 	 */
 	for (prod_index = cq->cons_index;
-	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
+	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
 	     ++prod_index)
 		if (prod_index == cq->cons_index + cq->ibcq.cqe)
 			break;
@@ -324,6 +329,53 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	wake_up(&cq->wait);
 }
 
+void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
+{
+	int i;
+
+	/*
+	 * In Tavor mode, the hardware keeps the consumer and producer
+	 * indices mod the CQ size.  Since we might be making the CQ
+	 * bigger, we need to deal with the case where the producer
+	 * index wrapped around before the CQ was resized.
+	 */
+	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
+	    cq->ibcq.cqe < cq->resize_buf->cqe) {
+		cq->cons_index &= cq->ibcq.cqe;
+		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
+			cq->cons_index -= cq->ibcq.cqe + 1;
+	}
+
+	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
+		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+					i & cq->resize_buf->cqe),
+		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
+}
+
+int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
+{
+	int ret;
+	int i;
+
+	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
+			      MTHCA_MAX_DIRECT_CQ_SIZE,
+			      &buf->queue, &buf->is_direct,
+			      &dev->driver_pd, 1, &buf->mr);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nent; ++i)
+		set_cqe_hw(get_cqe_from_buf(buf, i));
+
+	return 0;
+}
+
+void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
+{
+	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
+		       buf->is_direct, &buf->mr);
+}
+
 static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
 			     struct mthca_qp *qp, int wqe_index, int is_send,
 			     struct mthca_err_cqe *cqe,
@@ -609,11 +661,14 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 
 	spin_lock_irqsave(&cq->lock, flags);
 
-	for (npolled = 0; npolled < num_entries; ++npolled) {
+	npolled = 0;
+repoll:
+	while (npolled < num_entries) {
 		err = mthca_poll_one(dev, cq, &qp,
 				     &freed, entry + npolled);
 		if (err)
 			break;
+		++npolled;
 	}
 
 	if (freed) {
@@ -621,6 +676,42 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 		update_cons_index(dev, cq, freed);
 	}
 
+	/*
+	 * If a CQ resize is in progress and we discovered that the
+	 * old buffer is empty, then peek in the new buffer, and if
+	 * it's not empty, switch to the new buffer and continue
+	 * polling there.
+	 */
+	if (unlikely(err == -EAGAIN && cq->resize_buf &&
+		     cq->resize_buf->state == CQ_RESIZE_READY)) {
+		/*
+		 * In Tavor mode, the hardware keeps the producer
+		 * index modulo the CQ size.  Since we might be making
+		 * the CQ bigger, we need to mask our consumer index
+		 * using the size of the old CQ buffer before looking
+		 * in the new CQ buffer.
+		 */
+		if (!mthca_is_memfree(dev))
+			cq->cons_index &= cq->ibcq.cqe;
+
+		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
+					    cq->cons_index & cq->resize_buf->cqe))) {
+			struct mthca_cq_buf tbuf;
+			int tcqe;
+
+			tbuf = cq->buf;
+			tcqe = cq->ibcq.cqe;
+			cq->buf = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			cq->resize_buf->buf = tbuf;
+			cq->resize_buf->cqe = tcqe;
+			cq->resize_buf->state = CQ_RESIZE_SWAPPED;
+
+			goto repoll;
+		}
+	}
+
 	spin_unlock_irqrestore(&cq->lock, flags);
 
 	return err == 0 || err == -EAGAIN ? npolled : err;
@@ -679,22 +770,14 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 	return 0;
 }
 
-static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
-{
-	mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-		       &cq->queue, cq->is_direct, &cq->mr);
-}
-
 int mthca_init_cq(struct mthca_dev *dev, int nent,
 		  struct mthca_ucontext *ctx, u32 pdn,
 		  struct mthca_cq *cq)
 {
-	int size = nent * MTHCA_CQ_ENTRY_SIZE;
 	struct mthca_mailbox *mailbox;
 	struct mthca_cq_context *cq_context;
 	int err = -ENOMEM;
 	u8 status;
-	int i;
 
 	cq->ibcq.cqe = nent - 1;
 	cq->is_kernel = !ctx;
@@ -732,14 +815,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	cq_context = mailbox->buf;
 
 	if (cq->is_kernel) {
-		err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
-				      &cq->queue, &cq->is_direct,
-				      &dev->driver_pd, 1, &cq->mr);
+		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
 		if (err)
 			goto err_out_mailbox;
-
-		for (i = 0; i < nent; ++i)
-			set_cqe_hw(get_cqe(cq, i));
 	}
 
 	spin_lock_init(&cq->lock);
@@ -758,7 +836,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
 	cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
 	cq_context->pd = cpu_to_be32(pdn);
-	cq_context->lkey = cpu_to_be32(cq->mr.ibmr.lkey);
+	cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey);
 	cq_context->cqn = cpu_to_be32(cq->cqn);
 
 	if (mthca_is_memfree(dev)) {
@@ -796,7 +874,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 
 err_out_free_mr:
 	if (cq->is_kernel)
-		mthca_free_cq_buf(dev, cq);
+		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
 
 err_out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
@@ -862,7 +940,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	wait_event(cq->wait, !atomic_read(&cq->refcount));
 
 	if (cq->is_kernel) {
-		mthca_free_cq_buf(dev, cq);
+		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
 		if (mthca_is_memfree(dev)) {
 			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
 			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
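
A note on the indexing done by get_cqe_from_buf() above: mthca CQEs are MTHCA_CQ_ENTRY_SIZE (32) bytes, so for an indirect buffer entry N lives in page (N * 32) / PAGE_SIZE at byte offset (N * 32) % PAGE_SIZE within that page. A standalone sketch of the same arithmetic, assuming 32-byte entries and 4096-byte pages:

    #include <stdio.h>

    /* Sketch of the page/offset math used by get_cqe_from_buf() for
     * indirect CQ buffers; the entry and page sizes are assumptions here. */
    enum { CQ_ENTRY_SIZE = 32, PAGE_SZ = 4096 };

    int main(void)
    {
            int entry  = 200;                              /* arbitrary example index */
            int page   = entry * CQ_ENTRY_SIZE / PAGE_SZ;  /* 6400 / 4096 = 1    */
            int offset = entry * CQ_ENTRY_SIZE % PAGE_SZ;  /* 6400 % 4096 = 2304 */

            printf("entry %d -> page %d, offset %d\n", entry, page, offset);
            return 0;
    }
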
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index c98628ab8a09..d827558c27be 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
@@ -470,6 +470,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type);
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq);
+void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
+int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
+void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		    struct ib_srq_attr *attr, struct mthca_srq *srq);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index e88e39aef85a..cd2038bdd1ac 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
@@ -669,9 +669,9 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
 	}
 
 	if (context) {
-		cq->mr.ibmr.lkey = ucmd.lkey;
+		cq->buf.mr.ibmr.lkey = ucmd.lkey;
 		cq->set_ci_db_index = ucmd.set_db_index;
 		cq->arm_db_index = ucmd.arm_db_index;
 	}
 
 	for (nent = 1; nent <= entries; nent <<= 1)
@@ -689,6 +689,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
 		goto err_free;
 	}
 
+	cq->resize_buf = NULL;
+
 	return &cq->ibcq;
 
 err_free:
@@ -707,6 +709,121 @@ err_unmap_set:
 	return ERR_PTR(err);
 }
 
+static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
+				  int entries)
+{
+	int ret;
+
+	spin_lock_irq(&cq->lock);
+	if (cq->resize_buf) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	cq->resize_buf->state = CQ_RESIZE_ALLOC;
+
+	ret = 0;
+
+unlock:
+	spin_unlock_irq(&cq->lock);
+
+	if (ret)
+		return ret;
+
+	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+	if (ret) {
+		spin_lock_irq(&cq->lock);
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		spin_unlock_irq(&cq->lock);
+		return ret;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	spin_lock_irq(&cq->lock);
+	cq->resize_buf->state = CQ_RESIZE_READY;
+	spin_unlock_irq(&cq->lock);
+
+	return 0;
+}
+
+static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	struct mthca_dev *dev = to_mdev(ibcq->device);
+	struct mthca_cq *cq = to_mcq(ibcq);
+	struct mthca_resize_cq ucmd;
+	u32 lkey;
+	u8 status;
+	int ret;
+
+	if (entries < 1 || entries > dev->limits.max_cqes)
+		return -EINVAL;
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries == ibcq->cqe + 1)
+		return 0;
+
+	if (cq->is_kernel) {
+		ret = mthca_alloc_resize_buf(dev, cq, entries);
+		if (ret)
+			return ret;
+		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
+	} else {
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+			return -EFAULT;
+		lkey = ucmd.lkey;
+	}
+
+	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, long_log2(entries), &status);
+	if (status)
+		ret = -EINVAL;
+
+	if (ret) {
+		if (cq->resize_buf) {
+			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
+					  cq->resize_buf->cqe);
+			kfree(cq->resize_buf);
+			spin_lock_irq(&cq->lock);
+			cq->resize_buf = NULL;
+			spin_unlock_irq(&cq->lock);
+		}
+		return ret;
+	}
+
+	if (cq->is_kernel) {
+		struct mthca_cq_buf tbuf;
+		int tcqe;
+
+		spin_lock_irq(&cq->lock);
+		if (cq->resize_buf->state == CQ_RESIZE_READY) {
+			mthca_cq_resize_copy_cqes(cq);
+			tbuf = cq->buf;
+			tcqe = cq->ibcq.cqe;
+			cq->buf = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+		} else {
+			tbuf = cq->resize_buf->buf;
+			tcqe = cq->resize_buf->cqe;
+		}
+
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		spin_unlock_irq(&cq->lock);
+
+		mthca_free_cq_buf(dev, &tbuf, tcqe);
+	} else
+		ibcq->cqe = entries - 1;
+
+	return 0;
+}
+
 static int mthca_destroy_cq(struct ib_cq *cq)
 {
 	if (cq->uobject) {
@@ -1113,6 +1230,7 @@ int mthca_register_device(struct mthca_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
@@ -1154,6 +1272,7 @@ int mthca_register_device(struct mthca_dev *dev)
 	dev->ib_dev.modify_qp = mthca_modify_qp;
 	dev->ib_dev.destroy_qp = mthca_destroy_qp;
 	dev->ib_dev.create_cq = mthca_create_cq;
+	dev->ib_dev.resize_cq = mthca_resize_cq;
 	dev->ib_dev.destroy_cq = mthca_destroy_cq;
 	dev->ib_dev.poll_cq = mthca_poll_cq;
 	dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
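
With dev->ib_dev.resize_cq wired up, kernel consumers reach mthca_resize_cq() through the standard ib_resize_cq() verb. A minimal sketch of such a caller, assuming an already-created kernel CQ (mthca rounds the request up to a power of two, so ibcq->cqe may end up larger than asked for):

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    /* Sketch only: ask the provider to grow an existing CQ. */
    static int grow_cq(struct ib_cq *ibcq, int new_entries)
    {
            int err = ib_resize_cq(ibcq, new_entries);

            if (err)
                    printk(KERN_WARNING "CQ resize to %d entries failed: %d\n",
                           new_entries, err);
            return err;
    }
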
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1e73947b4702..2e7f52136965 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -164,9 +164,11 @@ struct mthca_ah {
  * - wait_event until ref count is zero
  *
  * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
+ * operations (WQE posting or state modification) are pending when a
  * QP is destroyed. Also, the consumer must make sure that calls to
- * qp_modify are serialized.
+ * qp_modify are serialized. Similarly, the consumer is responsible
+ * for ensuring that no CQ resize operations are pending when a CQ
+ * is destroyed.
  *
  * Possible optimizations (wait for profile data to see if/where we
  * have locks bouncing between CPUs):
@@ -176,25 +178,40 @@ struct mthca_ah {
  * send queue and one for the receive queue)
  */
 
+struct mthca_cq_buf {
+	union mthca_buf queue;
+	struct mthca_mr mr;
+	int is_direct;
+};
+
+struct mthca_cq_resize {
+	struct mthca_cq_buf buf;
+	int cqe;
+	enum {
+		CQ_RESIZE_ALLOC,
+		CQ_RESIZE_READY,
+		CQ_RESIZE_SWAPPED
+	} state;
+};
+
 struct mthca_cq {
 	struct ib_cq ibcq;
 	spinlock_t lock;
 	atomic_t refcount;
 	int cqn;
 	u32 cons_index;
-	int is_direct;
-	int is_kernel;
+	struct mthca_cq_buf buf;
+	struct mthca_cq_resize *resize_buf;
+	int is_kernel;
 
 	/* Next fields are Arbel only */
 	int set_ci_db_index;
 	__be32 *set_ci_db;
 	int arm_db_index;
 	__be32 *arm_db;
 	int arm_sn;
 
-	union mthca_buf queue;
-	struct mthca_mr mr;
-	wait_queue_head_t wait;
+	wait_queue_head_t wait;
 };
 
 struct mthca_srq {
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index bb015c6494c4..02cc0a766f3a 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -75,6 +75,11 @@ struct mthca_create_cq_resp {
 	__u32 reserved;
 };
 
+struct mthca_resize_cq {
+	__u32 lkey;
+	__u32 reserved;
+};
+
 struct mthca_create_srq {
 	__u32 lkey;
 	__u32 db_index;
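
struct mthca_resize_cq is the driver-private half of the resize ABI: userspace registers the new CQ buffer itself and hands the kernel only the lkey of that registration, which mthca_resize_cq() reads back with ib_copy_from_udata(). A sketch of how the userspace library side might fill it in, with the surrounding uverbs command framing omitted (layout copied from the struct above; the helper name is illustrative):

    #include <linux/types.h>

    /* Same layout as struct mthca_resize_cq in mthca_user.h. */
    struct mthca_resize_cq_cmd {
            __u32 lkey;
            __u32 reserved;
    };

    /* Sketch only: fill the driver-private part of the resize request. */
    static void fill_resize_cmd(struct mthca_resize_cq_cmd *cmd, __u32 new_buf_lkey)
    {
            cmd->lkey     = new_buf_lkey; /* lkey of the MR covering the new CQ buffer */
            cmd->reserved = 0;            /* unused padding; keeps the struct 8 bytes */
    }
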