diff options
author | Steve Wise <swise@opengridcomputing.com> | 2008-07-15 02:48:45 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-07-15 02:48:45 -0400 |
commit | e7e55829999deaab3f43e201a087731c02c54cf9 (patch) | |
tree | 665f2ff291045af9469bbb929f99224e9707965b /drivers/infiniband/hw/cxgb3/iwch_provider.c | |
parent | 00f7ec36c9324928e4cd23f02e6d8550f30c32ca (diff) |
RDMA/cxgb3: MEM_MGT_EXTENSIONS support
- set IB_DEVICE_MEM_MGT_EXTENSIONS capability bit if fw supports it.
- set max_fast_reg_page_list_len device attribute.
- add iwch_alloc_fast_reg_mr function.
- add iwch_alloc_fastreg_pbl function.
- add iwch_free_fastreg_pbl function.
- adjust the WQ depth for kernel mode work queues to account for
fastreg possibly taking 2 WR slots.
- add fastreg_mr work request support.
- add local_inv work request support.
- add send_with_inv and send_with_se_inv work request support.
- remove useless duplicate enums/defines for TPT/MW/MR stuff.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_provider.c')
-rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_provider.c | 104 |
1 files changed, 101 insertions, 3 deletions
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 95f82cfb6c54..5d504f3ed68b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -768,6 +768,68 @@ static int iwch_dealloc_mw(struct ib_mw *mw) | |||
768 | return 0; | 768 | return 0; |
769 | } | 769 | } |
770 | 770 | ||
771 | static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) | ||
772 | { | ||
773 | struct iwch_dev *rhp; | ||
774 | struct iwch_pd *php; | ||
775 | struct iwch_mr *mhp; | ||
776 | u32 mmid; | ||
777 | u32 stag = 0; | ||
778 | int ret; | ||
779 | |||
780 | php = to_iwch_pd(pd); | ||
781 | rhp = php->rhp; | ||
782 | mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); | ||
783 | if (!mhp) | ||
784 | return ERR_PTR(-ENOMEM); | ||
785 | |||
786 | mhp->rhp = rhp; | ||
787 | ret = iwch_alloc_pbl(mhp, pbl_depth); | ||
788 | if (ret) { | ||
789 | kfree(mhp); | ||
790 | return ERR_PTR(ret); | ||
791 | } | ||
792 | mhp->attr.pbl_size = pbl_depth; | ||
793 | ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, | ||
794 | mhp->attr.pbl_size, mhp->attr.pbl_addr); | ||
795 | if (ret) { | ||
796 | iwch_free_pbl(mhp); | ||
797 | kfree(mhp); | ||
798 | return ERR_PTR(ret); | ||
799 | } | ||
800 | mhp->attr.pdid = php->pdid; | ||
801 | mhp->attr.type = TPT_NON_SHARED_MR; | ||
802 | mhp->attr.stag = stag; | ||
803 | mhp->attr.state = 1; | ||
804 | mmid = (stag) >> 8; | ||
805 | mhp->ibmr.rkey = mhp->ibmr.lkey = stag; | ||
806 | insert_handle(rhp, &rhp->mmidr, mhp, mmid); | ||
807 | PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); | ||
808 | return &(mhp->ibmr); | ||
809 | } | ||
810 | |||
811 | static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( | ||
812 | struct ib_device *device, | ||
813 | int page_list_len) | ||
814 | { | ||
815 | struct ib_fast_reg_page_list *page_list; | ||
816 | |||
817 | page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64), | ||
818 | GFP_KERNEL); | ||
819 | if (!page_list) | ||
820 | return ERR_PTR(-ENOMEM); | ||
821 | |||
822 | page_list->page_list = (u64 *)(page_list + 1); | ||
823 | page_list->max_page_list_len = page_list_len; | ||
824 | |||
825 | return page_list; | ||
826 | } | ||
827 | |||
828 | static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list) | ||
829 | { | ||
830 | kfree(page_list); | ||
831 | } | ||
832 | |||
771 | static int iwch_destroy_qp(struct ib_qp *ib_qp) | 833 | static int iwch_destroy_qp(struct ib_qp *ib_qp) |
772 | { | 834 | { |
773 | struct iwch_dev *rhp; | 835 | struct iwch_dev *rhp; |
@@ -843,6 +905,15 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
843 | */ | 905 | */ |
844 | sqsize = roundup_pow_of_two(attrs->cap.max_send_wr); | 906 | sqsize = roundup_pow_of_two(attrs->cap.max_send_wr); |
845 | wqsize = roundup_pow_of_two(rqsize + sqsize); | 907 | wqsize = roundup_pow_of_two(rqsize + sqsize); |
908 | |||
909 | /* | ||
910 | * Kernel users need more wq space for fastreg WRs which can take | ||
911 | * 2 WR fragments. | ||
912 | */ | ||
913 | ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL; | ||
914 | if (!ucontext && wqsize < (rqsize + (2 * sqsize))) | ||
915 | wqsize = roundup_pow_of_two(rqsize + | ||
916 | roundup_pow_of_two(attrs->cap.max_send_wr * 2)); | ||
846 | PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__, | 917 | PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__, |
847 | wqsize, sqsize, rqsize); | 918 | wqsize, sqsize, rqsize); |
848 | qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); | 919 | qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); |
@@ -851,7 +922,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
851 | qhp->wq.size_log2 = ilog2(wqsize); | 922 | qhp->wq.size_log2 = ilog2(wqsize); |
852 | qhp->wq.rq_size_log2 = ilog2(rqsize); | 923 | qhp->wq.rq_size_log2 = ilog2(rqsize); |
853 | qhp->wq.sq_size_log2 = ilog2(sqsize); | 924 | qhp->wq.sq_size_log2 = ilog2(sqsize); |
854 | ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL; | ||
855 | if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, | 925 | if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, |
856 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) { | 926 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) { |
857 | kfree(qhp); | 927 | kfree(qhp); |
@@ -1048,6 +1118,7 @@ static int iwch_query_device(struct ib_device *ibdev, | |||
1048 | props->max_mr = dev->attr.max_mem_regs; | 1118 | props->max_mr = dev->attr.max_mem_regs; |
1049 | props->max_pd = dev->attr.max_pds; | 1119 | props->max_pd = dev->attr.max_pds; |
1050 | props->local_ca_ack_delay = 0; | 1120 | props->local_ca_ack_delay = 0; |
1121 | props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH; | ||
1051 | 1122 | ||
1052 | return 0; | 1123 | return 0; |
1053 | } | 1124 | } |
@@ -1088,6 +1159,28 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr, | |||
1088 | return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type); | 1159 | return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type); |
1089 | } | 1160 | } |
1090 | 1161 | ||
1162 | static int fw_supports_fastreg(struct iwch_dev *iwch_dev) | ||
1163 | { | ||
1164 | struct ethtool_drvinfo info; | ||
1165 | struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev; | ||
1166 | char *cp, *next; | ||
1167 | unsigned fw_maj, fw_min; | ||
1168 | |||
1169 | rtnl_lock(); | ||
1170 | lldev->ethtool_ops->get_drvinfo(lldev, &info); | ||
1171 | rtnl_unlock(); | ||
1172 | |||
1173 | next = info.fw_version+1; | ||
1174 | cp = strsep(&next, "."); | ||
1175 | sscanf(cp, "%i", &fw_maj); | ||
1176 | cp = strsep(&next, "."); | ||
1177 | sscanf(cp, "%i", &fw_min); | ||
1178 | |||
1179 | PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min); | ||
1180 | |||
1181 | return fw_maj > 6 || (fw_maj == 6 && fw_min > 0); | ||
1182 | } | ||
1183 | |||
1091 | static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf) | 1184 | static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf) |
1092 | { | 1185 | { |
1093 | struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev, | 1186 | struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev, |
@@ -1149,8 +1242,10 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1149 | memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); | 1242 | memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); |
1150 | memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); | 1243 | memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); |
1151 | dev->ibdev.owner = THIS_MODULE; | 1244 | dev->ibdev.owner = THIS_MODULE; |
1152 | dev->device_cap_flags = | 1245 | dev->device_cap_flags = IB_DEVICE_ZERO_STAG | |
1153 | (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW); | 1246 | IB_DEVICE_MEM_WINDOW; |
1247 | if (fw_supports_fastreg(dev)) | ||
1248 | dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | ||
1154 | 1249 | ||
1155 | dev->ibdev.uverbs_cmd_mask = | 1250 | dev->ibdev.uverbs_cmd_mask = |
1156 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | 1251 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | |
@@ -1202,6 +1297,9 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1202 | dev->ibdev.alloc_mw = iwch_alloc_mw; | 1297 | dev->ibdev.alloc_mw = iwch_alloc_mw; |
1203 | dev->ibdev.bind_mw = iwch_bind_mw; | 1298 | dev->ibdev.bind_mw = iwch_bind_mw; |
1204 | dev->ibdev.dealloc_mw = iwch_dealloc_mw; | 1299 | dev->ibdev.dealloc_mw = iwch_dealloc_mw; |
1300 | dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr; | ||
1301 | dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl; | ||
1302 | dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl; | ||
1205 | 1303 | ||
1206 | dev->ibdev.attach_mcast = iwch_multicast_attach; | 1304 | dev->ibdev.attach_mcast = iwch_multicast_attach; |
1207 | dev->ibdev.detach_mcast = iwch_multicast_detach; | 1305 | dev->ibdev.detach_mcast = iwch_multicast_detach; |