diff options
Diffstat (limited to 'drivers/infiniband')
33 files changed, 11574 insertions, 0 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 69a53d476b5..fd2d528daa3 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig | |||
| @@ -36,6 +36,7 @@ config INFINIBAND_ADDR_TRANS | |||
| 36 | 36 | ||
| 37 | source "drivers/infiniband/hw/mthca/Kconfig" | 37 | source "drivers/infiniband/hw/mthca/Kconfig" |
| 38 | source "drivers/infiniband/hw/ipath/Kconfig" | 38 | source "drivers/infiniband/hw/ipath/Kconfig" |
| 39 | source "drivers/infiniband/hw/ehca/Kconfig" | ||
| 39 | 40 | ||
| 40 | source "drivers/infiniband/ulp/ipoib/Kconfig" | 41 | source "drivers/infiniband/ulp/ipoib/Kconfig" |
| 41 | 42 | ||
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index c7ff58c1d0e..893bee0a50b 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | obj-$(CONFIG_INFINIBAND) += core/ | 1 | obj-$(CONFIG_INFINIBAND) += core/ |
| 2 | obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ | 2 | obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ |
| 3 | obj-$(CONFIG_IPATH_CORE) += hw/ipath/ | 3 | obj-$(CONFIG_IPATH_CORE) += hw/ipath/ |
| 4 | obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ | ||
| 4 | obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ | 5 | obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ |
| 5 | obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ | 6 | obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ |
| 6 | obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ | 7 | obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ |
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig new file mode 100644 index 00000000000..922389b6439 --- /dev/null +++ b/drivers/infiniband/hw/ehca/Kconfig | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | config INFINIBAND_EHCA | ||
| 2 | tristate "eHCA support" | ||
| 3 | depends on IBMEBUS && INFINIBAND | ||
| 4 | ---help--- | ||
| 5 | This driver supports the IBM pSeries eHCA InfiniBand adapter. | ||
| 6 | |||
| 7 | To compile the driver as a module, choose M here. The module | ||
| 8 | will be called ib_ehca. | ||
| 9 | |||
| 10 | config INFINIBAND_EHCA_SCALING | ||
| 11 | bool "Scaling support (EXPERIMENTAL)" | ||
| 12 | depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL | ||
| 13 | ---help--- | ||
| 14 | eHCA scaling support schedules the CQ callbacks to different CPUs. | ||
| 15 | |||
| 16 | To enable this feature choose Y here. | ||
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/infiniband/hw/ehca/Makefile new file mode 100644 index 00000000000..74d284e46a4 --- /dev/null +++ b/drivers/infiniband/hw/ehca/Makefile | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | # Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 2 | # Christoph Raisch <raisch@de.ibm.com> | ||
| 3 | # Joachim Fenkes <fenkes@de.ibm.com> | ||
| 4 | # | ||
| 5 | # Copyright (c) 2005 IBM Corporation | ||
| 6 | # | ||
| 7 | # All rights reserved. | ||
| 8 | # | ||
| 9 | # This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD. | ||
| 10 | |||
| 11 | obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o | ||
| 12 | |||
| 13 | ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \ | ||
| 14 | ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \ | ||
| 15 | ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o | ||
| 16 | |||
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c new file mode 100644 index 00000000000..3bac197f901 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_av.c | |||
| @@ -0,0 +1,271 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * address vector functions | ||
| 5 | * | ||
| 6 | * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 7 | * Khadija Souissi <souissik@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | |||
| 45 | #include <asm/current.h> | ||
| 46 | |||
| 47 | #include "ehca_tools.h" | ||
| 48 | #include "ehca_iverbs.h" | ||
| 49 | #include "hcp_if.h" | ||
| 50 | |||
| 51 | static struct kmem_cache *av_cache; | ||
| 52 | |||
| 53 | struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | ||
| 54 | { | ||
| 55 | int ret; | ||
| 56 | struct ehca_av *av; | ||
| 57 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, | ||
| 58 | ib_device); | ||
| 59 | |||
| 60 | av = kmem_cache_alloc(av_cache, SLAB_KERNEL); | ||
| 61 | if (!av) { | ||
| 62 | ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", | ||
| 63 | pd, ah_attr); | ||
| 64 | return ERR_PTR(-ENOMEM); | ||
| 65 | } | ||
| 66 | |||
| 67 | av->av.sl = ah_attr->sl; | ||
| 68 | av->av.dlid = ah_attr->dlid; | ||
| 69 | av->av.slid_path_bits = ah_attr->src_path_bits; | ||
| 70 | |||
| 71 | if (ehca_static_rate < 0) { | ||
| 72 | int ah_mult = ib_rate_to_mult(ah_attr->static_rate); | ||
| 73 | int ehca_mult = | ||
| 74 | ib_rate_to_mult(shca->sport[ah_attr->port_num].rate ); | ||
| 75 | |||
| 76 | if (ah_mult >= ehca_mult) | ||
| 77 | av->av.ipd = 0; | ||
| 78 | else | ||
| 79 | av->av.ipd = (ah_mult > 0) ? | ||
| 80 | ((ehca_mult - 1) / ah_mult) : 0; | ||
| 81 | } else | ||
| 82 | av->av.ipd = ehca_static_rate; | ||
| 83 | |||
| 84 | av->av.lnh = ah_attr->ah_flags; | ||
| 85 | av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6); | ||
| 86 | av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK, | ||
| 87 | ah_attr->grh.traffic_class); | ||
| 88 | av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK, | ||
| 89 | ah_attr->grh.flow_label); | ||
| 90 | av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK, | ||
| 91 | ah_attr->grh.hop_limit); | ||
| 92 | av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B); | ||
| 93 | /* set sgid in grh.word_1 */ | ||
| 94 | if (ah_attr->ah_flags & IB_AH_GRH) { | ||
| 95 | int rc; | ||
| 96 | struct ib_port_attr port_attr; | ||
| 97 | union ib_gid gid; | ||
| 98 | memset(&port_attr, 0, sizeof(port_attr)); | ||
| 99 | rc = ehca_query_port(pd->device, ah_attr->port_num, | ||
| 100 | &port_attr); | ||
| 101 | if (rc) { /* invalid port number */ | ||
| 102 | ret = -EINVAL; | ||
| 103 | ehca_err(pd->device, "Invalid port number " | ||
| 104 | "ehca_query_port() returned %x " | ||
| 105 | "pd=%p ah_attr=%p", rc, pd, ah_attr); | ||
| 106 | goto create_ah_exit1; | ||
| 107 | } | ||
| 108 | memset(&gid, 0, sizeof(gid)); | ||
| 109 | rc = ehca_query_gid(pd->device, | ||
| 110 | ah_attr->port_num, | ||
| 111 | ah_attr->grh.sgid_index, &gid); | ||
| 112 | if (rc) { | ||
| 113 | ret = -EINVAL; | ||
| 114 | ehca_err(pd->device, "Failed to retrieve sgid " | ||
| 115 | "ehca_query_gid() returned %x " | ||
| 116 | "pd=%p ah_attr=%p", rc, pd, ah_attr); | ||
| 117 | goto create_ah_exit1; | ||
| 118 | } | ||
| 119 | memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); | ||
| 120 | } | ||
| 121 | /* for the time being we use a hard coded PMTU of 2048 Bytes */ | ||
| 122 | av->av.pmtu = 4; | ||
| 123 | |||
| 124 | /* dgid comes in grh.word_3 */ | ||
| 125 | memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, | ||
| 126 | sizeof(ah_attr->grh.dgid)); | ||
| 127 | |||
| 128 | return &av->ib_ah; | ||
| 129 | |||
| 130 | create_ah_exit1: | ||
| 131 | kmem_cache_free(av_cache, av); | ||
| 132 | |||
| 133 | return ERR_PTR(ret); | ||
| 134 | } | ||
| 135 | |||
| 136 | int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) | ||
| 137 | { | ||
| 138 | struct ehca_av *av; | ||
| 139 | struct ehca_ud_av new_ehca_av; | ||
| 140 | struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); | ||
| 141 | u32 cur_pid = current->tgid; | ||
| 142 | |||
| 143 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 144 | my_pd->ownpid != cur_pid) { | ||
| 145 | ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", | ||
| 146 | cur_pid, my_pd->ownpid); | ||
| 147 | return -EINVAL; | ||
| 148 | } | ||
| 149 | |||
| 150 | memset(&new_ehca_av, 0, sizeof(new_ehca_av)); | ||
| 151 | new_ehca_av.sl = ah_attr->sl; | ||
| 152 | new_ehca_av.dlid = ah_attr->dlid; | ||
| 153 | new_ehca_av.slid_path_bits = ah_attr->src_path_bits; | ||
| 154 | new_ehca_av.ipd = ah_attr->static_rate; | ||
| 155 | new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK, | ||
| 156 | (ah_attr->ah_flags & IB_AH_GRH) > 0); | ||
| 157 | new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK, | ||
| 158 | ah_attr->grh.traffic_class); | ||
| 159 | new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK, | ||
| 160 | ah_attr->grh.flow_label); | ||
| 161 | new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK, | ||
| 162 | ah_attr->grh.hop_limit); | ||
| 163 | new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b); | ||
| 164 | |||
| 165 | /* set sgid in grh.word_1 */ | ||
| 166 | if (ah_attr->ah_flags & IB_AH_GRH) { | ||
| 167 | int rc; | ||
| 168 | struct ib_port_attr port_attr; | ||
| 169 | union ib_gid gid; | ||
| 170 | memset(&port_attr, 0, sizeof(port_attr)); | ||
| 171 | rc = ehca_query_port(ah->device, ah_attr->port_num, | ||
| 172 | &port_attr); | ||
| 173 | if (rc) { /* invalid port number */ | ||
| 174 | ehca_err(ah->device, "Invalid port number " | ||
| 175 | "ehca_query_port() returned %x " | ||
| 176 | "ah=%p ah_attr=%p port_num=%x", | ||
| 177 | rc, ah, ah_attr, ah_attr->port_num); | ||
| 178 | return -EINVAL; | ||
| 179 | } | ||
| 180 | memset(&gid, 0, sizeof(gid)); | ||
| 181 | rc = ehca_query_gid(ah->device, | ||
| 182 | ah_attr->port_num, | ||
| 183 | ah_attr->grh.sgid_index, &gid); | ||
| 184 | if (rc) { | ||
| 185 | ehca_err(ah->device, "Failed to retrieve sgid " | ||
| 186 | "ehca_query_gid() returned %x " | ||
| 187 | "ah=%p ah_attr=%p port_num=%x " | ||
| 188 | "sgid_index=%x", | ||
| 189 | rc, ah, ah_attr, ah_attr->port_num, | ||
| 190 | ah_attr->grh.sgid_index); | ||
| 191 | return -EINVAL; | ||
| 192 | } | ||
| 193 | memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); | ||
| 194 | } | ||
| 195 | |||
| 196 | new_ehca_av.pmtu = 4; /* see also comment in create_ah() */ | ||
| 197 | |||
| 198 | memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, | ||
| 199 | sizeof(ah_attr->grh.dgid)); | ||
| 200 | |||
| 201 | av = container_of(ah, struct ehca_av, ib_ah); | ||
| 202 | av->av = new_ehca_av; | ||
| 203 | |||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 207 | int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) | ||
| 208 | { | ||
| 209 | struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah); | ||
| 210 | struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); | ||
| 211 | u32 cur_pid = current->tgid; | ||
| 212 | |||
| 213 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 214 | my_pd->ownpid != cur_pid) { | ||
| 215 | ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", | ||
| 216 | cur_pid, my_pd->ownpid); | ||
| 217 | return -EINVAL; | ||
| 218 | } | ||
| 219 | |||
| 220 | memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3, | ||
| 221 | sizeof(ah_attr->grh.dgid)); | ||
| 222 | ah_attr->sl = av->av.sl; | ||
| 223 | |||
| 224 | ah_attr->dlid = av->av.dlid; | ||
| 225 | |||
| 226 | ah_attr->src_path_bits = av->av.slid_path_bits; | ||
| 227 | ah_attr->static_rate = av->av.ipd; | ||
| 228 | ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh); | ||
| 229 | ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK, | ||
| 230 | av->av.grh.word_0); | ||
| 231 | ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK, | ||
| 232 | av->av.grh.word_0); | ||
| 233 | ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK, | ||
| 234 | av->av.grh.word_0); | ||
| 235 | |||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | int ehca_destroy_ah(struct ib_ah *ah) | ||
| 240 | { | ||
| 241 | struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); | ||
| 242 | u32 cur_pid = current->tgid; | ||
| 243 | |||
| 244 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 245 | my_pd->ownpid != cur_pid) { | ||
| 246 | ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", | ||
| 247 | cur_pid, my_pd->ownpid); | ||
| 248 | return -EINVAL; | ||
| 249 | } | ||
| 250 | |||
| 251 | kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah)); | ||
| 252 | |||
| 253 | return 0; | ||
| 254 | } | ||
| 255 | |||
| 256 | int ehca_init_av_cache(void) | ||
| 257 | { | ||
| 258 | av_cache = kmem_cache_create("ehca_cache_av", | ||
| 259 | sizeof(struct ehca_av), 0, | ||
| 260 | SLAB_HWCACHE_ALIGN, | ||
| 261 | NULL, NULL); | ||
| 262 | if (!av_cache) | ||
| 263 | return -ENOMEM; | ||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | |||
| 267 | void ehca_cleanup_av_cache(void) | ||
| 268 | { | ||
| 269 | if (av_cache) | ||
| 270 | kmem_cache_destroy(av_cache); | ||
| 271 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h new file mode 100644 index 00000000000..1c722032319 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
| @@ -0,0 +1,346 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Struct definition for eHCA internal structures | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #ifndef __EHCA_CLASSES_H__ | ||
| 43 | #define __EHCA_CLASSES_H__ | ||
| 44 | |||
| 45 | #include "ehca_classes.h" | ||
| 46 | #include "ipz_pt_fn.h" | ||
| 47 | |||
| 48 | struct ehca_module; | ||
| 49 | struct ehca_qp; | ||
| 50 | struct ehca_cq; | ||
| 51 | struct ehca_eq; | ||
| 52 | struct ehca_mr; | ||
| 53 | struct ehca_mw; | ||
| 54 | struct ehca_pd; | ||
| 55 | struct ehca_av; | ||
| 56 | |||
| 57 | #ifdef CONFIG_PPC64 | ||
| 58 | #include "ehca_classes_pSeries.h" | ||
| 59 | #endif | ||
| 60 | |||
| 61 | #include <rdma/ib_verbs.h> | ||
| 62 | #include <rdma/ib_user_verbs.h> | ||
| 63 | |||
| 64 | #include "ehca_irq.h" | ||
| 65 | |||
| 66 | struct ehca_eq { | ||
| 67 | u32 length; | ||
| 68 | struct ipz_queue ipz_queue; | ||
| 69 | struct ipz_eq_handle ipz_eq_handle; | ||
| 70 | struct work_struct work; | ||
| 71 | struct h_galpas galpas; | ||
| 72 | int is_initialized; | ||
| 73 | struct ehca_pfeq pf; | ||
| 74 | spinlock_t spinlock; | ||
| 75 | struct tasklet_struct interrupt_task; | ||
| 76 | u32 ist; | ||
| 77 | }; | ||
| 78 | |||
| 79 | struct ehca_sport { | ||
| 80 | struct ib_cq *ibcq_aqp1; | ||
| 81 | struct ib_qp *ibqp_aqp1; | ||
| 82 | enum ib_rate rate; | ||
| 83 | enum ib_port_state port_state; | ||
| 84 | }; | ||
| 85 | |||
| 86 | struct ehca_shca { | ||
| 87 | struct ib_device ib_device; | ||
| 88 | struct ibmebus_dev *ibmebus_dev; | ||
| 89 | u8 num_ports; | ||
| 90 | int hw_level; | ||
| 91 | struct list_head shca_list; | ||
| 92 | struct ipz_adapter_handle ipz_hca_handle; | ||
| 93 | struct ehca_sport sport[2]; | ||
| 94 | struct ehca_eq eq; | ||
| 95 | struct ehca_eq neq; | ||
| 96 | struct ehca_mr *maxmr; | ||
| 97 | struct ehca_pd *pd; | ||
| 98 | struct h_galpas galpas; | ||
| 99 | }; | ||
| 100 | |||
| 101 | struct ehca_pd { | ||
| 102 | struct ib_pd ib_pd; | ||
| 103 | struct ipz_pd fw_pd; | ||
| 104 | u32 ownpid; | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct ehca_qp { | ||
| 108 | struct ib_qp ib_qp; | ||
| 109 | u32 qp_type; | ||
| 110 | struct ipz_queue ipz_squeue; | ||
| 111 | struct ipz_queue ipz_rqueue; | ||
| 112 | struct h_galpas galpas; | ||
| 113 | u32 qkey; | ||
| 114 | u32 real_qp_num; | ||
| 115 | u32 token; | ||
| 116 | spinlock_t spinlock_s; | ||
| 117 | spinlock_t spinlock_r; | ||
| 118 | u32 sq_max_inline_data_size; | ||
| 119 | struct ipz_qp_handle ipz_qp_handle; | ||
| 120 | struct ehca_pfqp pf; | ||
| 121 | struct ib_qp_init_attr init_attr; | ||
| 122 | u64 uspace_squeue; | ||
| 123 | u64 uspace_rqueue; | ||
| 124 | u64 uspace_fwh; | ||
| 125 | struct ehca_cq *send_cq; | ||
| 126 | struct ehca_cq *recv_cq; | ||
| 127 | unsigned int sqerr_purgeflag; | ||
| 128 | struct hlist_node list_entries; | ||
| 129 | }; | ||
| 130 | |||
| 131 | /* must be power of 2 */ | ||
| 132 | #define QP_HASHTAB_LEN 8 | ||
| 133 | |||
| 134 | struct ehca_cq { | ||
| 135 | struct ib_cq ib_cq; | ||
| 136 | struct ipz_queue ipz_queue; | ||
| 137 | struct h_galpas galpas; | ||
| 138 | spinlock_t spinlock; | ||
| 139 | u32 cq_number; | ||
| 140 | u32 token; | ||
| 141 | u32 nr_of_entries; | ||
| 142 | struct ipz_cq_handle ipz_cq_handle; | ||
| 143 | struct ehca_pfcq pf; | ||
| 144 | spinlock_t cb_lock; | ||
| 145 | u64 uspace_queue; | ||
| 146 | u64 uspace_fwh; | ||
| 147 | struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; | ||
| 148 | struct list_head entry; | ||
| 149 | u32 nr_callbacks; | ||
| 150 | spinlock_t task_lock; | ||
| 151 | u32 ownpid; | ||
| 152 | }; | ||
| 153 | |||
| 154 | enum ehca_mr_flag { | ||
| 155 | EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */ | ||
| 156 | EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */ | ||
| 157 | }; | ||
| 158 | |||
| 159 | struct ehca_mr { | ||
| 160 | union { | ||
| 161 | struct ib_mr ib_mr; /* must always be first in ehca_mr */ | ||
| 162 | struct ib_fmr ib_fmr; /* must always be first in ehca_mr */ | ||
| 163 | } ib; | ||
| 164 | spinlock_t mrlock; | ||
| 165 | |||
| 166 | enum ehca_mr_flag flags; | ||
| 167 | u32 num_pages; /* number of MR pages */ | ||
| 168 | u32 num_4k; /* number of 4k "page" portions to form MR */ | ||
| 169 | int acl; /* ACL (stored here for usage in reregister) */ | ||
| 170 | u64 *start; /* virtual start address (stored here for */ | ||
| 171 | /* usage in reregister) */ | ||
| 172 | u64 size; /* size (stored here for usage in reregister) */ | ||
| 173 | u32 fmr_page_size; /* page size for FMR */ | ||
| 174 | u32 fmr_max_pages; /* max pages for FMR */ | ||
| 175 | u32 fmr_max_maps; /* max outstanding maps for FMR */ | ||
| 176 | u32 fmr_map_cnt; /* map counter for FMR */ | ||
| 177 | /* fw specific data */ | ||
| 178 | struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */ | ||
| 179 | struct h_galpas galpas; | ||
| 180 | /* data for userspace bridge */ | ||
| 181 | u32 nr_of_pages; | ||
| 182 | void *pagearray; | ||
| 183 | }; | ||
| 184 | |||
| 185 | struct ehca_mw { | ||
| 186 | struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */ | ||
| 187 | spinlock_t mwlock; | ||
| 188 | |||
| 189 | u8 never_bound; /* indication MW was never bound */ | ||
| 190 | struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */ | ||
| 191 | struct h_galpas galpas; | ||
| 192 | }; | ||
| 193 | |||
| 194 | enum ehca_mr_pgi_type { | ||
| 195 | EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr, | ||
| 196 | * ehca_rereg_phys_mr, | ||
| 197 | * ehca_reg_internal_maxmr */ | ||
| 198 | EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */ | ||
| 199 | EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */ | ||
| 200 | }; | ||
| 201 | |||
| 202 | struct ehca_mr_pginfo { | ||
| 203 | enum ehca_mr_pgi_type type; | ||
| 204 | u64 num_pages; | ||
| 205 | u64 page_cnt; | ||
| 206 | u64 num_4k; /* number of 4k "page" portions */ | ||
| 207 | u64 page_4k_cnt; /* counter for 4k "page" portions */ | ||
| 208 | u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */ | ||
| 209 | |||
| 210 | /* type EHCA_MR_PGI_PHYS section */ | ||
| 211 | int num_phys_buf; | ||
| 212 | struct ib_phys_buf *phys_buf_array; | ||
| 213 | u64 next_buf; | ||
| 214 | |||
| 215 | /* type EHCA_MR_PGI_USER section */ | ||
| 216 | struct ib_umem *region; | ||
| 217 | struct ib_umem_chunk *next_chunk; | ||
| 218 | u64 next_nmap; | ||
| 219 | |||
| 220 | /* type EHCA_MR_PGI_FMR section */ | ||
| 221 | u64 *page_list; | ||
| 222 | u64 next_listelem; | ||
| 223 | /* next_4k also used within EHCA_MR_PGI_FMR */ | ||
| 224 | }; | ||
| 225 | |||
| 226 | /* output parameters for MR/FMR hipz calls */ | ||
| 227 | struct ehca_mr_hipzout_parms { | ||
| 228 | struct ipz_mrmw_handle handle; | ||
| 229 | u32 lkey; | ||
| 230 | u32 rkey; | ||
| 231 | u64 len; | ||
| 232 | u64 vaddr; | ||
| 233 | u32 acl; | ||
| 234 | }; | ||
| 235 | |||
| 236 | /* output parameters for MW hipz calls */ | ||
| 237 | struct ehca_mw_hipzout_parms { | ||
| 238 | struct ipz_mrmw_handle handle; | ||
| 239 | u32 rkey; | ||
| 240 | }; | ||
| 241 | |||
| 242 | struct ehca_av { | ||
| 243 | struct ib_ah ib_ah; | ||
| 244 | struct ehca_ud_av av; | ||
| 245 | }; | ||
| 246 | |||
| 247 | struct ehca_ucontext { | ||
| 248 | struct ib_ucontext ib_ucontext; | ||
| 249 | }; | ||
| 250 | |||
| 251 | struct ehca_module *ehca_module_new(void); | ||
| 252 | |||
| 253 | int ehca_module_delete(struct ehca_module *me); | ||
| 254 | |||
| 255 | int ehca_eq_ctor(struct ehca_eq *eq); | ||
| 256 | |||
| 257 | int ehca_eq_dtor(struct ehca_eq *eq); | ||
| 258 | |||
| 259 | struct ehca_shca *ehca_shca_new(void); | ||
| 260 | |||
| 261 | int ehca_shca_delete(struct ehca_shca *me); | ||
| 262 | |||
| 263 | struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor); | ||
| 264 | |||
| 265 | int ehca_init_pd_cache(void); | ||
| 266 | void ehca_cleanup_pd_cache(void); | ||
| 267 | int ehca_init_cq_cache(void); | ||
| 268 | void ehca_cleanup_cq_cache(void); | ||
| 269 | int ehca_init_qp_cache(void); | ||
| 270 | void ehca_cleanup_qp_cache(void); | ||
| 271 | int ehca_init_av_cache(void); | ||
| 272 | void ehca_cleanup_av_cache(void); | ||
| 273 | int ehca_init_mrmw_cache(void); | ||
| 274 | void ehca_cleanup_mrmw_cache(void); | ||
| 275 | |||
| 276 | extern spinlock_t ehca_qp_idr_lock; | ||
| 277 | extern spinlock_t ehca_cq_idr_lock; | ||
| 278 | extern struct idr ehca_qp_idr; | ||
| 279 | extern struct idr ehca_cq_idr; | ||
| 280 | |||
| 281 | extern int ehca_static_rate; | ||
| 282 | extern int ehca_port_act_time; | ||
| 283 | extern int ehca_use_hp_mr; | ||
| 284 | |||
| 285 | struct ipzu_queue_resp { | ||
| 286 | u64 queue; /* points to first queue entry */ | ||
| 287 | u32 qe_size; /* queue entry size */ | ||
| 288 | u32 act_nr_of_sg; | ||
| 289 | u32 queue_length; /* queue length allocated in bytes */ | ||
| 290 | u32 pagesize; | ||
| 291 | u32 toggle_state; | ||
| 292 | u32 dummy; /* padding for 8 byte alignment */ | ||
| 293 | }; | ||
| 294 | |||
| 295 | struct ehca_create_cq_resp { | ||
| 296 | u32 cq_number; | ||
| 297 | u32 token; | ||
| 298 | struct ipzu_queue_resp ipz_queue; | ||
| 299 | struct h_galpas galpas; | ||
| 300 | }; | ||
| 301 | |||
| 302 | struct ehca_create_qp_resp { | ||
| 303 | u32 qp_num; | ||
| 304 | u32 token; | ||
| 305 | u32 qp_type; | ||
| 306 | u32 qkey; | ||
| 307 | /* qp_num assigned by ehca: sqp0/1 may have got different numbers */ | ||
| 308 | u32 real_qp_num; | ||
| 309 | u32 dummy; /* padding for 8 byte alignment */ | ||
| 310 | struct ipzu_queue_resp ipz_squeue; | ||
| 311 | struct ipzu_queue_resp ipz_rqueue; | ||
| 312 | struct h_galpas galpas; | ||
| 313 | }; | ||
| 314 | |||
| 315 | struct ehca_alloc_cq_parms { | ||
| 316 | u32 nr_cqe; | ||
| 317 | u32 act_nr_of_entries; | ||
| 318 | u32 act_pages; | ||
| 319 | struct ipz_eq_handle eq_handle; | ||
| 320 | }; | ||
| 321 | |||
| 322 | struct ehca_alloc_qp_parms { | ||
| 323 | int servicetype; | ||
| 324 | int sigtype; | ||
| 325 | int daqp_ctrl; | ||
| 326 | int max_send_sge; | ||
| 327 | int max_recv_sge; | ||
| 328 | int ud_av_l_key_ctl; | ||
| 329 | |||
| 330 | u16 act_nr_send_wqes; | ||
| 331 | u16 act_nr_recv_wqes; | ||
| 332 | u8 act_nr_recv_sges; | ||
| 333 | u8 act_nr_send_sges; | ||
| 334 | |||
| 335 | u32 nr_rq_pages; | ||
| 336 | u32 nr_sq_pages; | ||
| 337 | |||
| 338 | struct ipz_eq_handle ipz_eq_handle; | ||
| 339 | struct ipz_pd pd; | ||
| 340 | }; | ||
| 341 | |||
| 342 | int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp); | ||
| 343 | int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num); | ||
| 344 | struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num); | ||
| 345 | |||
| 346 | #endif | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h new file mode 100644 index 00000000000..5665f213b81 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h | |||
| @@ -0,0 +1,236 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * pSeries interface definitions | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
#ifndef __EHCA_CLASSES_PSERIES_H__
#define __EHCA_CLASSES_PSERIES_H__

#include "hcp_phyp.h"
#include "ipz_pt_fn.h"


/* pSeries-private part of a queue pair: one queue page table for the
 * send queue and one for the receive queue. */
struct ehca_pfqp {
	struct ipz_qpt sqpt;
	struct ipz_qpt rqpt;
};

/* pSeries-private part of a completion queue: its queue page table and
 * CQ number. */
struct ehca_pfcq {
	struct ipz_qpt qpt;
	u32 cqnr;
};

/* pSeries-private part of an event queue: queue page table, galpa
 * (firmware access handle) and EQ number. */
struct ehca_pfeq {
	struct ipz_qpt qpt;
	struct h_galpa galpa;
	u32 eqnr;
};

/* 64-bit opaque handles identifying firmware resources; passed to the
 * hipz_h_* hypervisor-call wrappers (see e.g. hipz_h_alloc_resource_cq()
 * usage in ehca_cq.c). */
struct ipz_adapter_handle {
	u64 handle;
};

struct ipz_cq_handle {
	u64 handle;
};

struct ipz_eq_handle {
	u64 handle;
};

struct ipz_qp_handle {
	u64 handle;
};
/* memory region / memory window handle */
struct ipz_mrmw_handle {
	u64 handle;
};

/* protection domain identifier */
struct ipz_pd {
	u32 value;
};

/*
 * Control block exchanged with the hypervisor on modify-QP calls.
 *
 * The trailing numeric comments give each field's 32-bit word offset
 * within the block; the reserved_* fields keep subsequent offsets
 * aligned and reserved_70_127 pads the block to 128 words.  This is a
 * firmware-defined layout: do NOT reorder or resize fields.
 * The *_al fields mirror the primary-path fields for the alternate path.
 */
struct hcp_modify_qp_control_block {
	u32 qkey;                      /* 00 */
	u32 rdd;                       /* reliable datagram domain */
	u32 send_psn;                  /* 02 */
	u32 receive_psn;               /* 03 */
	u32 prim_phys_port;            /* 04 */
	u32 alt_phys_port;             /* 05 */
	u32 prim_p_key_idx;            /* 06 */
	u32 alt_p_key_idx;             /* 07 */
	u32 rdma_atomic_ctrl;          /* 08 */
	u32 qp_state;                  /* 09 */
	u32 reserved_10;               /* 10 */
	u32 rdma_nr_atomic_resp_res;   /* 11 */
	u32 path_migration_state;      /* 12 */
	u32 rdma_atomic_outst_dest_qp; /* 13 */
	u32 dest_qp_nr;                /* 14 */
	u32 min_rnr_nak_timer_field;   /* 15 */
	u32 service_level;             /* 16 */
	u32 send_grh_flag;             /* 17 */
	u32 retry_count;               /* 18 */
	u32 timeout;                   /* 19 */
	u32 path_mtu;                  /* 20 */
	u32 max_static_rate;           /* 21 */
	u32 dlid;                      /* 22 */
	u32 rnr_retry_count;           /* 23 */
	u32 source_path_bits;          /* 24 */
	u32 traffic_class;             /* 25 */
	u32 hop_limit;                 /* 26 */
	u32 source_gid_idx;            /* 27 */
	u32 flow_label;                /* 28 */
	u32 reserved_29;               /* 29 */
	union {                        /* 30 */
		u64 dw[2];
		u8 byte[16];
	} dest_gid;
	u32 service_level_al;          /* 34 */
	u32 send_grh_flag_al;          /* 35 */
	u32 retry_count_al;            /* 36 */
	u32 timeout_al;                /* 37 */
	u32 max_static_rate_al;        /* 38 */
	u32 dlid_al;                   /* 39 */
	u32 rnr_retry_count_al;        /* 40 */
	u32 source_path_bits_al;       /* 41 */
	u32 traffic_class_al;          /* 42 */
	u32 hop_limit_al;              /* 43 */
	u32 source_gid_idx_al;         /* 44 */
	u32 flow_label_al;             /* 45 */
	u32 reserved_46;               /* 46 */
	u32 reserved_47;               /* 47 */
	union {                        /* 48 */
		u64 dw[2];
		u8 byte[16];
	} dest_gid_al;
	u32 max_nr_outst_send_wr;      /* 52 */
	u32 max_nr_outst_recv_wr;      /* 53 */
	u32 disable_ete_credit_check;  /* 54 */
	u32 qp_number;                 /* 55 */
	u64 send_queue_handle;         /* 56 */
	u64 recv_queue_handle;         /* 58 */
	u32 actual_nr_sges_in_sq_wqe;  /* 60 */
	u32 actual_nr_sges_in_rq_wqe;  /* 61 */
	u32 qp_enable;                 /* 62 */
	u32 curr_srq_limit;            /* 63 */
	u64 qp_aff_asyn_ev_log_reg;    /* 64 */
	u64 shared_rq_hndl;            /* 66 */
	u64 trigg_doorbell_qp_hndl;    /* 68 */
	u32 reserved_70_127[58];       /* 70 */
};
| 156 | |||
/*
 * Modify-QP control block bit masks.
 *
 * MQPCB_MASK_* values select which fields of struct
 * hcp_modify_qp_control_block are valid for a modify-QP call; each
 * mask's bit index matches the word offset of the corresponding field
 * above (e.g. qkey at word 00 <-> bit 0, send_psn at word 02 <-> bit 2,
 * the *_al fields likewise).
 *
 * The defines without _MASK_ describe the bit range a value occupies
 * inside its 32-bit control-block word (presumably IBM big-endian bit
 * numbering via EHCA_BMASK_IBM; definition not visible here — confirm
 * in ehca_tools.h).
 */
#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM(0,0)
#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM(2,2)
#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM(3,3)
#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM(4,4)
#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM(5,5)
#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM(6,6)
#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM(7,7)
#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM(8,8)
#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM(9,9)
#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11,11)
#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12,12)
#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13,13)
#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14,14)
#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15,15)
#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16,16)
#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17,17)
#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18,18)
#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19,19)
#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20,20)
#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21,21)
#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22,22)
#define MQPCB_DLID                              EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23,23)
#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24,24)
#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25,31)
#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25,25)
#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26,26)
#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27,27)
#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28,28)
#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12,31)
#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30,30)
#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31,31)
#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28,31)
#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32,32)
#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31,31)
#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33,33)
#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34,34)
#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27,31)
#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35,35)
#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36,36)
#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37,37)
#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38,38)
#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25,31)
#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39,39)
#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40,40)
#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41,41)
#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42,42)
#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12,31)
#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44,44)
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45,45)
#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46,46)
#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47,47)
#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31,31)
#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM(8,31)
#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48,48)
#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31,31)
#define MQPCB_MASK_CURR_SQR_LIMIT               EHCA_BMASK_IBM(49,49)
#define MQPCB_CURR_SQR_LIMIT                    EHCA_BMASK_IBM(15,31)
#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50,50)
#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51,51)

#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c new file mode 100644 index 00000000000..458fe19648a --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_cq.c | |||
| @@ -0,0 +1,427 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Completion queue handling | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Khadija Souissi <souissi@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 10 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 11 | * | ||
| 12 | * | ||
| 13 | * Copyright (c) 2005 IBM Corporation | ||
| 14 | * | ||
| 15 | * All rights reserved. | ||
| 16 | * | ||
| 17 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 18 | * BSD. | ||
| 19 | * | ||
| 20 | * OpenIB BSD License | ||
| 21 | * | ||
| 22 | * Redistribution and use in source and binary forms, with or without | ||
| 23 | * modification, are permitted provided that the following conditions are met: | ||
| 24 | * | ||
| 25 | * Redistributions of source code must retain the above copyright notice, this | ||
| 26 | * list of conditions and the following disclaimer. | ||
| 27 | * | ||
| 28 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 29 | * this list of conditions and the following disclaimer in the documentation | ||
| 30 | * and/or other materials | ||
| 31 | * provided with the distribution. | ||
| 32 | * | ||
| 33 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 34 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 35 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 36 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 37 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 38 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 39 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 40 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 41 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 42 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 43 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 44 | */ | ||
| 45 | |||
| 46 | #include <asm/current.h> | ||
| 47 | |||
| 48 | #include "ehca_iverbs.h" | ||
| 49 | #include "ehca_classes.h" | ||
| 50 | #include "ehca_irq.h" | ||
| 51 | #include "hcp_if.h" | ||
| 52 | |||
| 53 | static struct kmem_cache *cq_cache; | ||
| 54 | |||
| 55 | int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) | ||
| 56 | { | ||
| 57 | unsigned int qp_num = qp->real_qp_num; | ||
| 58 | unsigned int key = qp_num & (QP_HASHTAB_LEN-1); | ||
| 59 | unsigned long spl_flags; | ||
| 60 | |||
| 61 | spin_lock_irqsave(&cq->spinlock, spl_flags); | ||
| 62 | hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); | ||
| 63 | spin_unlock_irqrestore(&cq->spinlock, spl_flags); | ||
| 64 | |||
| 65 | ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", | ||
| 66 | cq->cq_number, qp_num); | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num) | ||
| 72 | { | ||
| 73 | int ret = -EINVAL; | ||
| 74 | unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); | ||
| 75 | struct hlist_node *iter; | ||
| 76 | struct ehca_qp *qp; | ||
| 77 | unsigned long spl_flags; | ||
| 78 | |||
| 79 | spin_lock_irqsave(&cq->spinlock, spl_flags); | ||
| 80 | hlist_for_each(iter, &cq->qp_hashtab[key]) { | ||
| 81 | qp = hlist_entry(iter, struct ehca_qp, list_entries); | ||
| 82 | if (qp->real_qp_num == real_qp_num) { | ||
| 83 | hlist_del(iter); | ||
| 84 | ehca_dbg(cq->ib_cq.device, | ||
| 85 | "removed qp from cq .cq_num=%x real_qp_num=%x", | ||
| 86 | cq->cq_number, real_qp_num); | ||
| 87 | ret = 0; | ||
| 88 | break; | ||
| 89 | } | ||
| 90 | } | ||
| 91 | spin_unlock_irqrestore(&cq->spinlock, spl_flags); | ||
| 92 | if (ret) | ||
| 93 | ehca_err(cq->ib_cq.device, | ||
| 94 | "qp not found cq_num=%x real_qp_num=%x", | ||
| 95 | cq->cq_number, real_qp_num); | ||
| 96 | |||
| 97 | return ret; | ||
| 98 | } | ||
| 99 | |||
| 100 | struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) | ||
| 101 | { | ||
| 102 | struct ehca_qp *ret = NULL; | ||
| 103 | unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); | ||
| 104 | struct hlist_node *iter; | ||
| 105 | struct ehca_qp *qp; | ||
| 106 | hlist_for_each(iter, &cq->qp_hashtab[key]) { | ||
| 107 | qp = hlist_entry(iter, struct ehca_qp, list_entries); | ||
| 108 | if (qp->real_qp_num == real_qp_num) { | ||
| 109 | ret = qp; | ||
| 110 | break; | ||
| 111 | } | ||
| 112 | } | ||
| 113 | return ret; | ||
| 114 | } | ||
| 115 | |||
| 116 | struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, | ||
| 117 | struct ib_ucontext *context, | ||
| 118 | struct ib_udata *udata) | ||
| 119 | { | ||
| 120 | static const u32 additional_cqe = 20; | ||
| 121 | struct ib_cq *cq; | ||
| 122 | struct ehca_cq *my_cq; | ||
| 123 | struct ehca_shca *shca = | ||
| 124 | container_of(device, struct ehca_shca, ib_device); | ||
| 125 | struct ipz_adapter_handle adapter_handle; | ||
| 126 | struct ehca_alloc_cq_parms param; /* h_call's out parameters */ | ||
| 127 | struct h_galpa gal; | ||
| 128 | void *vpage; | ||
| 129 | u32 counter; | ||
| 130 | u64 rpage, cqx_fec, h_ret; | ||
| 131 | int ipz_rc, ret, i; | ||
| 132 | unsigned long flags; | ||
| 133 | |||
| 134 | if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) | ||
| 135 | return ERR_PTR(-EINVAL); | ||
| 136 | |||
| 137 | my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL); | ||
| 138 | if (!my_cq) { | ||
| 139 | ehca_err(device, "Out of memory for ehca_cq struct device=%p", | ||
| 140 | device); | ||
| 141 | return ERR_PTR(-ENOMEM); | ||
| 142 | } | ||
| 143 | |||
| 144 | memset(my_cq, 0, sizeof(struct ehca_cq)); | ||
| 145 | memset(¶m, 0, sizeof(struct ehca_alloc_cq_parms)); | ||
| 146 | |||
| 147 | spin_lock_init(&my_cq->spinlock); | ||
| 148 | spin_lock_init(&my_cq->cb_lock); | ||
| 149 | spin_lock_init(&my_cq->task_lock); | ||
| 150 | my_cq->ownpid = current->tgid; | ||
| 151 | |||
| 152 | cq = &my_cq->ib_cq; | ||
| 153 | |||
| 154 | adapter_handle = shca->ipz_hca_handle; | ||
| 155 | param.eq_handle = shca->eq.ipz_eq_handle; | ||
| 156 | |||
| 157 | do { | ||
| 158 | if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) { | ||
| 159 | cq = ERR_PTR(-ENOMEM); | ||
| 160 | ehca_err(device, "Can't reserve idr nr. device=%p", | ||
| 161 | device); | ||
| 162 | goto create_cq_exit1; | ||
| 163 | } | ||
| 164 | |||
| 165 | spin_lock_irqsave(&ehca_cq_idr_lock, flags); | ||
| 166 | ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token); | ||
| 167 | spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); | ||
| 168 | |||
| 169 | } while (ret == -EAGAIN); | ||
| 170 | |||
| 171 | if (ret) { | ||
| 172 | cq = ERR_PTR(-ENOMEM); | ||
| 173 | ehca_err(device, "Can't allocate new idr entry. device=%p", | ||
| 174 | device); | ||
| 175 | goto create_cq_exit1; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | ||
| 179 | * CQs maximum depth is 4GB-64, but we need additional 20 as buffer | ||
| 180 | * for receiving errors CQEs. | ||
| 181 | */ | ||
| 182 | param.nr_cqe = cqe + additional_cqe; | ||
| 183 | h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, ¶m); | ||
| 184 | |||
| 185 | if (h_ret != H_SUCCESS) { | ||
| 186 | ehca_err(device, "hipz_h_alloc_resource_cq() failed " | ||
| 187 | "h_ret=%lx device=%p", h_ret, device); | ||
| 188 | cq = ERR_PTR(ehca2ib_return_code(h_ret)); | ||
| 189 | goto create_cq_exit2; | ||
| 190 | } | ||
| 191 | |||
| 192 | ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages, | ||
| 193 | EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0); | ||
| 194 | if (!ipz_rc) { | ||
| 195 | ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p", | ||
| 196 | ipz_rc, device); | ||
| 197 | cq = ERR_PTR(-EINVAL); | ||
| 198 | goto create_cq_exit3; | ||
| 199 | } | ||
| 200 | |||
| 201 | for (counter = 0; counter < param.act_pages; counter++) { | ||
| 202 | vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); | ||
| 203 | if (!vpage) { | ||
| 204 | ehca_err(device, "ipz_qpageit_get_inc() " | ||
| 205 | "returns NULL device=%p", device); | ||
| 206 | cq = ERR_PTR(-EAGAIN); | ||
| 207 | goto create_cq_exit4; | ||
| 208 | } | ||
| 209 | rpage = virt_to_abs(vpage); | ||
| 210 | |||
| 211 | h_ret = hipz_h_register_rpage_cq(adapter_handle, | ||
| 212 | my_cq->ipz_cq_handle, | ||
| 213 | &my_cq->pf, | ||
| 214 | 0, | ||
| 215 | 0, | ||
| 216 | rpage, | ||
| 217 | 1, | ||
| 218 | my_cq->galpas. | ||
| 219 | kernel); | ||
| 220 | |||
| 221 | if (h_ret < H_SUCCESS) { | ||
| 222 | ehca_err(device, "hipz_h_register_rpage_cq() failed " | ||
| 223 | "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i " | ||
| 224 | "act_pages=%i", my_cq, my_cq->cq_number, | ||
| 225 | h_ret, counter, param.act_pages); | ||
| 226 | cq = ERR_PTR(-EINVAL); | ||
| 227 | goto create_cq_exit4; | ||
| 228 | } | ||
| 229 | |||
| 230 | if (counter == (param.act_pages - 1)) { | ||
| 231 | vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); | ||
| 232 | if ((h_ret != H_SUCCESS) || vpage) { | ||
| 233 | ehca_err(device, "Registration of pages not " | ||
| 234 | "complete ehca_cq=%p cq_num=%x " | ||
| 235 | "h_ret=%lx", my_cq, my_cq->cq_number, | ||
| 236 | h_ret); | ||
| 237 | cq = ERR_PTR(-EAGAIN); | ||
| 238 | goto create_cq_exit4; | ||
| 239 | } | ||
| 240 | } else { | ||
| 241 | if (h_ret != H_PAGE_REGISTERED) { | ||
| 242 | ehca_err(device, "Registration of page failed " | ||
| 243 | "ehca_cq=%p cq_num=%x h_ret=%lx" | ||
| 244 | "counter=%i act_pages=%i", | ||
| 245 | my_cq, my_cq->cq_number, | ||
| 246 | h_ret, counter, param.act_pages); | ||
| 247 | cq = ERR_PTR(-ENOMEM); | ||
| 248 | goto create_cq_exit4; | ||
| 249 | } | ||
| 250 | } | ||
| 251 | } | ||
| 252 | |||
| 253 | ipz_qeit_reset(&my_cq->ipz_queue); | ||
| 254 | |||
| 255 | gal = my_cq->galpas.kernel; | ||
| 256 | cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); | ||
| 257 | ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx", | ||
| 258 | my_cq, my_cq->cq_number, cqx_fec); | ||
| 259 | |||
| 260 | my_cq->ib_cq.cqe = my_cq->nr_of_entries = | ||
| 261 | param.act_nr_of_entries - additional_cqe; | ||
| 262 | my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff; | ||
| 263 | |||
| 264 | for (i = 0; i < QP_HASHTAB_LEN; i++) | ||
| 265 | INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]); | ||
| 266 | |||
| 267 | if (context) { | ||
| 268 | struct ipz_queue *ipz_queue = &my_cq->ipz_queue; | ||
| 269 | struct ehca_create_cq_resp resp; | ||
| 270 | struct vm_area_struct *vma; | ||
| 271 | memset(&resp, 0, sizeof(resp)); | ||
| 272 | resp.cq_number = my_cq->cq_number; | ||
| 273 | resp.token = my_cq->token; | ||
| 274 | resp.ipz_queue.qe_size = ipz_queue->qe_size; | ||
| 275 | resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg; | ||
| 276 | resp.ipz_queue.queue_length = ipz_queue->queue_length; | ||
| 277 | resp.ipz_queue.pagesize = ipz_queue->pagesize; | ||
| 278 | resp.ipz_queue.toggle_state = ipz_queue->toggle_state; | ||
| 279 | ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000, | ||
| 280 | ipz_queue->queue_length, | ||
| 281 | (void**)&resp.ipz_queue.queue, | ||
| 282 | &vma); | ||
| 283 | if (ret) { | ||
| 284 | ehca_err(device, "Could not mmap queue pages"); | ||
| 285 | cq = ERR_PTR(ret); | ||
| 286 | goto create_cq_exit4; | ||
| 287 | } | ||
| 288 | my_cq->uspace_queue = resp.ipz_queue.queue; | ||
| 289 | resp.galpas = my_cq->galpas; | ||
| 290 | ret = ehca_mmap_register(my_cq->galpas.user.fw_handle, | ||
| 291 | (void**)&resp.galpas.kernel.fw_handle, | ||
| 292 | &vma); | ||
| 293 | if (ret) { | ||
| 294 | ehca_err(device, "Could not mmap fw_handle"); | ||
| 295 | cq = ERR_PTR(ret); | ||
| 296 | goto create_cq_exit5; | ||
| 297 | } | ||
| 298 | my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle; | ||
| 299 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | ||
| 300 | ehca_err(device, "Copy to udata failed."); | ||
| 301 | goto create_cq_exit6; | ||
| 302 | } | ||
| 303 | } | ||
| 304 | |||
| 305 | return cq; | ||
| 306 | |||
| 307 | create_cq_exit6: | ||
| 308 | ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE); | ||
| 309 | |||
| 310 | create_cq_exit5: | ||
| 311 | ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length); | ||
| 312 | |||
| 313 | create_cq_exit4: | ||
| 314 | ipz_queue_dtor(&my_cq->ipz_queue); | ||
| 315 | |||
| 316 | create_cq_exit3: | ||
| 317 | h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); | ||
| 318 | if (h_ret != H_SUCCESS) | ||
| 319 | ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " | ||
| 320 | "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret); | ||
| 321 | |||
| 322 | create_cq_exit2: | ||
| 323 | spin_lock_irqsave(&ehca_cq_idr_lock, flags); | ||
| 324 | idr_remove(&ehca_cq_idr, my_cq->token); | ||
| 325 | spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); | ||
| 326 | |||
| 327 | create_cq_exit1: | ||
| 328 | kmem_cache_free(cq_cache, my_cq); | ||
| 329 | |||
| 330 | return cq; | ||
| 331 | } | ||
| 332 | |||
| 333 | int ehca_destroy_cq(struct ib_cq *cq) | ||
| 334 | { | ||
| 335 | u64 h_ret; | ||
| 336 | int ret; | ||
| 337 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); | ||
| 338 | int cq_num = my_cq->cq_number; | ||
| 339 | struct ib_device *device = cq->device; | ||
| 340 | struct ehca_shca *shca = container_of(device, struct ehca_shca, | ||
| 341 | ib_device); | ||
| 342 | struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; | ||
| 343 | u32 cur_pid = current->tgid; | ||
| 344 | unsigned long flags; | ||
| 345 | |||
| 346 | spin_lock_irqsave(&ehca_cq_idr_lock, flags); | ||
| 347 | while (my_cq->nr_callbacks) | ||
| 348 | yield(); | ||
| 349 | |||
| 350 | idr_remove(&ehca_cq_idr, my_cq->token); | ||
| 351 | spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); | ||
| 352 | |||
| 353 | if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { | ||
| 354 | ehca_err(device, "Invalid caller pid=%x ownpid=%x", | ||
| 355 | cur_pid, my_cq->ownpid); | ||
| 356 | return -EINVAL; | ||
| 357 | } | ||
| 358 | |||
| 359 | /* un-mmap if vma alloc */ | ||
| 360 | if (my_cq->uspace_queue ) { | ||
| 361 | ret = ehca_munmap(my_cq->uspace_queue, | ||
| 362 | my_cq->ipz_queue.queue_length); | ||
| 363 | if (ret) | ||
| 364 | ehca_err(device, "Could not munmap queue ehca_cq=%p " | ||
| 365 | "cq_num=%x", my_cq, cq_num); | ||
| 366 | ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE); | ||
| 367 | if (ret) | ||
| 368 | ehca_err(device, "Could not munmap fwh ehca_cq=%p " | ||
| 369 | "cq_num=%x", my_cq, cq_num); | ||
| 370 | } | ||
| 371 | |||
| 372 | h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); | ||
| 373 | if (h_ret == H_R_STATE) { | ||
| 374 | /* cq in err: read err data and destroy it forcibly */ | ||
| 375 | ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err " | ||
| 376 | "state. Try to delete it forcibly.", | ||
| 377 | my_cq, cq_num, my_cq->ipz_cq_handle.handle); | ||
| 378 | ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); | ||
| 379 | h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); | ||
| 380 | if (h_ret == H_SUCCESS) | ||
| 381 | ehca_dbg(device, "cq_num=%x deleted successfully.", | ||
| 382 | cq_num); | ||
| 383 | } | ||
| 384 | if (h_ret != H_SUCCESS) { | ||
| 385 | ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx " | ||
| 386 | "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); | ||
| 387 | return ehca2ib_return_code(h_ret); | ||
| 388 | } | ||
| 389 | ipz_queue_dtor(&my_cq->ipz_queue); | ||
| 390 | kmem_cache_free(cq_cache, my_cq); | ||
| 391 | |||
| 392 | return 0; | ||
| 393 | } | ||
| 394 | |||
| 395 | int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) | ||
| 396 | { | ||
| 397 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); | ||
| 398 | u32 cur_pid = current->tgid; | ||
| 399 | |||
| 400 | if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { | ||
| 401 | ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", | ||
| 402 | cur_pid, my_cq->ownpid); | ||
| 403 | return -EINVAL; | ||
| 404 | } | ||
| 405 | |||
| 406 | /* TODO: proper resize needs to be done */ | ||
| 407 | ehca_err(cq->device, "not implemented yet"); | ||
| 408 | |||
| 409 | return -EFAULT; | ||
| 410 | } | ||
| 411 | |||
| 412 | int ehca_init_cq_cache(void) | ||
| 413 | { | ||
| 414 | cq_cache = kmem_cache_create("ehca_cache_cq", | ||
| 415 | sizeof(struct ehca_cq), 0, | ||
| 416 | SLAB_HWCACHE_ALIGN, | ||
| 417 | NULL, NULL); | ||
| 418 | if (!cq_cache) | ||
| 419 | return -ENOMEM; | ||
| 420 | return 0; | ||
| 421 | } | ||
| 422 | |||
| 423 | void ehca_cleanup_cq_cache(void) | ||
| 424 | { | ||
| 425 | if (cq_cache) | ||
| 426 | kmem_cache_destroy(cq_cache); | ||
| 427 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c new file mode 100644 index 00000000000..5281dec66f1 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_eq.c | |||
| @@ -0,0 +1,185 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Event queue handling | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Khadija Souissi <souissi@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 10 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 11 | * | ||
| 12 | * | ||
| 13 | * Copyright (c) 2005 IBM Corporation | ||
| 14 | * | ||
| 15 | * All rights reserved. | ||
| 16 | * | ||
| 17 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 18 | * BSD. | ||
| 19 | * | ||
| 20 | * OpenIB BSD License | ||
| 21 | * | ||
| 22 | * Redistribution and use in source and binary forms, with or without | ||
| 23 | * modification, are permitted provided that the following conditions are met: | ||
| 24 | * | ||
| 25 | * Redistributions of source code must retain the above copyright notice, this | ||
| 26 | * list of conditions and the following disclaimer. | ||
| 27 | * | ||
| 28 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 29 | * this list of conditions and the following disclaimer in the documentation | ||
| 30 | * and/or other materials | ||
| 31 | * provided with the distribution. | ||
| 32 | * | ||
| 33 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 34 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 35 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 36 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 37 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 38 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 39 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 40 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 41 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 42 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 43 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 44 | */ | ||
| 45 | |||
| 46 | #include "ehca_classes.h" | ||
| 47 | #include "ehca_irq.h" | ||
| 48 | #include "ehca_iverbs.h" | ||
| 49 | #include "ehca_qes.h" | ||
| 50 | #include "hcp_if.h" | ||
| 51 | #include "ipz_pt_fn.h" | ||
| 52 | |||
/*
 * Create an event queue (EHCA_EQ) or notification event queue
 * (EHCA_NEQ) of at least @length entries.
 *
 * Allocates the firmware EQ resource, builds the queue pages, registers
 * each page with the hypervisor, and finally wires up the interrupt
 * handler and tasklet for the chosen EQ type.
 *
 * Returns 0 on success, -EINVAL on any failure (the firmware h_ret
 * codes are not propagated to the caller).
 *
 * NOTE(review): a failing ibmebus_request_irq() is only logged — the
 * function still marks the EQ initialized and returns 0; confirm this
 * is intentional.
 */
int ehca_create_eq(struct ehca_shca *shca,
		   struct ehca_eq *eq,
		   const enum ehca_eq_type type, const u32 length)
{
	u64 ret;
	u32 nr_pages;
	u32 i;
	void *vpage;
	struct ib_device *ib_dev = &shca->ib_device;

	spin_lock_init(&eq->spinlock);
	eq->is_initialized = 0;

	/* validate arguments before touching firmware */
	if (type != EHCA_EQ && type != EHCA_NEQ) {
		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
		return -EINVAL;
	}
	if (!length) {
		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
		return -EINVAL;
	}

	/* allocate the EQ resource in firmware; outputs: handle, actual
	 * length, number of queue pages and the interrupt source token */
	ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
				       &eq->pf,
				       type,
				       length,
				       &eq->ipz_eq_handle,
				       &eq->length,
				       &nr_pages, &eq->ist);

	if (ret != H_SUCCESS) {
		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
		return -EINVAL;
	}

	ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
	if (!ret) {
		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
		goto create_eq_exit1;
	}

	/* register each queue page with the hypervisor; the firmware
	 * answers H_PAGE_REGISTERED for intermediate pages and H_SUCCESS
	 * for the last one */
	for (i = 0; i < nr_pages; i++) {
		u64 rpage;

		if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
			ret = H_RESOURCE;
			goto create_eq_exit2;
		}

		rpage = virt_to_abs(vpage);
		ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
					       eq->ipz_eq_handle,
					       &eq->pf,
					       0, 0, rpage, 1);

		if (i == (nr_pages - 1)) {
			/* last page */
			/* the page iterator must now be exhausted */
			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
			if (ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
		} else {
			if (ret != H_PAGE_REGISTERED || !vpage)
				goto create_eq_exit2;
		}
	}

	ipz_qeit_reset(&eq->ipz_queue);

	/* register interrupt handlers and initialize work queues */
	if (type == EHCA_EQ) {
		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
					  SA_INTERRUPT, "ehca_eq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
	} else if (type == EHCA_NEQ) {
		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
					  SA_INTERRUPT, "ehca_neq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
	}

	eq->is_initialized = 1;

	return 0;

	/* cleanup: exit2 destroys the queue pages and falls through to
	 * exit1, which releases the firmware EQ resource */
create_eq_exit2:
	ipz_queue_dtor(&eq->ipz_queue);

create_eq_exit1:
	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	return -EINVAL;
}
| 153 | |||
| 154 | void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq) | ||
| 155 | { | ||
| 156 | unsigned long flags; | ||
| 157 | void *eqe; | ||
| 158 | |||
| 159 | spin_lock_irqsave(&eq->spinlock, flags); | ||
| 160 | eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue); | ||
| 161 | spin_unlock_irqrestore(&eq->spinlock, flags); | ||
| 162 | |||
| 163 | return eqe; | ||
| 164 | } | ||
| 165 | |||
/*
 * Tear down an event queue: release its interrupt, destroy the firmware
 * EQ resource and free the queue pages.
 *
 * Returns 0 on success, -EINVAL if the firmware refused to free the EQ
 * (in which case the queue pages are deliberately NOT freed, since the
 * firmware may still reference them).
 *
 * NOTE(review): ibmebus_free_irq() and the hipz_h_destroy_eq() hCall
 * are made while holding the EQ spinlock with IRQs disabled — both look
 * like they could sleep; verify against the ibmebus/hcall
 * implementations.
 */
int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
	unsigned long flags;
	u64 h_ret;

	spin_lock_irqsave(&eq->spinlock, flags);
	ibmebus_free_irq(NULL, eq->ist, (void *)shca);

	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't free EQ resources.");
		return -EINVAL;
	}
	ipz_queue_dtor(&eq->ipz_queue);

	return 0;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c new file mode 100644 index 00000000000..5eae6ac4842 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_hca.c | |||
| @@ -0,0 +1,241 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * HCA query functions | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #include "ehca_tools.h" | ||
| 43 | #include "hcp_if.h" | ||
| 44 | |||
| 45 | int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) | ||
| 46 | { | ||
| 47 | int ret = 0; | ||
| 48 | struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, | ||
| 49 | ib_device); | ||
| 50 | struct hipz_query_hca *rblock; | ||
| 51 | |||
| 52 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 53 | if (!rblock) { | ||
| 54 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
| 55 | return -ENOMEM; | ||
| 56 | } | ||
| 57 | |||
| 58 | if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { | ||
| 59 | ehca_err(&shca->ib_device, "Can't query device properties"); | ||
| 60 | ret = -EINVAL; | ||
| 61 | goto query_device1; | ||
| 62 | } | ||
| 63 | |||
| 64 | memset(props, 0, sizeof(struct ib_device_attr)); | ||
| 65 | props->fw_ver = rblock->hw_ver; | ||
| 66 | props->max_mr_size = rblock->max_mr_size; | ||
| 67 | props->vendor_id = rblock->vendor_id >> 8; | ||
| 68 | props->vendor_part_id = rblock->vendor_part_id >> 16; | ||
| 69 | props->hw_ver = rblock->hw_ver; | ||
| 70 | props->max_qp = min_t(int, rblock->max_qp, INT_MAX); | ||
| 71 | props->max_qp_wr = min_t(int, rblock->max_wqes_wq, INT_MAX); | ||
| 72 | props->max_sge = min_t(int, rblock->max_sge, INT_MAX); | ||
| 73 | props->max_sge_rd = min_t(int, rblock->max_sge_rd, INT_MAX); | ||
| 74 | props->max_cq = min_t(int, rblock->max_cq, INT_MAX); | ||
| 75 | props->max_cqe = min_t(int, rblock->max_cqe, INT_MAX); | ||
| 76 | props->max_mr = min_t(int, rblock->max_mr, INT_MAX); | ||
| 77 | props->max_mw = min_t(int, rblock->max_mw, INT_MAX); | ||
| 78 | props->max_pd = min_t(int, rblock->max_pd, INT_MAX); | ||
| 79 | props->max_ah = min_t(int, rblock->max_ah, INT_MAX); | ||
| 80 | props->max_fmr = min_t(int, rblock->max_mr, INT_MAX); | ||
| 81 | props->max_srq = 0; | ||
| 82 | props->max_srq_wr = 0; | ||
| 83 | props->max_srq_sge = 0; | ||
| 84 | props->max_pkeys = 16; | ||
| 85 | props->local_ca_ack_delay | ||
| 86 | = rblock->local_ca_ack_delay; | ||
| 87 | props->max_raw_ipv6_qp | ||
| 88 | = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX); | ||
| 89 | props->max_raw_ethy_qp | ||
| 90 | = min_t(int, rblock->max_raw_ethy_qp, INT_MAX); | ||
| 91 | props->max_mcast_grp | ||
| 92 | = min_t(int, rblock->max_mcast_grp, INT_MAX); | ||
| 93 | props->max_mcast_qp_attach | ||
| 94 | = min_t(int, rblock->max_mcast_qp_attach, INT_MAX); | ||
| 95 | props->max_total_mcast_qp_attach | ||
| 96 | = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); | ||
| 97 | |||
| 98 | query_device1: | ||
| 99 | kfree(rblock); | ||
| 100 | |||
| 101 | return ret; | ||
| 102 | } | ||
| 103 | |||
| 104 | int ehca_query_port(struct ib_device *ibdev, | ||
| 105 | u8 port, struct ib_port_attr *props) | ||
| 106 | { | ||
| 107 | int ret = 0; | ||
| 108 | struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, | ||
| 109 | ib_device); | ||
| 110 | struct hipz_query_port *rblock; | ||
| 111 | |||
| 112 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 113 | if (!rblock) { | ||
| 114 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
| 115 | return -ENOMEM; | ||
| 116 | } | ||
| 117 | |||
| 118 | if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { | ||
| 119 | ehca_err(&shca->ib_device, "Can't query port properties"); | ||
| 120 | ret = -EINVAL; | ||
| 121 | goto query_port1; | ||
| 122 | } | ||
| 123 | |||
| 124 | memset(props, 0, sizeof(struct ib_port_attr)); | ||
| 125 | props->state = rblock->state; | ||
| 126 | |||
| 127 | switch (rblock->max_mtu) { | ||
| 128 | case 0x1: | ||
| 129 | props->active_mtu = props->max_mtu = IB_MTU_256; | ||
| 130 | break; | ||
| 131 | case 0x2: | ||
| 132 | props->active_mtu = props->max_mtu = IB_MTU_512; | ||
| 133 | break; | ||
| 134 | case 0x3: | ||
| 135 | props->active_mtu = props->max_mtu = IB_MTU_1024; | ||
| 136 | break; | ||
| 137 | case 0x4: | ||
| 138 | props->active_mtu = props->max_mtu = IB_MTU_2048; | ||
| 139 | break; | ||
| 140 | case 0x5: | ||
| 141 | props->active_mtu = props->max_mtu = IB_MTU_4096; | ||
| 142 | break; | ||
| 143 | default: | ||
| 144 | ehca_err(&shca->ib_device, "Unknown MTU size: %x.", | ||
| 145 | rblock->max_mtu); | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | |||
| 149 | props->gid_tbl_len = rblock->gid_tbl_len; | ||
| 150 | props->max_msg_sz = rblock->max_msg_sz; | ||
| 151 | props->bad_pkey_cntr = rblock->bad_pkey_cntr; | ||
| 152 | props->qkey_viol_cntr = rblock->qkey_viol_cntr; | ||
| 153 | props->pkey_tbl_len = rblock->pkey_tbl_len; | ||
| 154 | props->lid = rblock->lid; | ||
| 155 | props->sm_lid = rblock->sm_lid; | ||
| 156 | props->lmc = rblock->lmc; | ||
| 157 | props->sm_sl = rblock->sm_sl; | ||
| 158 | props->subnet_timeout = rblock->subnet_timeout; | ||
| 159 | props->init_type_reply = rblock->init_type_reply; | ||
| 160 | |||
| 161 | props->active_width = IB_WIDTH_12X; | ||
| 162 | props->active_speed = 0x1; | ||
| 163 | |||
| 164 | query_port1: | ||
| 165 | kfree(rblock); | ||
| 166 | |||
| 167 | return ret; | ||
| 168 | } | ||
| 169 | |||
| 170 | int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | ||
| 171 | { | ||
| 172 | int ret = 0; | ||
| 173 | struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); | ||
| 174 | struct hipz_query_port *rblock; | ||
| 175 | |||
| 176 | if (index > 16) { | ||
| 177 | ehca_err(&shca->ib_device, "Invalid index: %x.", index); | ||
| 178 | return -EINVAL; | ||
| 179 | } | ||
| 180 | |||
| 181 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 182 | if (!rblock) { | ||
| 183 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
| 184 | return -ENOMEM; | ||
| 185 | } | ||
| 186 | |||
| 187 | if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { | ||
| 188 | ehca_err(&shca->ib_device, "Can't query port properties"); | ||
| 189 | ret = -EINVAL; | ||
| 190 | goto query_pkey1; | ||
| 191 | } | ||
| 192 | |||
| 193 | memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); | ||
| 194 | |||
| 195 | query_pkey1: | ||
| 196 | kfree(rblock); | ||
| 197 | |||
| 198 | return ret; | ||
| 199 | } | ||
| 200 | |||
| 201 | int ehca_query_gid(struct ib_device *ibdev, u8 port, | ||
| 202 | int index, union ib_gid *gid) | ||
| 203 | { | ||
| 204 | int ret = 0; | ||
| 205 | struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, | ||
| 206 | ib_device); | ||
| 207 | struct hipz_query_port *rblock; | ||
| 208 | |||
| 209 | if (index > 255) { | ||
| 210 | ehca_err(&shca->ib_device, "Invalid index: %x.", index); | ||
| 211 | return -EINVAL; | ||
| 212 | } | ||
| 213 | |||
| 214 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 215 | if (!rblock) { | ||
| 216 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
| 217 | return -ENOMEM; | ||
| 218 | } | ||
| 219 | |||
| 220 | if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { | ||
| 221 | ehca_err(&shca->ib_device, "Can't query port properties"); | ||
| 222 | ret = -EINVAL; | ||
| 223 | goto query_gid1; | ||
| 224 | } | ||
| 225 | |||
| 226 | memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64)); | ||
| 227 | memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); | ||
| 228 | |||
| 229 | query_gid1: | ||
| 230 | kfree(rblock); | ||
| 231 | |||
| 232 | return ret; | ||
| 233 | } | ||
| 234 | |||
/*
 * ehca_modify_port() - ib_device verb: change port attributes.
 *
 * Stub: unconditionally fails without touching the hardware.
 * NOTE(review): -EFAULT is an unusual choice for "not implemented";
 * -ENOSYS or -EOPNOTSUPP would be conventional -- confirm what callers
 * expect before changing it.
 */
int ehca_modify_port(struct ib_device *ibdev,
		     u8 port, int port_modify_mask,
		     struct ib_port_modify *props)
{
	/* Not implemented yet */
	return -EFAULT;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c new file mode 100644 index 00000000000..2a65b5be197 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
| @@ -0,0 +1,762 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Functions for EQs, NEQs and interrupts | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Khadija Souissi <souissi@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #include "ehca_classes.h" | ||
| 43 | #include "ehca_irq.h" | ||
| 44 | #include "ehca_iverbs.h" | ||
| 45 | #include "ehca_tools.h" | ||
| 46 | #include "hcp_if.h" | ||
| 47 | #include "hipz_fns.h" | ||
| 48 | |||
| 49 | #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) | ||
| 50 | #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) | ||
| 51 | #define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7) | ||
| 52 | #define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31) | ||
| 53 | #define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31) | ||
| 54 | #define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63) | ||
| 55 | #define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63) | ||
| 56 | |||
| 57 | #define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) | ||
| 58 | #define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7) | ||
| 59 | #define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15) | ||
| 60 | #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16) | ||
| 61 | |||
| 62 | #define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63) | ||
| 63 | #define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7) | ||
| 64 | |||
| 65 | #ifdef CONFIG_INFINIBAND_EHCA_SCALING | ||
| 66 | |||
| 67 | static void queue_comp_task(struct ehca_cq *__cq); | ||
| 68 | |||
| 69 | static struct ehca_comp_pool* pool; | ||
| 70 | static struct notifier_block comp_pool_callback_nb; | ||
| 71 | |||
| 72 | #endif | ||
| 73 | |||
| 74 | static inline void comp_event_callback(struct ehca_cq *cq) | ||
| 75 | { | ||
| 76 | if (!cq->ib_cq.comp_handler) | ||
| 77 | return; | ||
| 78 | |||
| 79 | spin_lock(&cq->cb_lock); | ||
| 80 | cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context); | ||
| 81 | spin_unlock(&cq->cb_lock); | ||
| 82 | |||
| 83 | return; | ||
| 84 | } | ||
| 85 | |||
| 86 | static void print_error_data(struct ehca_shca * shca, void* data, | ||
| 87 | u64* rblock, int length) | ||
| 88 | { | ||
| 89 | u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]); | ||
| 90 | u64 resource = rblock[1]; | ||
| 91 | |||
| 92 | switch (type) { | ||
| 93 | case 0x1: /* Queue Pair */ | ||
| 94 | { | ||
| 95 | struct ehca_qp *qp = (struct ehca_qp*)data; | ||
| 96 | |||
| 97 | /* only print error data if AER is set */ | ||
| 98 | if (rblock[6] == 0) | ||
| 99 | return; | ||
| 100 | |||
| 101 | ehca_err(&shca->ib_device, | ||
| 102 | "QP 0x%x (resource=%lx) has errors.", | ||
| 103 | qp->ib_qp.qp_num, resource); | ||
| 104 | break; | ||
| 105 | } | ||
| 106 | case 0x4: /* Completion Queue */ | ||
| 107 | { | ||
| 108 | struct ehca_cq *cq = (struct ehca_cq*)data; | ||
| 109 | |||
| 110 | ehca_err(&shca->ib_device, | ||
| 111 | "CQ 0x%x (resource=%lx) has errors.", | ||
| 112 | cq->cq_number, resource); | ||
| 113 | break; | ||
| 114 | } | ||
| 115 | default: | ||
| 116 | ehca_err(&shca->ib_device, | ||
| 117 | "Unknown errror type: %lx on %s.", | ||
| 118 | type, shca->ib_device.name); | ||
| 119 | break; | ||
| 120 | } | ||
| 121 | |||
| 122 | ehca_err(&shca->ib_device, "Error data is available: %lx.", resource); | ||
| 123 | ehca_err(&shca->ib_device, "EHCA ----- error data begin " | ||
| 124 | "---------------------------------------------------"); | ||
| 125 | ehca_dmp(rblock, length, "resource=%lx", resource); | ||
| 126 | ehca_err(&shca->ib_device, "EHCA ----- error data end " | ||
| 127 | "----------------------------------------------------"); | ||
| 128 | |||
| 129 | return; | ||
| 130 | } | ||
| 131 | |||
/*
 * ehca_error_data() - fetch and log the hypervisor's error block for a
 * resource (QP or CQ handle in @resource, matching object in @data).
 *
 * Returns 0 (H_SUCCESS) when data was fetched and printed, -ENOMEM on
 * allocation failure, or the raw hcall status otherwise.
 *
 * NOTE(review): ret is declared unsigned long but carries both -ENOMEM
 * and u64 hcall codes and is returned through an int -- works on this
 * ABI, but worth tidying. Callers seen in this file ignore the return.
 */
int ehca_error_data(struct ehca_shca *shca, void *data,
		    u64 resource)
{

	unsigned long ret;
	u64 *rblock;
	unsigned long block_count;

	/* hypervisor reply buffer must be H_CB_ALIGNMENT-sized */
	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
		ret = -ENOMEM;
		goto error_data1;
	}

	ret = hipz_h_error_data(shca->ipz_hca_handle,
				resource,
				rblock,
				&block_count);

	/* H_R_STATE: resource exists but holds no error data */
	if (ret == H_R_STATE) {
		ehca_err(&shca->ib_device,
			 "No error data is available: %lx.", resource);
	}
	else if (ret == H_SUCCESS) {
		int length;

		length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		/* clamp to the buffer we can safely dump */
		if (length > PAGE_SIZE)
			length = PAGE_SIZE;

		print_error_data(shca, data, rblock, length);
	}
	else {
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %lx", resource);
	}

	kfree(rblock);

error_data1:
	return ret;

}
| 177 | |||
| 178 | static void qp_event_callback(struct ehca_shca *shca, | ||
| 179 | u64 eqe, | ||
| 180 | enum ib_event_type event_type) | ||
| 181 | { | ||
| 182 | struct ib_event event; | ||
| 183 | struct ehca_qp *qp; | ||
| 184 | unsigned long flags; | ||
| 185 | u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe); | ||
| 186 | |||
| 187 | spin_lock_irqsave(&ehca_qp_idr_lock, flags); | ||
| 188 | qp = idr_find(&ehca_qp_idr, token); | ||
| 189 | spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); | ||
| 190 | |||
| 191 | |||
| 192 | if (!qp) | ||
| 193 | return; | ||
| 194 | |||
| 195 | ehca_error_data(shca, qp, qp->ipz_qp_handle.handle); | ||
| 196 | |||
| 197 | if (!qp->ib_qp.event_handler) | ||
| 198 | return; | ||
| 199 | |||
| 200 | event.device = &shca->ib_device; | ||
| 201 | event.event = event_type; | ||
| 202 | event.element.qp = &qp->ib_qp; | ||
| 203 | |||
| 204 | qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); | ||
| 205 | |||
| 206 | return; | ||
| 207 | } | ||
| 208 | |||
| 209 | static void cq_event_callback(struct ehca_shca *shca, | ||
| 210 | u64 eqe) | ||
| 211 | { | ||
| 212 | struct ehca_cq *cq; | ||
| 213 | unsigned long flags; | ||
| 214 | u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe); | ||
| 215 | |||
| 216 | spin_lock_irqsave(&ehca_cq_idr_lock, flags); | ||
| 217 | cq = idr_find(&ehca_cq_idr, token); | ||
| 218 | spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); | ||
| 219 | |||
| 220 | if (!cq) | ||
| 221 | return; | ||
| 222 | |||
| 223 | ehca_error_data(shca, cq, cq->ipz_cq_handle.handle); | ||
| 224 | |||
| 225 | return; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void parse_identifier(struct ehca_shca *shca, u64 eqe) | ||
| 229 | { | ||
| 230 | u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe); | ||
| 231 | |||
| 232 | switch (identifier) { | ||
| 233 | case 0x02: /* path migrated */ | ||
| 234 | qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG); | ||
| 235 | break; | ||
| 236 | case 0x03: /* communication established */ | ||
| 237 | qp_event_callback(shca, eqe, IB_EVENT_COMM_EST); | ||
| 238 | break; | ||
| 239 | case 0x04: /* send queue drained */ | ||
| 240 | qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED); | ||
| 241 | break; | ||
| 242 | case 0x05: /* QP error */ | ||
| 243 | case 0x06: /* QP error */ | ||
| 244 | qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL); | ||
| 245 | break; | ||
| 246 | case 0x07: /* CQ error */ | ||
| 247 | case 0x08: /* CQ error */ | ||
| 248 | cq_event_callback(shca, eqe); | ||
| 249 | break; | ||
| 250 | case 0x09: /* MRMWPTE error */ | ||
| 251 | ehca_err(&shca->ib_device, "MRMWPTE error."); | ||
| 252 | break; | ||
| 253 | case 0x0A: /* port event */ | ||
| 254 | ehca_err(&shca->ib_device, "Port event."); | ||
| 255 | break; | ||
| 256 | case 0x0B: /* MR access error */ | ||
| 257 | ehca_err(&shca->ib_device, "MR access error."); | ||
| 258 | break; | ||
| 259 | case 0x0C: /* EQ error */ | ||
| 260 | ehca_err(&shca->ib_device, "EQ error."); | ||
| 261 | break; | ||
| 262 | case 0x0D: /* P/Q_Key mismatch */ | ||
| 263 | ehca_err(&shca->ib_device, "P/Q_Key mismatch."); | ||
| 264 | break; | ||
| 265 | case 0x10: /* sampling complete */ | ||
| 266 | ehca_err(&shca->ib_device, "Sampling complete."); | ||
| 267 | break; | ||
| 268 | case 0x11: /* unaffiliated access error */ | ||
| 269 | ehca_err(&shca->ib_device, "Unaffiliated access error."); | ||
| 270 | break; | ||
| 271 | case 0x12: /* path migrating error */ | ||
| 272 | ehca_err(&shca->ib_device, "Path migration error."); | ||
| 273 | break; | ||
| 274 | case 0x13: /* interface trace stopped */ | ||
| 275 | ehca_err(&shca->ib_device, "Interface trace stopped."); | ||
| 276 | break; | ||
| 277 | case 0x14: /* first error capture info available */ | ||
| 278 | default: | ||
| 279 | ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.", | ||
| 280 | identifier, shca->ib_device.name); | ||
| 281 | break; | ||
| 282 | } | ||
| 283 | |||
| 284 | return; | ||
| 285 | } | ||
| 286 | |||
| 287 | static void parse_ec(struct ehca_shca *shca, u64 eqe) | ||
| 288 | { | ||
| 289 | struct ib_event event; | ||
| 290 | u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe); | ||
| 291 | u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe); | ||
| 292 | |||
| 293 | switch (ec) { | ||
| 294 | case 0x30: /* port availability change */ | ||
| 295 | if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) { | ||
| 296 | ehca_info(&shca->ib_device, | ||
| 297 | "port %x is active.", port); | ||
| 298 | event.device = &shca->ib_device; | ||
| 299 | event.event = IB_EVENT_PORT_ACTIVE; | ||
| 300 | event.element.port_num = port; | ||
| 301 | shca->sport[port - 1].port_state = IB_PORT_ACTIVE; | ||
| 302 | ib_dispatch_event(&event); | ||
| 303 | } else { | ||
| 304 | ehca_info(&shca->ib_device, | ||
| 305 | "port %x is inactive.", port); | ||
| 306 | event.device = &shca->ib_device; | ||
| 307 | event.event = IB_EVENT_PORT_ERR; | ||
| 308 | event.element.port_num = port; | ||
| 309 | shca->sport[port - 1].port_state = IB_PORT_DOWN; | ||
| 310 | ib_dispatch_event(&event); | ||
| 311 | } | ||
| 312 | break; | ||
| 313 | case 0x31: | ||
| 314 | /* port configuration change | ||
| 315 | * disruptive change is caused by | ||
| 316 | * LID, PKEY or SM change | ||
| 317 | */ | ||
| 318 | ehca_warn(&shca->ib_device, | ||
| 319 | "disruptive port %x configuration change", port); | ||
| 320 | |||
| 321 | ehca_info(&shca->ib_device, | ||
| 322 | "port %x is inactive.", port); | ||
| 323 | event.device = &shca->ib_device; | ||
| 324 | event.event = IB_EVENT_PORT_ERR; | ||
| 325 | event.element.port_num = port; | ||
| 326 | shca->sport[port - 1].port_state = IB_PORT_DOWN; | ||
| 327 | ib_dispatch_event(&event); | ||
| 328 | |||
| 329 | ehca_info(&shca->ib_device, | ||
| 330 | "port %x is active.", port); | ||
| 331 | event.device = &shca->ib_device; | ||
| 332 | event.event = IB_EVENT_PORT_ACTIVE; | ||
| 333 | event.element.port_num = port; | ||
| 334 | shca->sport[port - 1].port_state = IB_PORT_ACTIVE; | ||
| 335 | ib_dispatch_event(&event); | ||
| 336 | break; | ||
| 337 | case 0x32: /* adapter malfunction */ | ||
| 338 | ehca_err(&shca->ib_device, "Adapter malfunction."); | ||
| 339 | break; | ||
| 340 | case 0x33: /* trace stopped */ | ||
| 341 | ehca_err(&shca->ib_device, "Traced stopped."); | ||
| 342 | break; | ||
| 343 | default: | ||
| 344 | ehca_err(&shca->ib_device, "Unknown event code: %x on %s.", | ||
| 345 | ec, shca->ib_device.name); | ||
| 346 | break; | ||
| 347 | } | ||
| 348 | |||
| 349 | return; | ||
| 350 | } | ||
| 351 | |||
/*
 * reset_eq_pending() - clear the CQ's "EQ entry pending" bit so the
 * hardware can raise the next event for this CQ.
 *
 * NOTE(review): the load into CQx_EP (otherwise unused) appears to be
 * a deliberate read-back to push the store out to the adapter --
 * confirm before removing it.
 */
static inline void reset_eq_pending(struct ehca_cq *cq)
{
	u64 CQx_EP;
	struct h_galpa gal = cq->galpas.kernel;

	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));

	return;
}
| 362 | |||
| 363 | irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs) | ||
| 364 | { | ||
| 365 | struct ehca_shca *shca = (struct ehca_shca*)dev_id; | ||
| 366 | |||
| 367 | tasklet_hi_schedule(&shca->neq.interrupt_task); | ||
| 368 | |||
| 369 | return IRQ_HANDLED; | ||
| 370 | } | ||
| 371 | |||
| 372 | void ehca_tasklet_neq(unsigned long data) | ||
| 373 | { | ||
| 374 | struct ehca_shca *shca = (struct ehca_shca*)data; | ||
| 375 | struct ehca_eqe *eqe; | ||
| 376 | u64 ret; | ||
| 377 | |||
| 378 | eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); | ||
| 379 | |||
| 380 | while (eqe) { | ||
| 381 | if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry)) | ||
| 382 | parse_ec(shca, eqe->entry); | ||
| 383 | |||
| 384 | eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); | ||
| 385 | } | ||
| 386 | |||
| 387 | ret = hipz_h_reset_event(shca->ipz_hca_handle, | ||
| 388 | shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL); | ||
| 389 | |||
| 390 | if (ret != H_SUCCESS) | ||
| 391 | ehca_err(&shca->ib_device, "Can't clear notification events."); | ||
| 392 | |||
| 393 | return; | ||
| 394 | } | ||
| 395 | |||
| 396 | irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs) | ||
| 397 | { | ||
| 398 | struct ehca_shca *shca = (struct ehca_shca*)dev_id; | ||
| 399 | |||
| 400 | tasklet_hi_schedule(&shca->eq.interrupt_task); | ||
| 401 | |||
| 402 | return IRQ_HANDLED; | ||
| 403 | } | ||
| 404 | |||
| 405 | void ehca_tasklet_eq(unsigned long data) | ||
| 406 | { | ||
| 407 | struct ehca_shca *shca = (struct ehca_shca*)data; | ||
| 408 | struct ehca_eqe *eqe; | ||
| 409 | int int_state; | ||
| 410 | int query_cnt = 0; | ||
| 411 | |||
| 412 | do { | ||
| 413 | eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); | ||
| 414 | |||
| 415 | if ((shca->hw_level >= 2) && eqe) | ||
| 416 | int_state = 1; | ||
| 417 | else | ||
| 418 | int_state = 0; | ||
| 419 | |||
| 420 | while ((int_state == 1) || eqe) { | ||
| 421 | while (eqe) { | ||
| 422 | u64 eqe_value = eqe->entry; | ||
| 423 | |||
| 424 | ehca_dbg(&shca->ib_device, | ||
| 425 | "eqe_value=%lx", eqe_value); | ||
| 426 | |||
| 427 | /* TODO: better structure */ | ||
| 428 | if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, | ||
| 429 | eqe_value)) { | ||
| 430 | unsigned long flags; | ||
| 431 | u32 token; | ||
| 432 | struct ehca_cq *cq; | ||
| 433 | |||
| 434 | ehca_dbg(&shca->ib_device, | ||
| 435 | "... completion event"); | ||
| 436 | token = | ||
| 437 | EHCA_BMASK_GET(EQE_CQ_TOKEN, | ||
| 438 | eqe_value); | ||
| 439 | spin_lock_irqsave(&ehca_cq_idr_lock, | ||
| 440 | flags); | ||
| 441 | cq = idr_find(&ehca_cq_idr, token); | ||
| 442 | |||
| 443 | if (cq == NULL) { | ||
| 444 | spin_unlock(&ehca_cq_idr_lock); | ||
| 445 | break; | ||
| 446 | } | ||
| 447 | |||
| 448 | reset_eq_pending(cq); | ||
| 449 | #ifdef CONFIG_INFINIBAND_EHCA_SCALING | ||
| 450 | queue_comp_task(cq); | ||
| 451 | spin_unlock_irqrestore(&ehca_cq_idr_lock, | ||
| 452 | flags); | ||
| 453 | #else | ||
| 454 | spin_unlock_irqrestore(&ehca_cq_idr_lock, | ||
| 455 | flags); | ||
| 456 | comp_event_callback(cq); | ||
| 457 | #endif | ||
| 458 | } else { | ||
| 459 | ehca_dbg(&shca->ib_device, | ||
| 460 | "... non completion event"); | ||
| 461 | parse_identifier(shca, eqe_value); | ||
| 462 | } | ||
| 463 | eqe = | ||
| 464 | (struct ehca_eqe *)ehca_poll_eq(shca, | ||
| 465 | &shca->eq); | ||
| 466 | } | ||
| 467 | |||
| 468 | if (shca->hw_level >= 2) { | ||
| 469 | int_state = | ||
| 470 | hipz_h_query_int_state(shca->ipz_hca_handle, | ||
| 471 | shca->eq.ist); | ||
| 472 | query_cnt++; | ||
| 473 | iosync(); | ||
| 474 | if (query_cnt >= 100) { | ||
| 475 | query_cnt = 0; | ||
| 476 | int_state = 0; | ||
| 477 | } | ||
| 478 | } | ||
| 479 | eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); | ||
| 480 | |||
| 481 | } | ||
| 482 | } while (int_state != 0); | ||
| 483 | |||
| 484 | return; | ||
| 485 | } | ||
| 486 | |||
| 487 | #ifdef CONFIG_INFINIBAND_EHCA_SCALING | ||
| 488 | |||
| 489 | static inline int find_next_online_cpu(struct ehca_comp_pool* pool) | ||
| 490 | { | ||
| 491 | unsigned long flags_last_cpu; | ||
| 492 | |||
| 493 | if (ehca_debug_level) | ||
| 494 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); | ||
| 495 | |||
| 496 | spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu); | ||
| 497 | pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map); | ||
| 498 | if (pool->last_cpu == NR_CPUS) | ||
| 499 | pool->last_cpu = first_cpu(cpu_online_map); | ||
| 500 | spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu); | ||
| 501 | |||
| 502 | return pool->last_cpu; | ||
| 503 | } | ||
| 504 | |||
| 505 | static void __queue_comp_task(struct ehca_cq *__cq, | ||
| 506 | struct ehca_cpu_comp_task *cct) | ||
| 507 | { | ||
| 508 | unsigned long flags_cct; | ||
| 509 | unsigned long flags_cq; | ||
| 510 | |||
| 511 | spin_lock_irqsave(&cct->task_lock, flags_cct); | ||
| 512 | spin_lock_irqsave(&__cq->task_lock, flags_cq); | ||
| 513 | |||
| 514 | if (__cq->nr_callbacks == 0) { | ||
| 515 | __cq->nr_callbacks++; | ||
| 516 | list_add_tail(&__cq->entry, &cct->cq_list); | ||
| 517 | cct->cq_jobs++; | ||
| 518 | wake_up(&cct->wait_queue); | ||
| 519 | } | ||
| 520 | else | ||
| 521 | __cq->nr_callbacks++; | ||
| 522 | |||
| 523 | spin_unlock_irqrestore(&__cq->task_lock, flags_cq); | ||
| 524 | spin_unlock_irqrestore(&cct->task_lock, flags_cct); | ||
| 525 | } | ||
| 526 | |||
| 527 | static void queue_comp_task(struct ehca_cq *__cq) | ||
| 528 | { | ||
| 529 | int cpu; | ||
| 530 | int cpu_id; | ||
| 531 | struct ehca_cpu_comp_task *cct; | ||
| 532 | |||
| 533 | cpu = get_cpu(); | ||
| 534 | cpu_id = find_next_online_cpu(pool); | ||
| 535 | |||
| 536 | BUG_ON(!cpu_online(cpu_id)); | ||
| 537 | |||
| 538 | cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); | ||
| 539 | |||
| 540 | if (cct->cq_jobs > 0) { | ||
| 541 | cpu_id = find_next_online_cpu(pool); | ||
| 542 | cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); | ||
| 543 | } | ||
| 544 | |||
| 545 | __queue_comp_task(__cq, cct); | ||
| 546 | |||
| 547 | put_cpu(); | ||
| 548 | |||
| 549 | return; | ||
| 550 | } | ||
| 551 | |||
/*
 * run_comp_task() - drain the worker's CQ list, delivering completion
 * callbacks.
 *
 * The worker lock is dropped around comp_event_callback() (the
 * consumer callback may take its own locks / run long) and reacquired
 * before the list is touched again.
 *
 * NOTE(review): after reacquiring the lock, cct->cq_list.next is
 * assumed to still be this cq's entry when nr_callbacks hits 0 --
 * confirm new CQs cannot be inserted ahead of it while the lock was
 * dropped (list_add_tail in __queue_comp_task suggests they can't,
 * but removals elsewhere could still change the head).
 */
static void run_comp_task(struct ehca_cpu_comp_task* cct)
{
	struct ehca_cq *cq;
	unsigned long flags_cct;
	unsigned long flags_cq;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags_cct);
		comp_event_callback(cq);
		spin_lock_irqsave(&cct->task_lock, flags_cct);

		/* one callback delivered; dequeue the CQ once all its
		 * outstanding completions have been handled */
		spin_lock_irqsave(&cq->task_lock, flags_cq);
		cq->nr_callbacks--;
		if (cq->nr_callbacks == 0) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock_irqrestore(&cq->task_lock, flags_cq);

	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	return;
}
| 580 | |||
/*
 * comp_task() - per-CPU completion worker thread body.
 *
 * Classic kthread wait loop: mark ourselves TASK_INTERRUPTIBLE before
 * checking the work list, so a wake_up() between the check and
 * schedule() is not lost; process pending CQs via run_comp_task();
 * exit when kthread_stop() is called.
 */
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task* cct = __cct;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while(!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		/* sleep only if no work arrived before we got here */
		if (list_empty(&cct->cq_list))
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		if (!list_empty(&cct->cq_list))
			run_comp_task(__cct);

		/* re-arm before the next kthread_should_stop() check */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
| 606 | |||
| 607 | static struct task_struct *create_comp_task(struct ehca_comp_pool *pool, | ||
| 608 | int cpu) | ||
| 609 | { | ||
| 610 | struct ehca_cpu_comp_task *cct; | ||
| 611 | |||
| 612 | cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); | ||
| 613 | spin_lock_init(&cct->task_lock); | ||
| 614 | INIT_LIST_HEAD(&cct->cq_list); | ||
| 615 | init_waitqueue_head(&cct->wait_queue); | ||
| 616 | cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu); | ||
| 617 | |||
| 618 | return cct->task; | ||
| 619 | } | ||
| 620 | |||
| 621 | static void destroy_comp_task(struct ehca_comp_pool *pool, | ||
| 622 | int cpu) | ||
| 623 | { | ||
| 624 | struct ehca_cpu_comp_task *cct; | ||
| 625 | struct task_struct *task; | ||
| 626 | unsigned long flags_cct; | ||
| 627 | |||
| 628 | cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); | ||
| 629 | |||
| 630 | spin_lock_irqsave(&cct->task_lock, flags_cct); | ||
| 631 | |||
| 632 | task = cct->task; | ||
| 633 | cct->task = NULL; | ||
| 634 | cct->cq_jobs = 0; | ||
| 635 | |||
| 636 | spin_unlock_irqrestore(&cct->task_lock, flags_cct); | ||
| 637 | |||
| 638 | if (task) | ||
| 639 | kthread_stop(task); | ||
| 640 | |||
| 641 | return; | ||
| 642 | } | ||
| 643 | |||
| 644 | static void take_over_work(struct ehca_comp_pool *pool, | ||
| 645 | int cpu) | ||
| 646 | { | ||
| 647 | struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); | ||
| 648 | LIST_HEAD(list); | ||
| 649 | struct ehca_cq *cq; | ||
| 650 | unsigned long flags_cct; | ||
| 651 | |||
| 652 | spin_lock_irqsave(&cct->task_lock, flags_cct); | ||
| 653 | |||
| 654 | list_splice_init(&cct->cq_list, &list); | ||
| 655 | |||
| 656 | while(!list_empty(&list)) { | ||
| 657 | cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); | ||
| 658 | |||
| 659 | list_del(&cq->entry); | ||
| 660 | __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks, | ||
| 661 | smp_processor_id())); | ||
| 662 | } | ||
| 663 | |||
| 664 | spin_unlock_irqrestore(&cct->task_lock, flags_cct); | ||
| 665 | |||
| 666 | } | ||
| 667 | |||
/*
 * comp_pool_callback - CPU hotplug notifier for the completion task pool.
 *
 * Keeps one comp_task kthread per online CPU:
 *  - CPU_UP_PREPARE: create (but do not yet bind/wake) the new CPU's task;
 *    NOTIFY_BAD vetoes the CPU coming up if creation fails.
 *  - CPU_UP_CANCELED: the CPU never came up — rebind its task to some
 *    online CPU so kthread_stop() can run it, then destroy it.
 *  - CPU_ONLINE: bind the prepared task to its CPU and start it.
 *  - CPU_DEAD: destroy the dead CPU's task and requeue its pending CQs
 *    onto the current CPU (take_over_work()).
 *  - CPU_DOWN_PREPARE / CPU_DOWN_FAILED: trace only, no action.
 */
static int comp_pool_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if(!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
| 710 | |||
| 711 | #endif | ||
| 712 | |||
/*
 * ehca_create_comp_pool - allocate the global completion task pool, spawn
 * one bound comp_task kthread per online CPU and register the CPU hotplug
 * notifier.  No-op (returning 0) when scaling support is compiled out.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * FIXES:
 *  - alloc_percpu() failure used to return -EINVAL; an allocation failure
 *    is -ENOMEM.
 *  - guard against an ERR_PTR() leaking out of create_comp_task() (it
 *    wraps kthread_create(), which never returns NULL) before handing the
 *    pointer to kthread_bind()/wake_up_process().
 */
int ehca_create_comp_pool(void)
{
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
	int cpu;
	struct task_struct *task;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = any_online_cpu(cpu_online_map);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -ENOMEM;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task && !IS_ERR(task)) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

	comp_pool_callback_nb.notifier_call = comp_pool_callback;
	comp_pool_callback_nb.priority = 0;
	register_cpu_notifier(&comp_pool_callback_nb);
#endif

	return 0;
}
| 747 | |||
/*
 * ehca_destroy_comp_pool - unregister the hotplug notifier and stop the
 * comp_task kthread of every online CPU.  No-op when scaling support is
 * compiled out.
 */
void ehca_destroy_comp_pool(void)
{
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
	int cpu;

	unregister_cpu_notifier(&comp_pool_callback_nb);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			destroy_comp_task(pool, cpu);
#endif
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h new file mode 100644 index 00000000000..85bf1fe16fe --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_irq.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Function definitions and structs for EQs, NEQs and interrupts | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Khadija Souissi <souissi@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #ifndef __EHCA_IRQ_H | ||
| 43 | #define __EHCA_IRQ_H | ||
| 44 | |||
| 45 | |||
| 46 | struct ehca_shca; | ||
| 47 | |||
| 48 | #include <linux/interrupt.h> | ||
| 49 | #include <linux/types.h> | ||
| 50 | #include <asm/atomic.h> | ||
| 51 | |||
| 52 | int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource); | ||
| 53 | |||
| 54 | irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs); | ||
| 55 | void ehca_tasklet_neq(unsigned long data); | ||
| 56 | |||
| 57 | irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs); | ||
| 58 | void ehca_tasklet_eq(unsigned long data); | ||
| 59 | |||
/* Per-CPU completion handling state, allocated via alloc_percpu(). */
struct ehca_cpu_comp_task {
	wait_queue_head_t wait_queue;	/* comp_task kthread sleeps here when idle */
	struct list_head cq_list;	/* CQs with completion work pending */
	struct task_struct *task;	/* the "ehca_comp/%d" worker kthread */
	spinlock_t task_lock;		/* protects cq_list, task and cq_jobs */
	int cq_jobs;			/* number of CQs currently on cq_list */
};

/* Global pool tying the per-CPU comp tasks together. */
struct ehca_comp_pool {
	struct ehca_cpu_comp_task *cpu_comp_tasks; /* per-CPU array (alloc_percpu) */
	int last_cpu;			/* last CPU picked for CQ dispatch — presumably round-robin state; confirm against __queue_comp_task callers */
	spinlock_t last_cpu_lock;	/* protects last_cpu */
};
| 73 | |||
| 74 | int ehca_create_comp_pool(void); | ||
| 75 | void ehca_destroy_comp_pool(void); | ||
| 76 | |||
| 77 | #endif | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h new file mode 100644 index 00000000000..bbdc437f516 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h | |||
| @@ -0,0 +1,181 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Function definitions for internal functions | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Dietmar Decker <ddecker@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #ifndef __EHCA_IVERBS_H__ | ||
| 43 | #define __EHCA_IVERBS_H__ | ||
| 44 | |||
| 45 | #include "ehca_classes.h" | ||
| 46 | |||
| 47 | int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props); | ||
| 48 | |||
| 49 | int ehca_query_port(struct ib_device *ibdev, u8 port, | ||
| 50 | struct ib_port_attr *props); | ||
| 51 | |||
| 52 | int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey); | ||
| 53 | |||
| 54 | int ehca_query_gid(struct ib_device *ibdev, u8 port, int index, | ||
| 55 | union ib_gid *gid); | ||
| 56 | |||
| 57 | int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, | ||
| 58 | struct ib_port_modify *props); | ||
| 59 | |||
| 60 | struct ib_pd *ehca_alloc_pd(struct ib_device *device, | ||
| 61 | struct ib_ucontext *context, | ||
| 62 | struct ib_udata *udata); | ||
| 63 | |||
| 64 | int ehca_dealloc_pd(struct ib_pd *pd); | ||
| 65 | |||
| 66 | struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); | ||
| 67 | |||
| 68 | int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); | ||
| 69 | |||
| 70 | int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); | ||
| 71 | |||
| 72 | int ehca_destroy_ah(struct ib_ah *ah); | ||
| 73 | |||
| 74 | struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags); | ||
| 75 | |||
| 76 | struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, | ||
| 77 | struct ib_phys_buf *phys_buf_array, | ||
| 78 | int num_phys_buf, | ||
| 79 | int mr_access_flags, u64 *iova_start); | ||
| 80 | |||
| 81 | struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, | ||
| 82 | struct ib_umem *region, | ||
| 83 | int mr_access_flags, struct ib_udata *udata); | ||
| 84 | |||
| 85 | int ehca_rereg_phys_mr(struct ib_mr *mr, | ||
| 86 | int mr_rereg_mask, | ||
| 87 | struct ib_pd *pd, | ||
| 88 | struct ib_phys_buf *phys_buf_array, | ||
| 89 | int num_phys_buf, int mr_access_flags, u64 *iova_start); | ||
| 90 | |||
| 91 | int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); | ||
| 92 | |||
| 93 | int ehca_dereg_mr(struct ib_mr *mr); | ||
| 94 | |||
| 95 | struct ib_mw *ehca_alloc_mw(struct ib_pd *pd); | ||
| 96 | |||
| 97 | int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw, | ||
| 98 | struct ib_mw_bind *mw_bind); | ||
| 99 | |||
| 100 | int ehca_dealloc_mw(struct ib_mw *mw); | ||
| 101 | |||
| 102 | struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, | ||
| 103 | int mr_access_flags, | ||
| 104 | struct ib_fmr_attr *fmr_attr); | ||
| 105 | |||
| 106 | int ehca_map_phys_fmr(struct ib_fmr *fmr, | ||
| 107 | u64 *page_list, int list_len, u64 iova); | ||
| 108 | |||
| 109 | int ehca_unmap_fmr(struct list_head *fmr_list); | ||
| 110 | |||
| 111 | int ehca_dealloc_fmr(struct ib_fmr *fmr); | ||
| 112 | |||
/* Event queue flavour selector for ehca_create_eq(). */
enum ehca_eq_type {
	EHCA_EQ = 0, /* Event Queue */
	EHCA_NEQ /* Notification Event Queue */
};
| 117 | |||
| 118 | int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq, | ||
| 119 | enum ehca_eq_type type, const u32 length); | ||
| 120 | |||
| 121 | int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq); | ||
| 122 | |||
| 123 | void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); | ||
| 124 | |||
| 125 | |||
| 126 | struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, | ||
| 127 | struct ib_ucontext *context, | ||
| 128 | struct ib_udata *udata); | ||
| 129 | |||
| 130 | int ehca_destroy_cq(struct ib_cq *cq); | ||
| 131 | |||
| 132 | int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); | ||
| 133 | |||
| 134 | int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); | ||
| 135 | |||
| 136 | int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); | ||
| 137 | |||
| 138 | int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify); | ||
| 139 | |||
| 140 | struct ib_qp *ehca_create_qp(struct ib_pd *pd, | ||
| 141 | struct ib_qp_init_attr *init_attr, | ||
| 142 | struct ib_udata *udata); | ||
| 143 | |||
| 144 | int ehca_destroy_qp(struct ib_qp *qp); | ||
| 145 | |||
| 146 | int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask); | ||
| 147 | |||
| 148 | int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, | ||
| 149 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); | ||
| 150 | |||
| 151 | int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr, | ||
| 152 | struct ib_send_wr **bad_send_wr); | ||
| 153 | |||
| 154 | int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr, | ||
| 155 | struct ib_recv_wr **bad_recv_wr); | ||
| 156 | |||
| 157 | u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp, | ||
| 158 | struct ib_qp_init_attr *qp_init_attr); | ||
| 159 | |||
| 160 | int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); | ||
| 161 | |||
| 162 | int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); | ||
| 163 | |||
| 164 | struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device, | ||
| 165 | struct ib_udata *udata); | ||
| 166 | |||
| 167 | int ehca_dealloc_ucontext(struct ib_ucontext *context); | ||
| 168 | |||
| 169 | int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | ||
| 170 | |||
| 171 | void ehca_poll_eqs(unsigned long data); | ||
| 172 | |||
| 173 | int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped, | ||
| 174 | struct vm_area_struct **vma); | ||
| 175 | |||
| 176 | int ehca_mmap_register(u64 physical,void **mapped, | ||
| 177 | struct vm_area_struct **vma); | ||
| 178 | |||
| 179 | int ehca_munmap(unsigned long addr, size_t len); | ||
| 180 | |||
| 181 | #endif | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c new file mode 100644 index 00000000000..2a99f2d13cd --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
| @@ -0,0 +1,818 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * module start stop, hca detection | ||
| 5 | * | ||
| 6 | * Authors: Heiko J Schick <schickhj@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Joachim Fenkes <fenkes@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #include "ehca_classes.h" | ||
| 44 | #include "ehca_iverbs.h" | ||
| 45 | #include "ehca_mrmw.h" | ||
| 46 | #include "ehca_tools.h" | ||
| 47 | #include "hcp_if.h" | ||
| 48 | |||
| 49 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 50 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | ||
| 51 | MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); | ||
| 52 | MODULE_VERSION("SVNEHCA_0016"); | ||
| 53 | |||
| 54 | int ehca_open_aqp1 = 0; | ||
| 55 | int ehca_debug_level = 0; | ||
| 56 | int ehca_hw_level = 0; | ||
| 57 | int ehca_nr_ports = 2; | ||
| 58 | int ehca_use_hp_mr = 0; | ||
| 59 | int ehca_port_act_time = 30; | ||
| 60 | int ehca_poll_all_eqs = 1; | ||
| 61 | int ehca_static_rate = -1; | ||
| 62 | |||
| 63 | module_param_named(open_aqp1, ehca_open_aqp1, int, 0); | ||
| 64 | module_param_named(debug_level, ehca_debug_level, int, 0); | ||
| 65 | module_param_named(hw_level, ehca_hw_level, int, 0); | ||
| 66 | module_param_named(nr_ports, ehca_nr_ports, int, 0); | ||
| 67 | module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0); | ||
| 68 | module_param_named(port_act_time, ehca_port_act_time, int, 0); | ||
| 69 | module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, 0); | ||
| 70 | module_param_named(static_rate, ehca_static_rate, int, 0); | ||
| 71 | |||
| 72 | MODULE_PARM_DESC(open_aqp1, | ||
| 73 | "AQP1 on startup (0: no (default), 1: yes)"); | ||
| 74 | MODULE_PARM_DESC(debug_level, | ||
| 75 | "debug level" | ||
| 76 | " (0: no debug traces (default), 1: with debug traces)"); | ||
| 77 | MODULE_PARM_DESC(hw_level, | ||
| 78 | "hardware level" | ||
| 79 | " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); | ||
| 80 | MODULE_PARM_DESC(nr_ports, | ||
| 81 | "number of connected ports (default: 2)"); | ||
| 82 | MODULE_PARM_DESC(use_hp_mr, | ||
| 83 | "high performance MRs (0: no (default), 1: yes)"); | ||
| 84 | MODULE_PARM_DESC(port_act_time, | ||
| 85 | "time to wait for port activation (default: 30 sec)"); | ||
| 86 | MODULE_PARM_DESC(poll_all_eqs, | ||
| 87 | "polls all event queues periodically" | ||
| 88 | " (0: no, 1: yes (default))"); | ||
| 89 | MODULE_PARM_DESC(static_rate, | ||
| 90 | "set permanent static rate (default: disabled)"); | ||
| 91 | |||
| 92 | spinlock_t ehca_qp_idr_lock; | ||
| 93 | spinlock_t ehca_cq_idr_lock; | ||
| 94 | DEFINE_IDR(ehca_qp_idr); | ||
| 95 | DEFINE_IDR(ehca_cq_idr); | ||
| 96 | |||
| 97 | static struct list_head shca_list; /* list of all registered ehcas */ | ||
| 98 | static spinlock_t shca_list_lock; | ||
| 99 | |||
| 100 | static struct timer_list poll_eqs_timer; | ||
| 101 | |||
/*
 * ehca_create_slab_caches - create the driver's five SLAB caches
 * (PD, CQ, QP, AV, MR/MW) in order.
 *
 * On failure, every cache created so far is torn down again via the
 * unwind labels below, and the failing initializer's error code is
 * returned.  Returns 0 on success.
 */
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto cleanup_pd;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto cleanup_cq;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto cleanup_qp;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto cleanup_av;
	}

	return 0;

	/* unwind in reverse order of creation */
cleanup_av:
	ehca_cleanup_av_cache();
cleanup_qp:
	ehca_cleanup_qp_cache();
cleanup_cq:
	ehca_cleanup_cq_cache();
cleanup_pd:
	ehca_cleanup_pd_cache();

	return ret;
}
| 152 | |||
/*
 * ehca_destroy_slab_caches - destroy all five driver SLAB caches, in
 * reverse order of their creation in ehca_create_slab_caches().
 */
static void ehca_destroy_slab_caches(void)
{
	ehca_cleanup_mrmw_cache();
	ehca_cleanup_av_cache();
	ehca_cleanup_qp_cache();
	ehca_cleanup_cq_cache();
	ehca_cleanup_pd_cache();
}
| 161 | |||
| 162 | #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) | ||
| 163 | #define EHCA_REVID EHCA_BMASK_IBM(40,63) | ||
| 164 | |||
| 165 | int ehca_sense_attributes(struct ehca_shca *shca) | ||
| 166 | { | ||
| 167 | int ret = 0; | ||
| 168 | u64 h_ret; | ||
| 169 | struct hipz_query_hca *rblock; | ||
| 170 | |||
| 171 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 172 | if (!rblock) { | ||
| 173 | ehca_gen_err("Cannot allocate rblock memory."); | ||
| 174 | return -ENOMEM; | ||
| 175 | } | ||
| 176 | |||
| 177 | h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); | ||
| 178 | if (h_ret != H_SUCCESS) { | ||
| 179 | ehca_gen_err("Cannot query device properties. h_ret=%lx", | ||
| 180 | h_ret); | ||
| 181 | ret = -EPERM; | ||
| 182 | goto num_ports1; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (ehca_nr_ports == 1) | ||
| 186 | shca->num_ports = 1; | ||
| 187 | else | ||
| 188 | shca->num_ports = (u8)rblock->num_ports; | ||
| 189 | |||
| 190 | ehca_gen_dbg(" ... found %x ports", rblock->num_ports); | ||
| 191 | |||
| 192 | if (ehca_hw_level == 0) { | ||
| 193 | u32 hcaaver; | ||
| 194 | u32 revid; | ||
| 195 | |||
| 196 | hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver); | ||
| 197 | revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver); | ||
| 198 | |||
| 199 | ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid); | ||
| 200 | |||
| 201 | if ((hcaaver == 1) && (revid == 0)) | ||
| 202 | shca->hw_level = 0; | ||
| 203 | else if ((hcaaver == 1) && (revid == 1)) | ||
| 204 | shca->hw_level = 1; | ||
| 205 | else if ((hcaaver == 1) && (revid == 2)) | ||
| 206 | shca->hw_level = 2; | ||
| 207 | } | ||
| 208 | ehca_gen_dbg(" ... hardware level=%x", shca->hw_level); | ||
| 209 | |||
| 210 | shca->sport[0].rate = IB_RATE_30_GBPS; | ||
| 211 | shca->sport[1].rate = IB_RATE_30_GBPS; | ||
| 212 | |||
| 213 | num_ports1: | ||
| 214 | kfree(rblock); | ||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | |||
| 218 | static int init_node_guid(struct ehca_shca *shca) | ||
| 219 | { | ||
| 220 | int ret = 0; | ||
| 221 | struct hipz_query_hca *rblock; | ||
| 222 | |||
| 223 | rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 224 | if (!rblock) { | ||
| 225 | ehca_err(&shca->ib_device, "Can't allocate rblock memory."); | ||
| 226 | return -ENOMEM; | ||
| 227 | } | ||
| 228 | |||
| 229 | if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { | ||
| 230 | ehca_err(&shca->ib_device, "Can't query device properties"); | ||
| 231 | ret = -EINVAL; | ||
| 232 | goto init_node_guid1; | ||
| 233 | } | ||
| 234 | |||
| 235 | memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); | ||
| 236 | |||
| 237 | init_node_guid1: | ||
| 238 | kfree(rblock); | ||
| 239 | return ret; | ||
| 240 | } | ||
| 241 | |||
/*
 * ehca_register_device - populate shca->ib_device with the driver's verb
 * implementations and register it with the InfiniBand core.
 *
 * Fetches the node GUID first (init_node_guid()), then fills in the
 * device identity, the userspace verb command mask, and the function
 * pointer table.  Commented-out assignments mark verbs this driver does
 * not (yet) implement.  Returns 0 on success or a negative error code.
 */
int ehca_register_device(struct ehca_shca *shca)
{
	int ret;

	ret = init_node_guid(shca);
	if (ret)
		return ret;

	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
	shca->ib_device.owner = THIS_MODULE;

	/* userspace ABI version and the uverbs commands we support */
	shca->ib_device.uverbs_abi_ver = 5;
	shca->ib_device.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);

	shca->ib_device.node_type = IB_NODE_CA;
	shca->ib_device.phys_port_cnt = shca->num_ports;
	shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
	shca->ib_device.query_device = ehca_query_device;
	shca->ib_device.query_port = ehca_query_port;
	shca->ib_device.query_gid = ehca_query_gid;
	shca->ib_device.query_pkey = ehca_query_pkey;
	/* shca->in_device.modify_device = ehca_modify_device */
	shca->ib_device.modify_port = ehca_modify_port;
	shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
	shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
	shca->ib_device.alloc_pd = ehca_alloc_pd;
	shca->ib_device.dealloc_pd = ehca_dealloc_pd;
	shca->ib_device.create_ah = ehca_create_ah;
	/* shca->ib_device.modify_ah = ehca_modify_ah; */
	shca->ib_device.query_ah = ehca_query_ah;
	shca->ib_device.destroy_ah = ehca_destroy_ah;
	shca->ib_device.create_qp = ehca_create_qp;
	shca->ib_device.modify_qp = ehca_modify_qp;
	shca->ib_device.query_qp = ehca_query_qp;
	shca->ib_device.destroy_qp = ehca_destroy_qp;
	shca->ib_device.post_send = ehca_post_send;
	shca->ib_device.post_recv = ehca_post_recv;
	shca->ib_device.create_cq = ehca_create_cq;
	shca->ib_device.destroy_cq = ehca_destroy_cq;
	shca->ib_device.resize_cq = ehca_resize_cq;
	shca->ib_device.poll_cq = ehca_poll_cq;
	/* shca->ib_device.peek_cq = ehca_peek_cq; */
	shca->ib_device.req_notify_cq = ehca_req_notify_cq;
	/* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
	shca->ib_device.get_dma_mr = ehca_get_dma_mr;
	shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
	shca->ib_device.reg_user_mr = ehca_reg_user_mr;
	shca->ib_device.query_mr = ehca_query_mr;
	shca->ib_device.dereg_mr = ehca_dereg_mr;
	shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
	shca->ib_device.alloc_mw = ehca_alloc_mw;
	shca->ib_device.bind_mw = ehca_bind_mw;
	shca->ib_device.dealloc_mw = ehca_dealloc_mw;
	shca->ib_device.alloc_fmr = ehca_alloc_fmr;
	shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
	shca->ib_device.unmap_fmr = ehca_unmap_fmr;
	shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
	shca->ib_device.attach_mcast = ehca_attach_mcast;
	shca->ib_device.detach_mcast = ehca_detach_mcast;
	/* shca->ib_device.process_mad = ehca_process_mad; */
	shca->ib_device.mmap = ehca_mmap;

	ret = ib_register_device(&shca->ib_device);
	if (ret)
		ehca_err(&shca->ib_device,
			 "ib_register_device() failed ret=%x", ret);

	return ret;
}
| 327 | |||
/*
 * ehca_create_aqp1 - create the CQ and GSI QP (AQP1, the management QP)
 * for the given 1-based port.
 *
 * Refuses (-EPERM) if the port's AQP1 CQ or QP already exists.  On QP
 * creation failure the just-created CQ is destroyed again via the
 * create_aqp1 unwind label.
 *
 * NOTE(review): on the "QP already created" error path, sport->ibcq_aqp1
 * keeps pointing at the CQ that is destroyed below — the pointer is left
 * dangling; presumably callers treat the whole port as failed — confirm.
 */
static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
{
	struct ehca_sport *sport = &shca->sport[port - 1];
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	if (sport->ibcq_aqp1) {
		ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
		return -EPERM;
	}

	/* (void*)(-1) is the CQ context cookie; 10 CQ entries suffice for AQP1 */
	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
	if (IS_ERR(ibcq)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
		return PTR_ERR(ibcq);
	}
	sport->ibcq_aqp1 = ibcq;

	if (sport->ibqp_aqp1) {
		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
		ret = -EPERM;
		goto create_aqp1;
	}

	memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
	qp_init_attr.send_cq = ibcq;
	qp_init_attr.recv_cq = ibcq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = 100;
	qp_init_attr.cap.max_recv_wr = 100;
	qp_init_attr.cap.max_send_sge = 2;
	qp_init_attr.cap.max_recv_sge = 1;
	qp_init_attr.qp_type = IB_QPT_GSI;
	qp_init_attr.port_num = port;
	qp_init_attr.qp_context = NULL;
	qp_init_attr.event_handler = NULL;
	qp_init_attr.srq = NULL;

	ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
	if (IS_ERR(ibqp)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
		ret = PTR_ERR(ibqp);
		goto create_aqp1;
	}
	sport->ibqp_aqp1 = ibqp;

	return 0;

create_aqp1:
	/* unwind: tear down the CQ created above */
	ib_destroy_cq(sport->ibcq_aqp1);
	return ret;
}
| 382 | |||
| 383 | static int ehca_destroy_aqp1(struct ehca_sport *sport) | ||
| 384 | { | ||
| 385 | int ret; | ||
| 386 | |||
| 387 | ret = ib_destroy_qp(sport->ibqp_aqp1); | ||
| 388 | if (ret) { | ||
| 389 | ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret); | ||
| 390 | return ret; | ||
| 391 | } | ||
| 392 | |||
| 393 | ret = ib_destroy_cq(sport->ibcq_aqp1); | ||
| 394 | if (ret) | ||
| 395 | ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret); | ||
| 396 | |||
| 397 | return ret; | ||
| 398 | } | ||
| 399 | |||
| 400 | static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) | ||
| 401 | { | ||
| 402 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
| 403 | ehca_debug_level); | ||
| 404 | } | ||
| 405 | |||
| 406 | static ssize_t ehca_store_debug_level(struct device_driver *ddp, | ||
| 407 | const char *buf, size_t count) | ||
| 408 | { | ||
| 409 | int value = (*buf) - '0'; | ||
| 410 | if (value >= 0 && value <= 9) | ||
| 411 | ehca_debug_level = value; | ||
| 412 | return 1; | ||
| 413 | } | ||
| 414 | |||
/* driver-level attribute: /sys/bus/ibmebus/drivers/ehca/debug_level */
DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
	    ehca_show_debug_level, ehca_store_debug_level);
| 417 | |||
| 418 | void ehca_create_driver_sysfs(struct ibmebus_driver *drv) | ||
| 419 | { | ||
| 420 | driver_create_file(&drv->driver, &driver_attr_debug_level); | ||
| 421 | } | ||
| 422 | |||
/* Remove the driver-level sysfs attributes registered in
 * ehca_create_driver_sysfs(). */
void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
{
	driver_remove_file(&drv->driver, &driver_attr_debug_level);
}
| 427 | |||
/*
 * EHCA_RESOURCE_ATTR(name) - generate a read-only per-device sysfs
 * attribute that queries the firmware (hipz_h_query_hca) and prints the
 * rblock field of the same name.  Special case: "num_ports" reports 1
 * when the module parameter ehca_nr_ports limits the adapter to one
 * port.  On allocation/query failure the attribute reads as empty
 * (returns 0 bytes).
 */
#define EHCA_RESOURCE_ATTR(name)                                           \
static ssize_t  ehca_show_##name(struct device *dev,                       \
				 struct device_attribute *attr,            \
				 char *buf)                                \
{									   \
	struct ehca_shca *shca;						   \
	struct hipz_query_hca *rblock;				   	   \
	int data;                                                          \
									   \
	shca = dev->driver_data;					   \
									   \
	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);			   \
	if (!rblock) {						           \
		dev_err(dev, "Can't allocate rblock memory.");		   \
		return 0;						   \
	}								   \
									   \
	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
		dev_err(dev, "Can't query device properties");	   	   \
		kfree(rblock);					   	   \
		return 0;					   	   \
	}								   \
									   \
	data = rblock->name;                                               \
	kfree(rblock);							   \
									   \
	if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))	   \
		return snprintf(buf, 256, "1\n");			   \
	else								   \
		return snprintf(buf, 256, "%d\n", data);		   \
									   \
}									   \
static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
| 461 | |||
/* Instantiate one read-only sysfs attribute per HCA resource counter;
 * each expands to ehca_show_<name>() plus dev_attr_<name>. */
EHCA_RESOURCE_ATTR(num_ports);
EHCA_RESOURCE_ATTR(hw_ver);
EHCA_RESOURCE_ATTR(max_eq);
EHCA_RESOURCE_ATTR(cur_eq);
EHCA_RESOURCE_ATTR(max_cq);
EHCA_RESOURCE_ATTR(cur_cq);
EHCA_RESOURCE_ATTR(max_qp);
EHCA_RESOURCE_ATTR(cur_qp);
EHCA_RESOURCE_ATTR(max_mr);
EHCA_RESOURCE_ATTR(cur_mr);
EHCA_RESOURCE_ATTR(max_mw);
EHCA_RESOURCE_ATTR(cur_mw);
EHCA_RESOURCE_ATTR(max_pd);
EHCA_RESOURCE_ATTR(max_ah);
| 476 | |||
| 477 | static ssize_t ehca_show_adapter_handle(struct device *dev, | ||
| 478 | struct device_attribute *attr, | ||
| 479 | char *buf) | ||
| 480 | { | ||
| 481 | struct ehca_shca *shca = dev->driver_data; | ||
| 482 | |||
| 483 | return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle); | ||
| 484 | |||
| 485 | } | ||
| 486 | static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); | ||
| 487 | |||
| 488 | |||
| 489 | void ehca_create_device_sysfs(struct ibmebus_dev *dev) | ||
| 490 | { | ||
| 491 | device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle); | ||
| 492 | device_create_file(&dev->ofdev.dev, &dev_attr_num_ports); | ||
| 493 | device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver); | ||
| 494 | device_create_file(&dev->ofdev.dev, &dev_attr_max_eq); | ||
| 495 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq); | ||
| 496 | device_create_file(&dev->ofdev.dev, &dev_attr_max_cq); | ||
| 497 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq); | ||
| 498 | device_create_file(&dev->ofdev.dev, &dev_attr_max_qp); | ||
| 499 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp); | ||
| 500 | device_create_file(&dev->ofdev.dev, &dev_attr_max_mr); | ||
| 501 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr); | ||
| 502 | device_create_file(&dev->ofdev.dev, &dev_attr_max_mw); | ||
| 503 | device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw); | ||
| 504 | device_create_file(&dev->ofdev.dev, &dev_attr_max_pd); | ||
| 505 | device_create_file(&dev->ofdev.dev, &dev_attr_max_ah); | ||
| 506 | } | ||
| 507 | |||
| 508 | void ehca_remove_device_sysfs(struct ibmebus_dev *dev) | ||
| 509 | { | ||
| 510 | device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle); | ||
| 511 | device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports); | ||
| 512 | device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver); | ||
| 513 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq); | ||
| 514 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq); | ||
| 515 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq); | ||
| 516 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq); | ||
| 517 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp); | ||
| 518 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp); | ||
| 519 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr); | ||
| 520 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr); | ||
| 521 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw); | ||
| 522 | device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw); | ||
| 523 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd); | ||
| 524 | device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah); | ||
| 525 | } | ||
| 526 | |||
| 527 | static int __devinit ehca_probe(struct ibmebus_dev *dev, | ||
| 528 | const struct of_device_id *id) | ||
| 529 | { | ||
| 530 | struct ehca_shca *shca; | ||
| 531 | u64 *handle; | ||
| 532 | struct ib_pd *ibpd; | ||
| 533 | int ret; | ||
| 534 | |||
| 535 | handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL); | ||
| 536 | if (!handle) { | ||
| 537 | ehca_gen_err("Cannot get eHCA handle for adapter: %s.", | ||
| 538 | dev->ofdev.node->full_name); | ||
| 539 | return -ENODEV; | ||
| 540 | } | ||
| 541 | |||
| 542 | if (!(*handle)) { | ||
| 543 | ehca_gen_err("Wrong eHCA handle for adapter: %s.", | ||
| 544 | dev->ofdev.node->full_name); | ||
| 545 | return -ENODEV; | ||
| 546 | } | ||
| 547 | |||
| 548 | shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca)); | ||
| 549 | if (!shca) { | ||
| 550 | ehca_gen_err("Cannot allocate shca memory."); | ||
| 551 | return -ENOMEM; | ||
| 552 | } | ||
| 553 | |||
| 554 | shca->ibmebus_dev = dev; | ||
| 555 | shca->ipz_hca_handle.handle = *handle; | ||
| 556 | dev->ofdev.dev.driver_data = shca; | ||
| 557 | |||
| 558 | ret = ehca_sense_attributes(shca); | ||
| 559 | if (ret < 0) { | ||
| 560 | ehca_gen_err("Cannot sense eHCA attributes."); | ||
| 561 | goto probe1; | ||
| 562 | } | ||
| 563 | |||
| 564 | ret = ehca_register_device(shca); | ||
| 565 | if (ret) { | ||
| 566 | ehca_gen_err("Cannot register Infiniband device"); | ||
| 567 | goto probe1; | ||
| 568 | } | ||
| 569 | |||
| 570 | /* create event queues */ | ||
| 571 | ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048); | ||
| 572 | if (ret) { | ||
| 573 | ehca_err(&shca->ib_device, "Cannot create EQ."); | ||
| 574 | goto probe2; | ||
| 575 | } | ||
| 576 | |||
| 577 | ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513); | ||
| 578 | if (ret) { | ||
| 579 | ehca_err(&shca->ib_device, "Cannot create NEQ."); | ||
| 580 | goto probe3; | ||
| 581 | } | ||
| 582 | |||
| 583 | /* create internal protection domain */ | ||
| 584 | ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL); | ||
| 585 | if (IS_ERR(ibpd)) { | ||
| 586 | ehca_err(&shca->ib_device, "Cannot create internal PD."); | ||
| 587 | ret = PTR_ERR(ibpd); | ||
| 588 | goto probe4; | ||
| 589 | } | ||
| 590 | |||
| 591 | shca->pd = container_of(ibpd, struct ehca_pd, ib_pd); | ||
| 592 | shca->pd->ib_pd.device = &shca->ib_device; | ||
| 593 | |||
| 594 | /* create internal max MR */ | ||
| 595 | ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr); | ||
| 596 | |||
| 597 | if (ret) { | ||
| 598 | ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x", | ||
| 599 | ret); | ||
| 600 | goto probe5; | ||
| 601 | } | ||
| 602 | |||
| 603 | /* create AQP1 for port 1 */ | ||
| 604 | if (ehca_open_aqp1 == 1) { | ||
| 605 | shca->sport[0].port_state = IB_PORT_DOWN; | ||
| 606 | ret = ehca_create_aqp1(shca, 1); | ||
| 607 | if (ret) { | ||
| 608 | ehca_err(&shca->ib_device, | ||
| 609 | "Cannot create AQP1 for port 1."); | ||
| 610 | goto probe6; | ||
| 611 | } | ||
| 612 | } | ||
| 613 | |||
| 614 | /* create AQP1 for port 2 */ | ||
| 615 | if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) { | ||
| 616 | shca->sport[1].port_state = IB_PORT_DOWN; | ||
| 617 | ret = ehca_create_aqp1(shca, 2); | ||
| 618 | if (ret) { | ||
| 619 | ehca_err(&shca->ib_device, | ||
| 620 | "Cannot create AQP1 for port 2."); | ||
| 621 | goto probe7; | ||
| 622 | } | ||
| 623 | } | ||
| 624 | |||
| 625 | ehca_create_device_sysfs(dev); | ||
| 626 | |||
| 627 | spin_lock(&shca_list_lock); | ||
| 628 | list_add(&shca->shca_list, &shca_list); | ||
| 629 | spin_unlock(&shca_list_lock); | ||
| 630 | |||
| 631 | return 0; | ||
| 632 | |||
| 633 | probe7: | ||
| 634 | ret = ehca_destroy_aqp1(&shca->sport[0]); | ||
| 635 | if (ret) | ||
| 636 | ehca_err(&shca->ib_device, | ||
| 637 | "Cannot destroy AQP1 for port 1. ret=%x", ret); | ||
| 638 | |||
| 639 | probe6: | ||
| 640 | ret = ehca_dereg_internal_maxmr(shca); | ||
| 641 | if (ret) | ||
| 642 | ehca_err(&shca->ib_device, | ||
| 643 | "Cannot destroy internal MR. ret=%x", ret); | ||
| 644 | |||
| 645 | probe5: | ||
| 646 | ret = ehca_dealloc_pd(&shca->pd->ib_pd); | ||
| 647 | if (ret) | ||
| 648 | ehca_err(&shca->ib_device, | ||
| 649 | "Cannot destroy internal PD. ret=%x", ret); | ||
| 650 | |||
| 651 | probe4: | ||
| 652 | ret = ehca_destroy_eq(shca, &shca->neq); | ||
| 653 | if (ret) | ||
| 654 | ehca_err(&shca->ib_device, | ||
| 655 | "Cannot destroy NEQ. ret=%x", ret); | ||
| 656 | |||
| 657 | probe3: | ||
| 658 | ret = ehca_destroy_eq(shca, &shca->eq); | ||
| 659 | if (ret) | ||
| 660 | ehca_err(&shca->ib_device, | ||
| 661 | "Cannot destroy EQ. ret=%x", ret); | ||
| 662 | |||
| 663 | probe2: | ||
| 664 | ib_unregister_device(&shca->ib_device); | ||
| 665 | |||
| 666 | probe1: | ||
| 667 | ib_dealloc_device(&shca->ib_device); | ||
| 668 | |||
| 669 | return -EINVAL; | ||
| 670 | } | ||
| 671 | |||
| 672 | static int __devexit ehca_remove(struct ibmebus_dev *dev) | ||
| 673 | { | ||
| 674 | struct ehca_shca *shca = dev->ofdev.dev.driver_data; | ||
| 675 | int ret; | ||
| 676 | |||
| 677 | ehca_remove_device_sysfs(dev); | ||
| 678 | |||
| 679 | if (ehca_open_aqp1 == 1) { | ||
| 680 | int i; | ||
| 681 | for (i = 0; i < shca->num_ports; i++) { | ||
| 682 | ret = ehca_destroy_aqp1(&shca->sport[i]); | ||
| 683 | if (ret) | ||
| 684 | ehca_err(&shca->ib_device, | ||
| 685 | "Cannot destroy AQP1 for port %x " | ||
| 686 | "ret=%x", ret, i); | ||
| 687 | } | ||
| 688 | } | ||
| 689 | |||
| 690 | ib_unregister_device(&shca->ib_device); | ||
| 691 | |||
| 692 | ret = ehca_dereg_internal_maxmr(shca); | ||
| 693 | if (ret) | ||
| 694 | ehca_err(&shca->ib_device, | ||
| 695 | "Cannot destroy internal MR. ret=%x", ret); | ||
| 696 | |||
| 697 | ret = ehca_dealloc_pd(&shca->pd->ib_pd); | ||
| 698 | if (ret) | ||
| 699 | ehca_err(&shca->ib_device, | ||
| 700 | "Cannot destroy internal PD. ret=%x", ret); | ||
| 701 | |||
| 702 | ret = ehca_destroy_eq(shca, &shca->eq); | ||
| 703 | if (ret) | ||
| 704 | ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret); | ||
| 705 | |||
| 706 | ret = ehca_destroy_eq(shca, &shca->neq); | ||
| 707 | if (ret) | ||
| 708 | ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%x", ret); | ||
| 709 | |||
| 710 | ib_dealloc_device(&shca->ib_device); | ||
| 711 | |||
| 712 | spin_lock(&shca_list_lock); | ||
| 713 | list_del(&shca->shca_list); | ||
| 714 | spin_unlock(&shca_list_lock); | ||
| 715 | |||
| 716 | return ret; | ||
| 717 | } | ||
| 718 | |||
/* Open Firmware match table: bind to device-tree nodes named "lhca"
 * with compatible "IBM,lhca"; NULL-terminated. */
static struct of_device_id ehca_device_table[] =
{
	{
		.name       = "lhca",
		.compatible = "IBM,lhca",
	},
	{},
};
| 727 | |||
/* ibmebus driver descriptor; probe/remove wired to the functions above. */
static struct ibmebus_driver ehca_driver = {
	.name     = "ehca",
	.id_table = ehca_device_table,
	.probe    = ehca_probe,
	.remove   = ehca_remove,
};
| 734 | |||
/*
 * Timer callback: walk all registered adapters and run the EQ tasklet
 * handler for each initialized event queue, then re-arm the timer for
 * one second later.  Used as a fallback when ehca_poll_all_eqs is set
 * (see ehca_module_init); the list walk and re-arm happen under
 * shca_list_lock.
 */
void ehca_poll_eqs(unsigned long data)
{
	struct ehca_shca *shca;

	spin_lock(&shca_list_lock);
	list_for_each_entry(shca, &shca_list, shca_list) {
		if (shca->eq.is_initialized)
			ehca_tasklet_eq((unsigned long)(void*)shca);
	}
	mod_timer(&poll_eqs_timer, jiffies + HZ);
	spin_unlock(&shca_list_lock);
}
| 747 | |||
| 748 | int __init ehca_module_init(void) | ||
| 749 | { | ||
| 750 | int ret; | ||
| 751 | |||
| 752 | printk(KERN_INFO "eHCA Infiniband Device Driver " | ||
| 753 | "(Rel.: SVNEHCA_0016)\n"); | ||
| 754 | idr_init(&ehca_qp_idr); | ||
| 755 | idr_init(&ehca_cq_idr); | ||
| 756 | spin_lock_init(&ehca_qp_idr_lock); | ||
| 757 | spin_lock_init(&ehca_cq_idr_lock); | ||
| 758 | |||
| 759 | INIT_LIST_HEAD(&shca_list); | ||
| 760 | spin_lock_init(&shca_list_lock); | ||
| 761 | |||
| 762 | if ((ret = ehca_create_comp_pool())) { | ||
| 763 | ehca_gen_err("Cannot create comp pool."); | ||
| 764 | return ret; | ||
| 765 | } | ||
| 766 | |||
| 767 | if ((ret = ehca_create_slab_caches())) { | ||
| 768 | ehca_gen_err("Cannot create SLAB caches"); | ||
| 769 | ret = -ENOMEM; | ||
| 770 | goto module_init1; | ||
| 771 | } | ||
| 772 | |||
| 773 | if ((ret = ibmebus_register_driver(&ehca_driver))) { | ||
| 774 | ehca_gen_err("Cannot register eHCA device driver"); | ||
| 775 | ret = -EINVAL; | ||
| 776 | goto module_init2; | ||
| 777 | } | ||
| 778 | |||
| 779 | ehca_create_driver_sysfs(&ehca_driver); | ||
| 780 | |||
| 781 | if (ehca_poll_all_eqs != 1) { | ||
| 782 | ehca_gen_err("WARNING!!!"); | ||
| 783 | ehca_gen_err("It is possible to lose interrupts."); | ||
| 784 | } else { | ||
| 785 | init_timer(&poll_eqs_timer); | ||
| 786 | poll_eqs_timer.function = ehca_poll_eqs; | ||
| 787 | poll_eqs_timer.expires = jiffies + HZ; | ||
| 788 | add_timer(&poll_eqs_timer); | ||
| 789 | } | ||
| 790 | |||
| 791 | return 0; | ||
| 792 | |||
| 793 | module_init2: | ||
| 794 | ehca_destroy_slab_caches(); | ||
| 795 | |||
| 796 | module_init1: | ||
| 797 | ehca_destroy_comp_pool(); | ||
| 798 | return ret; | ||
| 799 | }; | ||
| 800 | |||
| 801 | void __exit ehca_module_exit(void) | ||
| 802 | { | ||
| 803 | if (ehca_poll_all_eqs == 1) | ||
| 804 | del_timer_sync(&poll_eqs_timer); | ||
| 805 | |||
| 806 | ehca_remove_driver_sysfs(&ehca_driver); | ||
| 807 | ibmebus_unregister_driver(&ehca_driver); | ||
| 808 | |||
| 809 | ehca_destroy_slab_caches(); | ||
| 810 | |||
| 811 | ehca_destroy_comp_pool(); | ||
| 812 | |||
| 813 | idr_destroy(&ehca_cq_idr); | ||
| 814 | idr_destroy(&ehca_qp_idr); | ||
| 815 | }; | ||
| 816 | |||
/* register module entry and exit points */
module_init(ehca_module_init);
module_exit(ehca_module_exit);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c new file mode 100644 index 00000000000..32a870660bf --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mcast.c | |||
| @@ -0,0 +1,131 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * mcast functions | ||
| 5 | * | ||
| 6 | * Authors: Khadija Souissi <souissik@de.ibm.com> | ||
| 7 | * Waleri Fomin <fomin@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 10 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 11 | * | ||
| 12 | * Copyright (c) 2005 IBM Corporation | ||
| 13 | * | ||
| 14 | * All rights reserved. | ||
| 15 | * | ||
| 16 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 17 | * BSD. | ||
| 18 | * | ||
| 19 | * OpenIB BSD License | ||
| 20 | * | ||
| 21 | * Redistribution and use in source and binary forms, with or without | ||
| 22 | * modification, are permitted provided that the following conditions are met: | ||
| 23 | * | ||
| 24 | * Redistributions of source code must retain the above copyright notice, this | ||
| 25 | * list of conditions and the following disclaimer. | ||
| 26 | * | ||
| 27 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 28 | * this list of conditions and the following disclaimer in the documentation | ||
| 29 | * and/or other materials | ||
| 30 | * provided with the distribution. | ||
| 31 | * | ||
| 32 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 33 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 34 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 35 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 36 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 37 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 38 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 39 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 40 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 41 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 42 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 43 | */ | ||
| 44 | |||
| 45 | #include <linux/module.h> | ||
| 46 | #include <linux/err.h> | ||
| 47 | #include "ehca_classes.h" | ||
| 48 | #include "ehca_tools.h" | ||
| 49 | #include "ehca_qes.h" | ||
| 50 | #include "ehca_iverbs.h" | ||
| 51 | #include "hcp_if.h" | ||
| 52 | |||
/* Multicast LID range and GID prefix checks; multicast GIDs start with
 * the 0xFF prefix byte. */
#define MAX_MC_LID 0xFFFE
#define MIN_MC_LID 0xC000	/* Multicast limits */
#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
#define EHCA_VALID_MULTICAST_LID(lid) \
	(((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
| 58 | |||
| 59 | int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
| 60 | { | ||
| 61 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); | ||
| 62 | struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, | ||
| 63 | ib_device); | ||
| 64 | union ib_gid my_gid; | ||
| 65 | u64 subnet_prefix, interface_id, h_ret; | ||
| 66 | |||
| 67 | if (ibqp->qp_type != IB_QPT_UD) { | ||
| 68 | ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type); | ||
| 69 | return -EINVAL; | ||
| 70 | } | ||
| 71 | |||
| 72 | if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { | ||
| 73 | ehca_err(ibqp->device, "invalid mulitcast gid"); | ||
| 74 | return -EINVAL; | ||
| 75 | } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { | ||
| 76 | ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid); | ||
| 77 | return -EINVAL; | ||
| 78 | } | ||
| 79 | |||
| 80 | memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); | ||
| 81 | |||
| 82 | subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); | ||
| 83 | interface_id = be64_to_cpu(my_gid.global.interface_id); | ||
| 84 | h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle, | ||
| 85 | my_qp->ipz_qp_handle, | ||
| 86 | my_qp->galpas.kernel, | ||
| 87 | lid, subnet_prefix, interface_id); | ||
| 88 | if (h_ret != H_SUCCESS) | ||
| 89 | ehca_err(ibqp->device, | ||
| 90 | "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " | ||
| 91 | "h_ret=%lx", my_qp, ibqp->qp_num, h_ret); | ||
| 92 | |||
| 93 | return ehca2ib_return_code(h_ret); | ||
| 94 | } | ||
| 95 | |||
| 96 | int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
| 97 | { | ||
| 98 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); | ||
| 99 | struct ehca_shca *shca = container_of(ibqp->pd->device, | ||
| 100 | struct ehca_shca, ib_device); | ||
| 101 | union ib_gid my_gid; | ||
| 102 | u64 subnet_prefix, interface_id, h_ret; | ||
| 103 | |||
| 104 | if (ibqp->qp_type != IB_QPT_UD) { | ||
| 105 | ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type); | ||
| 106 | return -EINVAL; | ||
| 107 | } | ||
| 108 | |||
| 109 | if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { | ||
| 110 | ehca_err(ibqp->device, "invalid mulitcast gid"); | ||
| 111 | return -EINVAL; | ||
| 112 | } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { | ||
| 113 | ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid); | ||
| 114 | return -EINVAL; | ||
| 115 | } | ||
| 116 | |||
| 117 | memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); | ||
| 118 | |||
| 119 | subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); | ||
| 120 | interface_id = be64_to_cpu(my_gid.global.interface_id); | ||
| 121 | h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle, | ||
| 122 | my_qp->ipz_qp_handle, | ||
| 123 | my_qp->galpas.kernel, | ||
| 124 | lid, subnet_prefix, interface_id); | ||
| 125 | if (h_ret != H_SUCCESS) | ||
| 126 | ehca_err(ibqp->device, | ||
| 127 | "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " | ||
| 128 | "h_ret=%lx", my_qp, ibqp->qp_num, h_ret); | ||
| 129 | |||
| 130 | return ehca2ib_return_code(h_ret); | ||
| 131 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c new file mode 100644 index 00000000000..5ca65441e1d --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
| @@ -0,0 +1,2261 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * MR/MW functions | ||
| 5 | * | ||
| 6 | * Authors: Dietmar Decker <ddecker@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #include <asm/current.h> | ||
| 43 | |||
| 44 | #include "ehca_iverbs.h" | ||
| 45 | #include "ehca_mrmw.h" | ||
| 46 | #include "hcp_if.h" | ||
| 47 | #include "hipz_hw.h" | ||
| 48 | |||
/* slab caches backing ehca_mr/ehca_mw allocations; created elsewhere in
 * this file during module/cache init (not shown in this chunk) */
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
| 51 | |||
| 52 | static struct ehca_mr *ehca_mr_new(void) | ||
| 53 | { | ||
| 54 | struct ehca_mr *me; | ||
| 55 | |||
| 56 | me = kmem_cache_alloc(mr_cache, SLAB_KERNEL); | ||
| 57 | if (me) { | ||
| 58 | memset(me, 0, sizeof(struct ehca_mr)); | ||
| 59 | spin_lock_init(&me->mrlock); | ||
| 60 | } else | ||
| 61 | ehca_gen_err("alloc failed"); | ||
| 62 | |||
| 63 | return me; | ||
| 64 | } | ||
| 65 | |||
/* Return an ehca_mr obtained from ehca_mr_new() to the slab cache. */
static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}
| 70 | |||
| 71 | static struct ehca_mw *ehca_mw_new(void) | ||
| 72 | { | ||
| 73 | struct ehca_mw *me; | ||
| 74 | |||
| 75 | me = kmem_cache_alloc(mw_cache, SLAB_KERNEL); | ||
| 76 | if (me) { | ||
| 77 | memset(me, 0, sizeof(struct ehca_mw)); | ||
| 78 | spin_lock_init(&me->mwlock); | ||
| 79 | } else | ||
| 80 | ehca_gen_err("alloc failed"); | ||
| 81 | |||
| 82 | return me; | ||
| 83 | } | ||
| 84 | |||
/* Return an ehca_mw obtained from ehca_mw_new() to the slab cache. */
static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}
| 89 | |||
| 90 | /*----------------------------------------------------------------------*/ | ||
| 91 | |||
| 92 | struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) | ||
| 93 | { | ||
| 94 | struct ib_mr *ib_mr; | ||
| 95 | int ret; | ||
| 96 | struct ehca_mr *e_maxmr; | ||
| 97 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | ||
| 98 | struct ehca_shca *shca = | ||
| 99 | container_of(pd->device, struct ehca_shca, ib_device); | ||
| 100 | |||
| 101 | if (shca->maxmr) { | ||
| 102 | e_maxmr = ehca_mr_new(); | ||
| 103 | if (!e_maxmr) { | ||
| 104 | ehca_err(&shca->ib_device, "out of memory"); | ||
| 105 | ib_mr = ERR_PTR(-ENOMEM); | ||
| 106 | goto get_dma_mr_exit0; | ||
| 107 | } | ||
| 108 | |||
| 109 | ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE, | ||
| 110 | mr_access_flags, e_pd, | ||
| 111 | &e_maxmr->ib.ib_mr.lkey, | ||
| 112 | &e_maxmr->ib.ib_mr.rkey); | ||
| 113 | if (ret) { | ||
| 114 | ib_mr = ERR_PTR(ret); | ||
| 115 | goto get_dma_mr_exit0; | ||
| 116 | } | ||
| 117 | ib_mr = &e_maxmr->ib.ib_mr; | ||
| 118 | } else { | ||
| 119 | ehca_err(&shca->ib_device, "no internal max-MR exist!"); | ||
| 120 | ib_mr = ERR_PTR(-EINVAL); | ||
| 121 | goto get_dma_mr_exit0; | ||
| 122 | } | ||
| 123 | |||
| 124 | get_dma_mr_exit0: | ||
| 125 | if (IS_ERR(ib_mr)) | ||
| 126 | ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ", | ||
| 127 | PTR_ERR(ib_mr), pd, mr_access_flags); | ||
| 128 | return ib_mr; | ||
| 129 | } /* end ehca_get_dma_mr() */ | ||
| 130 | |||
| 131 | /*----------------------------------------------------------------------*/ | ||
| 132 | |||
/*
 * ib_reg_phys_mr verb: register a physical buffer list as a memory region.
 * Validates the buffer list and access flags, allocates the MR control
 * structure and registers it with the HCA — either as a max-MR when the
 * range qualifies (ehca_mr_is_maxmr), or page-by-page via ehca_reg_mr().
 * Returns the new ib_mr or an ERR_PTR; every error path logs the inputs.
 */
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	/* second condition catches u64 wrap-around of iova_start + size */
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type           = EHCA_MR_PGI_PHYS;
		pginfo.num_pages      = num_pages_mr;
		pginfo.num_4k         = num_pages_4k;
		pginfo.num_phys_buf   = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
					 EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */
| 240 | |||
| 241 | /*----------------------------------------------------------------------*/ | ||
| 242 | |||
| 243 | struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, | ||
| 244 | struct ib_umem *region, | ||
| 245 | int mr_access_flags, | ||
| 246 | struct ib_udata *udata) | ||
| 247 | { | ||
| 248 | struct ib_mr *ib_mr; | ||
| 249 | struct ehca_mr *e_mr; | ||
| 250 | struct ehca_shca *shca = | ||
| 251 | container_of(pd->device, struct ehca_shca, ib_device); | ||
| 252 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | ||
| 253 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | ||
| 254 | int ret; | ||
| 255 | u32 num_pages_mr; | ||
| 256 | u32 num_pages_4k; /* 4k portion "pages" */ | ||
| 257 | |||
| 258 | if (!pd) { | ||
| 259 | ehca_gen_err("bad pd=%p", pd); | ||
| 260 | return ERR_PTR(-EFAULT); | ||
| 261 | } | ||
| 262 | if (!region) { | ||
| 263 | ehca_err(pd->device, "bad input values: region=%p", region); | ||
| 264 | ib_mr = ERR_PTR(-EINVAL); | ||
| 265 | goto reg_user_mr_exit0; | ||
| 266 | } | ||
| 267 | if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && | ||
| 268 | !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || | ||
| 269 | ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && | ||
| 270 | !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { | ||
| 271 | /* | ||
| 272 | * Remote Write Access requires Local Write Access | ||
| 273 | * Remote Atomic Access requires Local Write Access | ||
| 274 | */ | ||
| 275 | ehca_err(pd->device, "bad input values: mr_access_flags=%x", | ||
| 276 | mr_access_flags); | ||
| 277 | ib_mr = ERR_PTR(-EINVAL); | ||
| 278 | goto reg_user_mr_exit0; | ||
| 279 | } | ||
| 280 | if (region->page_size != PAGE_SIZE) { | ||
| 281 | ehca_err(pd->device, "page size not supported, " | ||
| 282 | "region->page_size=%x", region->page_size); | ||
| 283 | ib_mr = ERR_PTR(-EINVAL); | ||
| 284 | goto reg_user_mr_exit0; | ||
| 285 | } | ||
| 286 | |||
| 287 | if ((region->length == 0) || | ||
| 288 | ((region->virt_base + region->length) < region->virt_base)) { | ||
| 289 | ehca_err(pd->device, "bad input values: length=%lx " | ||
| 290 | "virt_base=%lx", region->length, region->virt_base); | ||
| 291 | ib_mr = ERR_PTR(-EINVAL); | ||
| 292 | goto reg_user_mr_exit0; | ||
| 293 | } | ||
| 294 | |||
| 295 | e_mr = ehca_mr_new(); | ||
| 296 | if (!e_mr) { | ||
| 297 | ehca_err(pd->device, "out of memory"); | ||
| 298 | ib_mr = ERR_PTR(-ENOMEM); | ||
| 299 | goto reg_user_mr_exit0; | ||
| 300 | } | ||
| 301 | |||
| 302 | /* determine number of MR pages */ | ||
| 303 | num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length + | ||
| 304 | PAGE_SIZE - 1) / PAGE_SIZE); | ||
| 305 | num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length + | ||
| 306 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); | ||
| 307 | |||
| 308 | /* register MR on HCA */ | ||
| 309 | pginfo.type = EHCA_MR_PGI_USER; | ||
| 310 | pginfo.num_pages = num_pages_mr; | ||
| 311 | pginfo.num_4k = num_pages_4k; | ||
| 312 | pginfo.region = region; | ||
| 313 | pginfo.next_4k = region->offset / EHCA_PAGESIZE; | ||
| 314 | pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, | ||
| 315 | (®ion->chunk_list), | ||
| 316 | list); | ||
| 317 | |||
| 318 | ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base, | ||
| 319 | region->length, mr_access_flags, e_pd, &pginfo, | ||
| 320 | &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); | ||
| 321 | if (ret) { | ||
| 322 | ib_mr = ERR_PTR(ret); | ||
| 323 | goto reg_user_mr_exit1; | ||
| 324 | } | ||
| 325 | |||
| 326 | /* successful registration of all pages */ | ||
| 327 | return &e_mr->ib.ib_mr; | ||
| 328 | |||
| 329 | reg_user_mr_exit1: | ||
| 330 | ehca_mr_delete(e_mr); | ||
| 331 | reg_user_mr_exit0: | ||
| 332 | if (IS_ERR(ib_mr)) | ||
| 333 | ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x" | ||
| 334 | " udata=%p", | ||
| 335 | PTR_ERR(ib_mr), pd, region, mr_access_flags, udata); | ||
| 336 | return ib_mr; | ||
| 337 | } /* end ehca_reg_user_mr() */ | ||
| 338 | |||
| 339 | /*----------------------------------------------------------------------*/ | ||
| 340 | |||
/*
 * ehca_rereg_phys_mr() - re-register a physical memory region.
 *
 * Only translation changes (IB_MR_REREG_TRANS) are supported; PD and/or
 * access-flag changes may ride along.  The MR's spinlock is held from the
 * point where the new parameters are captured until the firmware rereg
 * completes, so lookups see a consistent MR.
 *
 * Returns 0 on success or a negative errno.
 */
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	/* userspace-created MRs may only be changed by their owning process */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	/* reject unknown mask bits and the empty mask (silently, no trace) */
	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	/* defaults are the MR's current values; mask bits override below */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start; /* new == old address */
	new_size = e_mr->size;	 /* new == old length */
	new_acl = e_mr->acl;	 /* new == old access control */
	new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		/* reject zero-length regions and address wraparound */
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		/* page counts include the leading intra-page offset */
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */
| 498 | |||
| 499 | /*----------------------------------------------------------------------*/ | ||
| 500 | |||
/*
 * ehca_query_mr() - fill *mr_attr with the MR's current attributes as
 * reported by the firmware (hipz_h_query_mr).
 *
 * Not valid for FMRs.  The MR spinlock is held across the query so the
 * returned attributes are consistent.  Returns 0 or a negative errno.
 */
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* userspace-created MRs may only be queried by the owning process */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
		goto query_mr_exit1;
	}
	/* copy firmware-reported attributes into the caller's struct */
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */
| 555 | |||
| 556 | /*----------------------------------------------------------------------*/ | ||
| 557 | |||
| 558 | int ehca_dereg_mr(struct ib_mr *mr) | ||
| 559 | { | ||
| 560 | int ret = 0; | ||
| 561 | u64 h_ret; | ||
| 562 | struct ehca_shca *shca = | ||
| 563 | container_of(mr->device, struct ehca_shca, ib_device); | ||
| 564 | struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); | ||
| 565 | struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); | ||
| 566 | u32 cur_pid = current->tgid; | ||
| 567 | |||
| 568 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 569 | (my_pd->ownpid != cur_pid)) { | ||
| 570 | ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x", | ||
| 571 | cur_pid, my_pd->ownpid); | ||
| 572 | ret = -EINVAL; | ||
| 573 | goto dereg_mr_exit0; | ||
| 574 | } | ||
| 575 | |||
| 576 | if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { | ||
| 577 | ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " | ||
| 578 | "e_mr->flags=%x", mr, e_mr, e_mr->flags); | ||
| 579 | ret = -EINVAL; | ||
| 580 | goto dereg_mr_exit0; | ||
| 581 | } else if (e_mr == shca->maxmr) { | ||
| 582 | /* should be impossible, however reject to be sure */ | ||
| 583 | ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p " | ||
| 584 | "shca->maxmr=%p mr->lkey=%x", | ||
| 585 | mr, shca->maxmr, mr->lkey); | ||
| 586 | ret = -EINVAL; | ||
| 587 | goto dereg_mr_exit0; | ||
| 588 | } | ||
| 589 | |||
| 590 | /* TODO: BUSY: MR still has bound window(s) */ | ||
| 591 | h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); | ||
| 592 | if (h_ret != H_SUCCESS) { | ||
| 593 | ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p " | ||
| 594 | "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", | ||
| 595 | h_ret, shca, e_mr, shca->ipz_hca_handle.handle, | ||
| 596 | e_mr->ipz_mr_handle.handle, mr->lkey); | ||
| 597 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | ||
| 598 | goto dereg_mr_exit0; | ||
| 599 | } | ||
| 600 | |||
| 601 | /* successful deregistration */ | ||
| 602 | ehca_mr_delete(e_mr); | ||
| 603 | |||
| 604 | dereg_mr_exit0: | ||
| 605 | if (ret) | ||
| 606 | ehca_err(mr->device, "ret=%x mr=%p", ret, mr); | ||
| 607 | return ret; | ||
| 608 | } /* end ehca_dereg_mr() */ | ||
| 609 | |||
| 610 | /*----------------------------------------------------------------------*/ | ||
| 611 | |||
| 612 | struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) | ||
| 613 | { | ||
| 614 | struct ib_mw *ib_mw; | ||
| 615 | u64 h_ret; | ||
| 616 | struct ehca_mw *e_mw; | ||
| 617 | struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); | ||
| 618 | struct ehca_shca *shca = | ||
| 619 | container_of(pd->device, struct ehca_shca, ib_device); | ||
| 620 | struct ehca_mw_hipzout_parms hipzout = {{0},0}; | ||
| 621 | |||
| 622 | e_mw = ehca_mw_new(); | ||
| 623 | if (!e_mw) { | ||
| 624 | ib_mw = ERR_PTR(-ENOMEM); | ||
| 625 | goto alloc_mw_exit0; | ||
| 626 | } | ||
| 627 | |||
| 628 | h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, | ||
| 629 | e_pd->fw_pd, &hipzout); | ||
| 630 | if (h_ret != H_SUCCESS) { | ||
| 631 | ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx " | ||
| 632 | "shca=%p hca_hndl=%lx mw=%p", | ||
| 633 | h_ret, shca, shca->ipz_hca_handle.handle, e_mw); | ||
| 634 | ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret)); | ||
| 635 | goto alloc_mw_exit1; | ||
| 636 | } | ||
| 637 | /* successful MW allocation */ | ||
| 638 | e_mw->ipz_mw_handle = hipzout.handle; | ||
| 639 | e_mw->ib_mw.rkey = hipzout.rkey; | ||
| 640 | return &e_mw->ib_mw; | ||
| 641 | |||
| 642 | alloc_mw_exit1: | ||
| 643 | ehca_mw_delete(e_mw); | ||
| 644 | alloc_mw_exit0: | ||
| 645 | if (IS_ERR(ib_mw)) | ||
| 646 | ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd); | ||
| 647 | return ib_mw; | ||
| 648 | } /* end ehca_alloc_mw() */ | ||
| 649 | |||
| 650 | /*----------------------------------------------------------------------*/ | ||
| 651 | |||
| 652 | int ehca_bind_mw(struct ib_qp *qp, | ||
| 653 | struct ib_mw *mw, | ||
| 654 | struct ib_mw_bind *mw_bind) | ||
| 655 | { | ||
| 656 | /* TODO: not supported up to now */ | ||
| 657 | ehca_gen_err("bind MW currently not supported by HCAD"); | ||
| 658 | |||
| 659 | return -EPERM; | ||
| 660 | } /* end ehca_bind_mw() */ | ||
| 661 | |||
| 662 | /*----------------------------------------------------------------------*/ | ||
| 663 | |||
| 664 | int ehca_dealloc_mw(struct ib_mw *mw) | ||
| 665 | { | ||
| 666 | u64 h_ret; | ||
| 667 | struct ehca_shca *shca = | ||
| 668 | container_of(mw->device, struct ehca_shca, ib_device); | ||
| 669 | struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw); | ||
| 670 | |||
| 671 | h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); | ||
| 672 | if (h_ret != H_SUCCESS) { | ||
| 673 | ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p " | ||
| 674 | "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", | ||
| 675 | h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, | ||
| 676 | e_mw->ipz_mw_handle.handle); | ||
| 677 | return ehca_mrmw_map_hrc_free_mw(h_ret); | ||
| 678 | } | ||
| 679 | /* successful deallocation */ | ||
| 680 | ehca_mw_delete(e_mw); | ||
| 681 | return 0; | ||
| 682 | } /* end ehca_dealloc_mw() */ | ||
| 683 | |||
| 684 | /*----------------------------------------------------------------------*/ | ||
| 685 | |||
/*
 * ehca_alloc_fmr() - allocate a fast memory region (FMR).
 *
 * Validates the access flags and FMR attributes (page shift must match
 * either EHCA_PAGESIZE or PAGE_SIZE), then registers an MR sized for
 * max_pages with the firmware via ehca_reg_mr().  Note pginfo is left
 * all-zero here; pages are supplied later by ehca_map_phys_fmr().
 *
 * Returns the new ib_fmr or an ERR_PTR on failure.
 */
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		/* window binding is unsupported on FMRs */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	/* FMR page size must be exactly one of the two supported sizes */
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */
| 768 | |||
| 769 | /*----------------------------------------------------------------------*/ | ||
| 770 | |||
/*
 * ehca_map_phys_fmr() - (re)map an FMR onto a new physical page list.
 *
 * Validates that the target really is an FMR, that the page list passes
 * ehca_fmr_check_page_list(), and that iova is FMR-page aligned, then
 * re-registers the MR with the firmware via ehca_rereg_mr().  Exceeding
 * fmr_max_maps is only traced, not rejected.
 *
 * Returns 0 or a negative errno.
 */
int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	/* describe the new page list for the firmware rereg */
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	/* starting 4k sub-page within the first FMR page */
	pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
			  EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */
| 833 | |||
| 834 | /*----------------------------------------------------------------------*/ | ||
| 835 | |||
/*
 * ehca_unmap_fmr() - unmap a list of FMRs.
 *
 * First pass validates that every entry is an FMR and that all entries
 * belong to the same SHCA; second pass unmaps each FMR via
 * ehca_unmap_one_fmr().  On the first unmap failure the remaining FMRs
 * are left mapped (partial unmap is possible on error).
 *
 * Returns 0 or a negative errno.
 */
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		/* NOTE(review): list_for_each_entry never yields NULL, so
		 * this check looks unreachable; kept for safety. */
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */
| 896 | |||
| 897 | /*----------------------------------------------------------------------*/ | ||
| 898 | |||
| 899 | int ehca_dealloc_fmr(struct ib_fmr *fmr) | ||
| 900 | { | ||
| 901 | int ret; | ||
| 902 | u64 h_ret; | ||
| 903 | struct ehca_shca *shca = | ||
| 904 | container_of(fmr->device, struct ehca_shca, ib_device); | ||
| 905 | struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); | ||
| 906 | |||
| 907 | if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { | ||
| 908 | ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", | ||
| 909 | e_fmr, e_fmr->flags); | ||
| 910 | ret = -EINVAL; | ||
| 911 | goto free_fmr_exit0; | ||
| 912 | } | ||
| 913 | |||
| 914 | h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); | ||
| 915 | if (h_ret != H_SUCCESS) { | ||
| 916 | ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p " | ||
| 917 | "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", | ||
| 918 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | ||
| 919 | e_fmr->ipz_mr_handle.handle, fmr->lkey); | ||
| 920 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | ||
| 921 | goto free_fmr_exit0; | ||
| 922 | } | ||
| 923 | /* successful deregistration */ | ||
| 924 | ehca_mr_delete(e_fmr); | ||
| 925 | return 0; | ||
| 926 | |||
| 927 | free_fmr_exit0: | ||
| 928 | if (ret) | ||
| 929 | ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr); | ||
| 930 | return ret; | ||
| 931 | } /* end ehca_dealloc_fmr() */ | ||
| 932 | |||
| 933 | /*----------------------------------------------------------------------*/ | ||
| 934 | |||
/*
 * ehca_reg_mr() - allocate a firmware MR resource and register its pages.
 *
 * Maps the IB access flags to the firmware ACL, allocates the MR via
 * hipz_h_alloc_resource_mr(), then registers all pages described by
 * pginfo via ehca_reg_mr_rpages().  On page-registration failure the
 * firmware resource is freed again; if that unwind itself fails the
 * error is logged as non-recoverable.
 *
 * On success the resulting lkey/rkey are stored through the OUT
 * parameters and the MR bookkeeping fields are updated.  Returns 0 or a
 * negative errno.
 */
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	/* module option: request high-performance MRs from firmware */
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca_mrmw_map_hrc_alloc(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k = pginfo->num_4k;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	/* unwind: release the firmware MR allocated above */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */
| 1002 | |||
| 1003 | /*----------------------------------------------------------------------*/ | ||
| 1004 | |||
| 1005 | int ehca_reg_mr_rpages(struct ehca_shca *shca, | ||
| 1006 | struct ehca_mr *e_mr, | ||
| 1007 | struct ehca_mr_pginfo *pginfo) | ||
| 1008 | { | ||
| 1009 | int ret = 0; | ||
| 1010 | u64 h_ret; | ||
| 1011 | u32 rnum; | ||
| 1012 | u64 rpage; | ||
| 1013 | u32 i; | ||
| 1014 | u64 *kpage; | ||
| 1015 | |||
| 1016 | kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 1017 | if (!kpage) { | ||
| 1018 | ehca_err(&shca->ib_device, "kpage alloc failed"); | ||
| 1019 | ret = -ENOMEM; | ||
| 1020 | goto ehca_reg_mr_rpages_exit0; | ||
| 1021 | } | ||
| 1022 | |||
| 1023 | /* max 512 pages per shot */ | ||
| 1024 | for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) { | ||
| 1025 | |||
| 1026 | if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { | ||
| 1027 | rnum = pginfo->num_4k % 512; /* last shot */ | ||
| 1028 | if (rnum == 0) | ||
| 1029 | rnum = 512; /* last shot is full */ | ||
| 1030 | } else | ||
| 1031 | rnum = 512; | ||
| 1032 | |||
| 1033 | if (rnum > 1) { | ||
| 1034 | ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage); | ||
| 1035 | if (ret) { | ||
| 1036 | ehca_err(&shca->ib_device, "ehca_set_pagebuf " | ||
| 1037 | "bad rc, ret=%x rnum=%x kpage=%p", | ||
| 1038 | ret, rnum, kpage); | ||
| 1039 | ret = -EFAULT; | ||
| 1040 | goto ehca_reg_mr_rpages_exit1; | ||
| 1041 | } | ||
| 1042 | rpage = virt_to_abs(kpage); | ||
| 1043 | if (!rpage) { | ||
| 1044 | ehca_err(&shca->ib_device, "kpage=%p i=%x", | ||
| 1045 | kpage, i); | ||
| 1046 | ret = -EFAULT; | ||
| 1047 | goto ehca_reg_mr_rpages_exit1; | ||
| 1048 | } | ||
| 1049 | } else { /* rnum==1 */ | ||
| 1050 | ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage); | ||
| 1051 | if (ret) { | ||
| 1052 | ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 " | ||
| 1053 | "bad rc, ret=%x i=%x", ret, i); | ||
| 1054 | ret = -EFAULT; | ||
| 1055 | goto ehca_reg_mr_rpages_exit1; | ||
| 1056 | } | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr, | ||
| 1060 | 0, /* pagesize 4k */ | ||
| 1061 | 0, rpage, rnum); | ||
| 1062 | |||
| 1063 | if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { | ||
| 1064 | /* | ||
| 1065 | * check for 'registration complete'==H_SUCCESS | ||
| 1066 | * and for 'page registered'==H_PAGE_REGISTERED | ||
| 1067 | */ | ||
| 1068 | if (h_ret != H_SUCCESS) { | ||
| 1069 | ehca_err(&shca->ib_device, "last " | ||
| 1070 | "hipz_reg_rpage_mr failed, h_ret=%lx " | ||
| 1071 | "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" | ||
| 1072 | " lkey=%x", h_ret, e_mr, i, | ||
| 1073 | shca->ipz_hca_handle.handle, | ||
| 1074 | e_mr->ipz_mr_handle.handle, | ||
| 1075 | e_mr->ib.ib_mr.lkey); | ||
| 1076 | ret = ehca_mrmw_map_hrc_rrpg_last(h_ret); | ||
| 1077 | break; | ||
| 1078 | } else | ||
| 1079 | ret = 0; | ||
| 1080 | } else if (h_ret != H_PAGE_REGISTERED) { | ||
| 1081 | ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " | ||
| 1082 | "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx " | ||
| 1083 | "mr_hndl=%lx", h_ret, e_mr, i, | ||
| 1084 | e_mr->ib.ib_mr.lkey, | ||
| 1085 | shca->ipz_hca_handle.handle, | ||
| 1086 | e_mr->ipz_mr_handle.handle); | ||
| 1087 | ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret); | ||
| 1088 | break; | ||
| 1089 | } else | ||
| 1090 | ret = 0; | ||
| 1091 | } /* end for(i) */ | ||
| 1092 | |||
| 1093 | |||
| 1094 | ehca_reg_mr_rpages_exit1: | ||
| 1095 | kfree(kpage); | ||
| 1096 | ehca_reg_mr_rpages_exit0: | ||
| 1097 | if (ret) | ||
| 1098 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " | ||
| 1099 | "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, | ||
| 1100 | pginfo->num_pages, pginfo->num_4k); | ||
| 1101 | return ret; | ||
| 1102 | } /* end ehca_reg_mr_rpages() */ | ||
| 1103 | |||
| 1104 | /*----------------------------------------------------------------------*/ | ||
| 1105 | |||
/*
 * Re-register an MR using the single hipz_h_reregister_pmr() hCall
 * ("Rereg1" path).
 *
 * On H_SUCCESS the MR attributes and the out-parameters lkey/rkey are
 * updated.  If the hCall fails, *pginfo is restored to its pre-call state
 * and -EAGAIN is returned so the caller can retry with the 3-hCall path
 * (e.g. on H_MR_CONDITION: MW bound or MR shared).
 */
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* translate ib access flags into the hipz representation */
	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	/* snapshot pginfo so it can be rolled back if the hCall fails */
	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k,kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		/* roll back page-info state consumed by ehca_set_pagebuf */
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64*)hipzout.vaddr != iova_start) {
		/* firmware must not relocate the region's virtual address */
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k = pginfo->num_4k;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	kfree(kpage);
ehca_rereg_mr_rereg1_exit0:
	/* -EAGAIN is an expected outcome (caller retries), so don't log it */
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */
| 1193 | |||
| 1194 | /*----------------------------------------------------------------------*/ | ||
| 1195 | |||
/*
 * Re-register an MR with new attributes (start, size, acl, pd, pages).
 *
 * Two strategies:
 *  - "Rereg1": single hipz_h_reregister_pmr() hCall, only usable when both
 *    old and new page counts fit in one rpage shot (<= 512 4k-pages) and
 *    the MR is not the internal max-MR;
 *  - "Rereg3": free the MR and fully re-register it via ehca_reg_mr()
 *    (3 hCalls); also the fallback when Rereg1 returns -EAGAIN.
 *
 * Returns 0 on success or a negative error code.
 */
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		/* max-MR property is lost by re-registering as a normal MR */
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			/* -EAGAIN means "fall back to the 3-hCall path" */
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			/*
			 * registration failed: restore everything from 'flags'
			 * onward from the snapshot (fields before 'flags',
			 * i.e. ib_mr and the lock, are deliberately kept)
			 */
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
| 1285 | |||
| 1286 | /*----------------------------------------------------------------------*/ | ||
| 1287 | |||
| 1288 | int ehca_unmap_one_fmr(struct ehca_shca *shca, | ||
| 1289 | struct ehca_mr *e_fmr) | ||
| 1290 | { | ||
| 1291 | int ret = 0; | ||
| 1292 | u64 h_ret; | ||
| 1293 | int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */ | ||
| 1294 | int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */ | ||
| 1295 | struct ehca_pd *e_pd = | ||
| 1296 | container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); | ||
| 1297 | struct ehca_mr save_fmr; | ||
| 1298 | u32 tmp_lkey, tmp_rkey; | ||
| 1299 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | ||
| 1300 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | ||
| 1301 | |||
| 1302 | /* first check if reregistration hCall can be used for unmap */ | ||
| 1303 | if (e_fmr->fmr_max_pages > 512) { | ||
| 1304 | rereg_1_hcall = 0; | ||
| 1305 | rereg_3_hcall = 1; | ||
| 1306 | } | ||
| 1307 | |||
| 1308 | if (rereg_1_hcall) { | ||
| 1309 | /* | ||
| 1310 | * note: after using rereg hcall with len=0, | ||
| 1311 | * rereg hcall must be used again for registering pages | ||
| 1312 | */ | ||
| 1313 | h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, | ||
| 1314 | 0, 0, e_pd->fw_pd, 0, &hipzout); | ||
| 1315 | if (h_ret != H_SUCCESS) { | ||
| 1316 | /* | ||
| 1317 | * should not happen, because length checked above, | ||
| 1318 | * FMRs are not shared and no MW bound to FMRs | ||
| 1319 | */ | ||
| 1320 | ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " | ||
| 1321 | "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx " | ||
| 1322 | "mr_hndl=%lx lkey=%x lkey_out=%x", | ||
| 1323 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | ||
| 1324 | e_fmr->ipz_mr_handle.handle, | ||
| 1325 | e_fmr->ib.ib_fmr.lkey, hipzout.lkey); | ||
| 1326 | rereg_3_hcall = 1; | ||
| 1327 | } else { | ||
| 1328 | /* successful reregistration */ | ||
| 1329 | e_fmr->start = NULL; | ||
| 1330 | e_fmr->size = 0; | ||
| 1331 | tmp_lkey = hipzout.lkey; | ||
| 1332 | tmp_rkey = hipzout.rkey; | ||
| 1333 | } | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | if (rereg_3_hcall) { | ||
| 1337 | struct ehca_mr save_mr; | ||
| 1338 | |||
| 1339 | /* first free old FMR */ | ||
| 1340 | h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); | ||
| 1341 | if (h_ret != H_SUCCESS) { | ||
| 1342 | ehca_err(&shca->ib_device, "hipz_free_mr failed, " | ||
| 1343 | "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx " | ||
| 1344 | "lkey=%x", | ||
| 1345 | h_ret, e_fmr, shca->ipz_hca_handle.handle, | ||
| 1346 | e_fmr->ipz_mr_handle.handle, | ||
| 1347 | e_fmr->ib.ib_fmr.lkey); | ||
| 1348 | ret = ehca_mrmw_map_hrc_free_mr(h_ret); | ||
| 1349 | goto ehca_unmap_one_fmr_exit0; | ||
| 1350 | } | ||
| 1351 | /* clean ehca_mr_t, without changing lock */ | ||
| 1352 | save_fmr = *e_fmr; | ||
| 1353 | ehca_mr_deletenew(e_fmr); | ||
| 1354 | |||
| 1355 | /* set some MR values */ | ||
| 1356 | e_fmr->flags = save_fmr.flags; | ||
| 1357 | e_fmr->fmr_page_size = save_fmr.fmr_page_size; | ||
| 1358 | e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; | ||
| 1359 | e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; | ||
| 1360 | e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; | ||
| 1361 | e_fmr->acl = save_fmr.acl; | ||
| 1362 | |||
| 1363 | pginfo.type = EHCA_MR_PGI_FMR; | ||
| 1364 | pginfo.num_pages = 0; | ||
| 1365 | pginfo.num_4k = 0; | ||
| 1366 | ret = ehca_reg_mr(shca, e_fmr, NULL, | ||
| 1367 | (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), | ||
| 1368 | e_fmr->acl, e_pd, &pginfo, &tmp_lkey, | ||
| 1369 | &tmp_rkey); | ||
| 1370 | if (ret) { | ||
| 1371 | u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; | ||
| 1372 | memcpy(&e_fmr->flags, &(save_mr.flags), | ||
| 1373 | sizeof(struct ehca_mr) - offset); | ||
| 1374 | goto ehca_unmap_one_fmr_exit0; | ||
| 1375 | } | ||
| 1376 | } | ||
| 1377 | |||
| 1378 | ehca_unmap_one_fmr_exit0: | ||
| 1379 | if (ret) | ||
| 1380 | ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x " | ||
| 1381 | "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x", | ||
| 1382 | ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages, | ||
| 1383 | rereg_1_hcall, rereg_3_hcall); | ||
| 1384 | return ret; | ||
| 1385 | } /* end ehca_unmap_one_fmr() */ | ||
| 1386 | |||
| 1387 | /*----------------------------------------------------------------------*/ | ||
| 1388 | |||
| 1389 | int ehca_reg_smr(struct ehca_shca *shca, | ||
| 1390 | struct ehca_mr *e_origmr, | ||
| 1391 | struct ehca_mr *e_newmr, | ||
| 1392 | u64 *iova_start, | ||
| 1393 | int acl, | ||
| 1394 | struct ehca_pd *e_pd, | ||
| 1395 | u32 *lkey, /*OUT*/ | ||
| 1396 | u32 *rkey) /*OUT*/ | ||
| 1397 | { | ||
| 1398 | int ret = 0; | ||
| 1399 | u64 h_ret; | ||
| 1400 | u32 hipz_acl; | ||
| 1401 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | ||
| 1402 | |||
| 1403 | ehca_mrmw_map_acl(acl, &hipz_acl); | ||
| 1404 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | ||
| 1405 | |||
| 1406 | h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, | ||
| 1407 | (u64)iova_start, hipz_acl, e_pd->fw_pd, | ||
| 1408 | &hipzout); | ||
| 1409 | if (h_ret != H_SUCCESS) { | ||
| 1410 | ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx " | ||
| 1411 | "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " | ||
| 1412 | "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", | ||
| 1413 | h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, | ||
| 1414 | shca->ipz_hca_handle.handle, | ||
| 1415 | e_origmr->ipz_mr_handle.handle, | ||
| 1416 | e_origmr->ib.ib_mr.lkey); | ||
| 1417 | ret = ehca_mrmw_map_hrc_reg_smr(h_ret); | ||
| 1418 | goto ehca_reg_smr_exit0; | ||
| 1419 | } | ||
| 1420 | /* successful registration */ | ||
| 1421 | e_newmr->num_pages = e_origmr->num_pages; | ||
| 1422 | e_newmr->num_4k = e_origmr->num_4k; | ||
| 1423 | e_newmr->start = iova_start; | ||
| 1424 | e_newmr->size = e_origmr->size; | ||
| 1425 | e_newmr->acl = acl; | ||
| 1426 | e_newmr->ipz_mr_handle = hipzout.handle; | ||
| 1427 | *lkey = hipzout.lkey; | ||
| 1428 | *rkey = hipzout.rkey; | ||
| 1429 | return 0; | ||
| 1430 | |||
| 1431 | ehca_reg_smr_exit0: | ||
| 1432 | if (ret) | ||
| 1433 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p " | ||
| 1434 | "e_newmr=%p iova_start=%p acl=%x e_pd=%p", | ||
| 1435 | ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd); | ||
| 1436 | return ret; | ||
| 1437 | } /* end ehca_reg_smr() */ | ||
| 1438 | |||
| 1439 | /*----------------------------------------------------------------------*/ | ||
| 1440 | |||
| 1441 | /* register internal max-MR to internal SHCA */ | ||
| 1442 | int ehca_reg_internal_maxmr( | ||
| 1443 | struct ehca_shca *shca, | ||
| 1444 | struct ehca_pd *e_pd, | ||
| 1445 | struct ehca_mr **e_maxmr) /*OUT*/ | ||
| 1446 | { | ||
| 1447 | int ret; | ||
| 1448 | struct ehca_mr *e_mr; | ||
| 1449 | u64 *iova_start; | ||
| 1450 | u64 size_maxmr; | ||
| 1451 | struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; | ||
| 1452 | struct ib_phys_buf ib_pbuf; | ||
| 1453 | u32 num_pages_mr; | ||
| 1454 | u32 num_pages_4k; /* 4k portion "pages" */ | ||
| 1455 | |||
| 1456 | e_mr = ehca_mr_new(); | ||
| 1457 | if (!e_mr) { | ||
| 1458 | ehca_err(&shca->ib_device, "out of memory"); | ||
| 1459 | ret = -ENOMEM; | ||
| 1460 | goto ehca_reg_internal_maxmr_exit0; | ||
| 1461 | } | ||
| 1462 | e_mr->flags |= EHCA_MR_FLAG_MAXMR; | ||
| 1463 | |||
| 1464 | /* register internal max-MR on HCA */ | ||
| 1465 | size_maxmr = (u64)high_memory - PAGE_OFFSET; | ||
| 1466 | iova_start = (u64*)KERNELBASE; | ||
| 1467 | ib_pbuf.addr = 0; | ||
| 1468 | ib_pbuf.size = size_maxmr; | ||
| 1469 | num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr + | ||
| 1470 | PAGE_SIZE - 1) / PAGE_SIZE); | ||
| 1471 | num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr + | ||
| 1472 | EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); | ||
| 1473 | |||
| 1474 | pginfo.type = EHCA_MR_PGI_PHYS; | ||
| 1475 | pginfo.num_pages = num_pages_mr; | ||
| 1476 | pginfo.num_4k = num_pages_4k; | ||
| 1477 | pginfo.num_phys_buf = 1; | ||
| 1478 | pginfo.phys_buf_array = &ib_pbuf; | ||
| 1479 | |||
| 1480 | ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, | ||
| 1481 | &pginfo, &e_mr->ib.ib_mr.lkey, | ||
| 1482 | &e_mr->ib.ib_mr.rkey); | ||
| 1483 | if (ret) { | ||
| 1484 | ehca_err(&shca->ib_device, "reg of internal max MR failed, " | ||
| 1485 | "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " | ||
| 1486 | "num_pages_4k=%x", e_mr, iova_start, size_maxmr, | ||
| 1487 | num_pages_mr, num_pages_4k); | ||
| 1488 | goto ehca_reg_internal_maxmr_exit1; | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | /* successful registration of all pages */ | ||
| 1492 | e_mr->ib.ib_mr.device = e_pd->ib_pd.device; | ||
| 1493 | e_mr->ib.ib_mr.pd = &e_pd->ib_pd; | ||
| 1494 | e_mr->ib.ib_mr.uobject = NULL; | ||
| 1495 | atomic_inc(&(e_pd->ib_pd.usecnt)); | ||
| 1496 | atomic_set(&(e_mr->ib.ib_mr.usecnt), 0); | ||
| 1497 | *e_maxmr = e_mr; | ||
| 1498 | return 0; | ||
| 1499 | |||
| 1500 | ehca_reg_internal_maxmr_exit1: | ||
| 1501 | ehca_mr_delete(e_mr); | ||
| 1502 | ehca_reg_internal_maxmr_exit0: | ||
| 1503 | if (ret) | ||
| 1504 | ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p", | ||
| 1505 | ret, shca, e_pd, e_maxmr); | ||
| 1506 | return ret; | ||
| 1507 | } /* end ehca_reg_internal_maxmr() */ | ||
| 1508 | |||
| 1509 | /*----------------------------------------------------------------------*/ | ||
| 1510 | |||
| 1511 | int ehca_reg_maxmr(struct ehca_shca *shca, | ||
| 1512 | struct ehca_mr *e_newmr, | ||
| 1513 | u64 *iova_start, | ||
| 1514 | int acl, | ||
| 1515 | struct ehca_pd *e_pd, | ||
| 1516 | u32 *lkey, | ||
| 1517 | u32 *rkey) | ||
| 1518 | { | ||
| 1519 | u64 h_ret; | ||
| 1520 | struct ehca_mr *e_origmr = shca->maxmr; | ||
| 1521 | u32 hipz_acl; | ||
| 1522 | struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; | ||
| 1523 | |||
| 1524 | ehca_mrmw_map_acl(acl, &hipz_acl); | ||
| 1525 | ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); | ||
| 1526 | |||
| 1527 | h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, | ||
| 1528 | (u64)iova_start, hipz_acl, e_pd->fw_pd, | ||
| 1529 | &hipzout); | ||
| 1530 | if (h_ret != H_SUCCESS) { | ||
| 1531 | ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx " | ||
| 1532 | "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", | ||
| 1533 | h_ret, e_origmr, shca->ipz_hca_handle.handle, | ||
| 1534 | e_origmr->ipz_mr_handle.handle, | ||
| 1535 | e_origmr->ib.ib_mr.lkey); | ||
| 1536 | return ehca_mrmw_map_hrc_reg_smr(h_ret); | ||
| 1537 | } | ||
| 1538 | /* successful registration */ | ||
| 1539 | e_newmr->num_pages = e_origmr->num_pages; | ||
| 1540 | e_newmr->num_4k = e_origmr->num_4k; | ||
| 1541 | e_newmr->start = iova_start; | ||
| 1542 | e_newmr->size = e_origmr->size; | ||
| 1543 | e_newmr->acl = acl; | ||
| 1544 | e_newmr->ipz_mr_handle = hipzout.handle; | ||
| 1545 | *lkey = hipzout.lkey; | ||
| 1546 | *rkey = hipzout.rkey; | ||
| 1547 | return 0; | ||
| 1548 | } /* end ehca_reg_maxmr() */ | ||
| 1549 | |||
| 1550 | /*----------------------------------------------------------------------*/ | ||
| 1551 | |||
| 1552 | int ehca_dereg_internal_maxmr(struct ehca_shca *shca) | ||
| 1553 | { | ||
| 1554 | int ret; | ||
| 1555 | struct ehca_mr *e_maxmr; | ||
| 1556 | struct ib_pd *ib_pd; | ||
| 1557 | |||
| 1558 | if (!shca->maxmr) { | ||
| 1559 | ehca_err(&shca->ib_device, "bad call, shca=%p", shca); | ||
| 1560 | ret = -EINVAL; | ||
| 1561 | goto ehca_dereg_internal_maxmr_exit0; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | e_maxmr = shca->maxmr; | ||
| 1565 | ib_pd = e_maxmr->ib.ib_mr.pd; | ||
| 1566 | shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */ | ||
| 1567 | |||
| 1568 | ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr); | ||
| 1569 | if (ret) { | ||
| 1570 | ehca_err(&shca->ib_device, "dereg internal max-MR failed, " | ||
| 1571 | "ret=%x e_maxmr=%p shca=%p lkey=%x", | ||
| 1572 | ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey); | ||
| 1573 | shca->maxmr = e_maxmr; | ||
| 1574 | goto ehca_dereg_internal_maxmr_exit0; | ||
| 1575 | } | ||
| 1576 | |||
| 1577 | atomic_dec(&ib_pd->usecnt); | ||
| 1578 | |||
| 1579 | ehca_dereg_internal_maxmr_exit0: | ||
| 1580 | if (ret) | ||
| 1581 | ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p", | ||
| 1582 | ret, shca, shca->maxmr); | ||
| 1583 | return ret; | ||
| 1584 | } /* end ehca_dereg_internal_maxmr() */ | ||
| 1585 | |||
| 1586 | /*----------------------------------------------------------------------*/ | ||
| 1587 | |||
| 1588 | /* | ||
| 1589 | * check physical buffer array of MR verbs for validness and | ||
| 1590 | * calculates MR size | ||
| 1591 | */ | ||
| 1592 | int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array, | ||
| 1593 | int num_phys_buf, | ||
| 1594 | u64 *iova_start, | ||
| 1595 | u64 *size) | ||
| 1596 | { | ||
| 1597 | struct ib_phys_buf *pbuf = phys_buf_array; | ||
| 1598 | u64 size_count = 0; | ||
| 1599 | u32 i; | ||
| 1600 | |||
| 1601 | if (num_phys_buf == 0) { | ||
| 1602 | ehca_gen_err("bad phys buf array len, num_phys_buf=0"); | ||
| 1603 | return -EINVAL; | ||
| 1604 | } | ||
| 1605 | /* check first buffer */ | ||
| 1606 | if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { | ||
| 1607 | ehca_gen_err("iova_start/addr mismatch, iova_start=%p " | ||
| 1608 | "pbuf->addr=%lx pbuf->size=%lx", | ||
| 1609 | iova_start, pbuf->addr, pbuf->size); | ||
| 1610 | return -EINVAL; | ||
| 1611 | } | ||
| 1612 | if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && | ||
| 1613 | (num_phys_buf > 1)) { | ||
| 1614 | ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx " | ||
| 1615 | "pbuf->size=%lx", pbuf->addr, pbuf->size); | ||
| 1616 | return -EINVAL; | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | for (i = 0; i < num_phys_buf; i++) { | ||
| 1620 | if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { | ||
| 1621 | ehca_gen_err("bad address, i=%x pbuf->addr=%lx " | ||
| 1622 | "pbuf->size=%lx", | ||
| 1623 | i, pbuf->addr, pbuf->size); | ||
| 1624 | return -EINVAL; | ||
| 1625 | } | ||
| 1626 | if (((i > 0) && /* not 1st */ | ||
| 1627 | (i < (num_phys_buf - 1)) && /* not last */ | ||
| 1628 | (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { | ||
| 1629 | ehca_gen_err("bad size, i=%x pbuf->size=%lx", | ||
| 1630 | i, pbuf->size); | ||
| 1631 | return -EINVAL; | ||
| 1632 | } | ||
| 1633 | size_count += pbuf->size; | ||
| 1634 | pbuf++; | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | *size = size_count; | ||
| 1638 | return 0; | ||
| 1639 | } /* end ehca_mr_chk_buf_and_calc_size() */ | ||
| 1640 | |||
| 1641 | /*----------------------------------------------------------------------*/ | ||
| 1642 | |||
| 1643 | /* check page list of map FMR verb for validness */ | ||
| 1644 | int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, | ||
| 1645 | u64 *page_list, | ||
| 1646 | int list_len) | ||
| 1647 | { | ||
| 1648 | u32 i; | ||
| 1649 | u64 *page; | ||
| 1650 | |||
| 1651 | if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) { | ||
| 1652 | ehca_gen_err("bad list_len, list_len=%x " | ||
| 1653 | "e_fmr->fmr_max_pages=%x fmr=%p", | ||
| 1654 | list_len, e_fmr->fmr_max_pages, e_fmr); | ||
| 1655 | return -EINVAL; | ||
| 1656 | } | ||
| 1657 | |||
| 1658 | /* each page must be aligned */ | ||
| 1659 | page = page_list; | ||
| 1660 | for (i = 0; i < list_len; i++) { | ||
| 1661 | if (*page % e_fmr->fmr_page_size) { | ||
| 1662 | ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p " | ||
| 1663 | "fmr_page_size=%x", i, *page, page, e_fmr, | ||
| 1664 | e_fmr->fmr_page_size); | ||
| 1665 | return -EINVAL; | ||
| 1666 | } | ||
| 1667 | page++; | ||
| 1668 | } | ||
| 1669 | |||
| 1670 | return 0; | ||
| 1671 | } /* end ehca_fmr_check_page_list() */ | ||
| 1672 | |||
| 1673 | /*----------------------------------------------------------------------*/ | ||
| 1674 | |||
| 1675 | /* setup page buffer from page info */ | ||
/*
 * Fill the given kpage buffer with up to 'number' 4k hardware-page
 * addresses, drawn from pginfo's current position.
 *
 * Three sources depending on pginfo->type:
 *  - EHCA_MR_PGI_PHYS: walk a phys_buf_array;
 *  - EHCA_MR_PGI_USER: walk the ib_umem chunk list of a user region;
 *  - EHCA_MR_PGI_FMR:  walk an FMR page list.
 *
 * pginfo's cursors (next_buf/next_chunk/next_nmap/next_listelem/next_4k)
 * and counters (page_cnt/page_4k_cnt) are advanced, so repeated calls
 * continue where the previous one stopped.
 *
 * Returns 0 on success, -EFAULT on inconsistent state or bad addresses.
 */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;	/* pages emitted (PHYS path) */
	u32 j = 0;	/* pages emitted (USER path) */

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
			/* 4k pages covered by this buffer, incl. start offset */
			num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
				  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
			/* 4k index of the buffer's start within its page */
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				/* sanity check */
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				/* addr 0 is only legal for the first buffer */
				if ( !(*kpage) && pbuf->addr ) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				/* a kernel page is complete every
				 * PAGE_SIZE/EHCA_PAGESIZE 4k pages */
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number) break;
			}
			/* buffer exhausted: advance to the next one */
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
					   << PAGE_SHIFT );
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				/* move to the next sg entry only once a full
				 * kernel page worth of 4k pages is emitted */
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number) break;
			}
			/* remember where to resume: prev_chunk is the last
			 * fully consumed chunk */
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if ( !(*kpage) ) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			/* an FMR page spans fmr_page_size/EHCA_PAGESIZE
			 * hardware pages */
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */
| 1840 | |||
| 1841 | /*----------------------------------------------------------------------*/ | ||
| 1842 | |||
/*
 * setup 1 page from page info page buffer
 *
 * Translates exactly one 4K hardware page from the position described by
 * pginfo's cursor fields (next_buf/next_4k, next_chunk/next_nmap, or
 * next_listelem depending on pginfo->type) into an absolute address
 * stored in *rpage, then advances the cursor by one 4K page.
 * Returns 0 on success, -EFAULT on a bad address or bad pginfo->type.
 */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check: cursor must not already be past the end */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		/* number of 4K pages covered by this phys buf, incl. the
		 * leading misalignment of its start address */
		num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		/* 4K offset of the buf start inside its kernel page */
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		/* addr==0 is allowed to yield *rpage==0; otherwise it is
		 * a translation failure */
		if ( !(*rpage) && tmp_pbuf->addr ) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		/* completed one full kernel page worth of 4K pages */
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		/* exhausted this phys buf: move on to the next one */
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		/* the unconditional break below means at most ONE chunk is
		 * examined per call — the loop only serves to step past the
		 * list head via list_for_each_entry_continue */
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = ( page_to_pfn(chunk->page_list[
						       pginfo->next_nmap].page)
				   << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if ( !(*rpage) ) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			/* finished a whole kernel page: advance to the next
			 * sg entry of this chunk */
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			/* chunk fully consumed: remember it so next_chunk
			 * advances past it below; otherwise next_chunk stays
			 * on the current chunk */
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if ( !(*rpage) ) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		/* finished one FMR page (fmr_page_size is a multiple of
		 * EHCA_PAGESIZE): step to the next list element */
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */
| 1968 | |||
| 1969 | /*----------------------------------------------------------------------*/ | ||
| 1970 | |||
| 1971 | /* | ||
| 1972 | * check MR if it is a max-MR, i.e. uses whole memory | ||
| 1973 | * in case it's a max-MR 1 is returned, else 0 | ||
| 1974 | */ | ||
| 1975 | int ehca_mr_is_maxmr(u64 size, | ||
| 1976 | u64 *iova_start) | ||
| 1977 | { | ||
| 1978 | /* a MR is treated as max-MR only if it fits following: */ | ||
| 1979 | if ((size == ((u64)high_memory - PAGE_OFFSET)) && | ||
| 1980 | (iova_start == (void*)KERNELBASE)) { | ||
| 1981 | ehca_gen_dbg("this is a max-MR"); | ||
| 1982 | return 1; | ||
| 1983 | } else | ||
| 1984 | return 0; | ||
| 1985 | } /* end ehca_mr_is_maxmr() */ | ||
| 1986 | |||
| 1987 | /*----------------------------------------------------------------------*/ | ||
| 1988 | |||
| 1989 | /* map access control for MR/MW. This routine is used for MR and MW. */ | ||
| 1990 | void ehca_mrmw_map_acl(int ib_acl, | ||
| 1991 | u32 *hipz_acl) | ||
| 1992 | { | ||
| 1993 | *hipz_acl = 0; | ||
| 1994 | if (ib_acl & IB_ACCESS_REMOTE_READ) | ||
| 1995 | *hipz_acl |= HIPZ_ACCESSCTRL_R_READ; | ||
| 1996 | if (ib_acl & IB_ACCESS_REMOTE_WRITE) | ||
| 1997 | *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE; | ||
| 1998 | if (ib_acl & IB_ACCESS_REMOTE_ATOMIC) | ||
| 1999 | *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC; | ||
| 2000 | if (ib_acl & IB_ACCESS_LOCAL_WRITE) | ||
| 2001 | *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE; | ||
| 2002 | if (ib_acl & IB_ACCESS_MW_BIND) | ||
| 2003 | *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND; | ||
| 2004 | } /* end ehca_mrmw_map_acl() */ | ||
| 2005 | |||
| 2006 | /*----------------------------------------------------------------------*/ | ||
| 2007 | |||
| 2008 | /* sets page size in hipz access control for MR/MW. */ | ||
| 2009 | void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/ | ||
| 2010 | { | ||
| 2011 | return; /* HCA supports only 4k */ | ||
| 2012 | } /* end ehca_mrmw_set_pgsize_hipz_acl() */ | ||
| 2013 | |||
| 2014 | /*----------------------------------------------------------------------*/ | ||
| 2015 | |||
| 2016 | /* | ||
| 2017 | * reverse map access control for MR/MW. | ||
| 2018 | * This routine is used for MR and MW. | ||
| 2019 | */ | ||
| 2020 | void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, | ||
| 2021 | int *ib_acl) /*OUT*/ | ||
| 2022 | { | ||
| 2023 | *ib_acl = 0; | ||
| 2024 | if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ) | ||
| 2025 | *ib_acl |= IB_ACCESS_REMOTE_READ; | ||
| 2026 | if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE) | ||
| 2027 | *ib_acl |= IB_ACCESS_REMOTE_WRITE; | ||
| 2028 | if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC) | ||
| 2029 | *ib_acl |= IB_ACCESS_REMOTE_ATOMIC; | ||
| 2030 | if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE) | ||
| 2031 | *ib_acl |= IB_ACCESS_LOCAL_WRITE; | ||
| 2032 | if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND) | ||
| 2033 | *ib_acl |= IB_ACCESS_MW_BIND; | ||
| 2034 | } /* end ehca_mrmw_reverse_map_acl() */ | ||
| 2035 | |||
| 2036 | |||
| 2037 | /*----------------------------------------------------------------------*/ | ||
| 2038 | |||
| 2039 | /* | ||
| 2040 | * map HIPZ rc to IB retcodes for MR/MW allocations | ||
| 2041 | * Used for hipz_mr_reg_alloc and hipz_mw_alloc. | ||
| 2042 | */ | ||
| 2043 | int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc) | ||
| 2044 | { | ||
| 2045 | switch (hipz_rc) { | ||
| 2046 | case H_SUCCESS: /* successful completion */ | ||
| 2047 | return 0; | ||
| 2048 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2049 | case H_RT_PARM: /* invalid resource type */ | ||
| 2050 | case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ | ||
| 2051 | case H_MLENGTH_PARM: /* invalid memory length */ | ||
| 2052 | case H_MEM_ACCESS_PARM: /* invalid access controls */ | ||
| 2053 | case H_CONSTRAINED: /* resource constraint */ | ||
| 2054 | return -EINVAL; | ||
| 2055 | case H_BUSY: /* long busy */ | ||
| 2056 | return -EBUSY; | ||
| 2057 | default: | ||
| 2058 | return -EINVAL; | ||
| 2059 | } | ||
| 2060 | } /* end ehca_mrmw_map_hrc_alloc() */ | ||
| 2061 | |||
| 2062 | /*----------------------------------------------------------------------*/ | ||
| 2063 | |||
| 2064 | /* | ||
| 2065 | * map HIPZ rc to IB retcodes for MR register rpage | ||
| 2066 | * Used for hipz_h_register_rpage_mr at registering last page | ||
| 2067 | */ | ||
| 2068 | int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc) | ||
| 2069 | { | ||
| 2070 | switch (hipz_rc) { | ||
| 2071 | case H_SUCCESS: /* registration complete */ | ||
| 2072 | return 0; | ||
| 2073 | case H_PAGE_REGISTERED: /* page registered */ | ||
| 2074 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2075 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2076 | /* case H_QT_PARM: invalid queue type */ | ||
| 2077 | case H_PARAMETER: /* | ||
| 2078 | * invalid logical address, | ||
| 2079 | * or count zero or greater 512 | ||
| 2080 | */ | ||
| 2081 | case H_TABLE_FULL: /* page table full */ | ||
| 2082 | case H_HARDWARE: /* HCA not operational */ | ||
| 2083 | return -EINVAL; | ||
| 2084 | case H_BUSY: /* long busy */ | ||
| 2085 | return -EBUSY; | ||
| 2086 | default: | ||
| 2087 | return -EINVAL; | ||
| 2088 | } | ||
| 2089 | } /* end ehca_mrmw_map_hrc_rrpg_last() */ | ||
| 2090 | |||
| 2091 | /*----------------------------------------------------------------------*/ | ||
| 2092 | |||
| 2093 | /* | ||
| 2094 | * map HIPZ rc to IB retcodes for MR register rpage | ||
| 2095 | * Used for hipz_h_register_rpage_mr at registering one page, but not last page | ||
| 2096 | */ | ||
| 2097 | int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc) | ||
| 2098 | { | ||
| 2099 | switch (hipz_rc) { | ||
| 2100 | case H_PAGE_REGISTERED: /* page registered */ | ||
| 2101 | return 0; | ||
| 2102 | case H_SUCCESS: /* registration complete */ | ||
| 2103 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2104 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2105 | /* case H_QT_PARM: invalid queue type */ | ||
| 2106 | case H_PARAMETER: /* | ||
| 2107 | * invalid logical address, | ||
| 2108 | * or count zero or greater 512 | ||
| 2109 | */ | ||
| 2110 | case H_TABLE_FULL: /* page table full */ | ||
| 2111 | case H_HARDWARE: /* HCA not operational */ | ||
| 2112 | return -EINVAL; | ||
| 2113 | case H_BUSY: /* long busy */ | ||
| 2114 | return -EBUSY; | ||
| 2115 | default: | ||
| 2116 | return -EINVAL; | ||
| 2117 | } | ||
| 2118 | } /* end ehca_mrmw_map_hrc_rrpg_notlast() */ | ||
| 2119 | |||
| 2120 | /*----------------------------------------------------------------------*/ | ||
| 2121 | |||
| 2122 | /* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */ | ||
| 2123 | int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc) | ||
| 2124 | { | ||
| 2125 | switch (hipz_rc) { | ||
| 2126 | case H_SUCCESS: /* successful completion */ | ||
| 2127 | return 0; | ||
| 2128 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2129 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2130 | return -EINVAL; | ||
| 2131 | case H_BUSY: /* long busy */ | ||
| 2132 | return -EBUSY; | ||
| 2133 | default: | ||
| 2134 | return -EINVAL; | ||
| 2135 | } | ||
| 2136 | } /* end ehca_mrmw_map_hrc_query_mr() */ | ||
| 2137 | |||
| 2138 | /*----------------------------------------------------------------------*/ | ||
| 2139 | /*----------------------------------------------------------------------*/ | ||
| 2140 | |||
| 2141 | /* | ||
| 2142 | * map HIPZ rc to IB retcodes for freeing MR resource | ||
| 2143 | * Used for hipz_h_free_resource_mr | ||
| 2144 | */ | ||
| 2145 | int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc) | ||
| 2146 | { | ||
| 2147 | switch (hipz_rc) { | ||
| 2148 | case H_SUCCESS: /* resource freed */ | ||
| 2149 | return 0; | ||
| 2150 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2151 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2152 | case H_R_STATE: /* invalid resource state */ | ||
| 2153 | case H_HARDWARE: /* HCA not operational */ | ||
| 2154 | return -EINVAL; | ||
| 2155 | case H_RESOURCE: /* Resource in use */ | ||
| 2156 | case H_BUSY: /* long busy */ | ||
| 2157 | return -EBUSY; | ||
| 2158 | default: | ||
| 2159 | return -EINVAL; | ||
| 2160 | } | ||
| 2161 | } /* end ehca_mrmw_map_hrc_free_mr() */ | ||
| 2162 | |||
| 2163 | /*----------------------------------------------------------------------*/ | ||
| 2164 | |||
| 2165 | /* | ||
| 2166 | * map HIPZ rc to IB retcodes for freeing MW resource | ||
| 2167 | * Used for hipz_h_free_resource_mw | ||
| 2168 | */ | ||
| 2169 | int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc) | ||
| 2170 | { | ||
| 2171 | switch (hipz_rc) { | ||
| 2172 | case H_SUCCESS: /* resource freed */ | ||
| 2173 | return 0; | ||
| 2174 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2175 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2176 | case H_R_STATE: /* invalid resource state */ | ||
| 2177 | case H_HARDWARE: /* HCA not operational */ | ||
| 2178 | return -EINVAL; | ||
| 2179 | case H_RESOURCE: /* Resource in use */ | ||
| 2180 | case H_BUSY: /* long busy */ | ||
| 2181 | return -EBUSY; | ||
| 2182 | default: | ||
| 2183 | return -EINVAL; | ||
| 2184 | } | ||
| 2185 | } /* end ehca_mrmw_map_hrc_free_mw() */ | ||
| 2186 | |||
| 2187 | /*----------------------------------------------------------------------*/ | ||
| 2188 | |||
| 2189 | /* | ||
| 2190 | * map HIPZ rc to IB retcodes for SMR registrations | ||
| 2191 | * Used for hipz_h_register_smr. | ||
| 2192 | */ | ||
| 2193 | int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc) | ||
| 2194 | { | ||
| 2195 | switch (hipz_rc) { | ||
| 2196 | case H_SUCCESS: /* successful completion */ | ||
| 2197 | return 0; | ||
| 2198 | case H_ADAPTER_PARM: /* invalid adapter handle */ | ||
| 2199 | case H_RH_PARM: /* invalid resource handle */ | ||
| 2200 | case H_MEM_PARM: /* invalid MR virtual address */ | ||
| 2201 | case H_MEM_ACCESS_PARM: /* invalid access controls */ | ||
| 2202 | case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ | ||
| 2203 | return -EINVAL; | ||
| 2204 | case H_BUSY: /* long busy */ | ||
| 2205 | return -EBUSY; | ||
| 2206 | default: | ||
| 2207 | return -EINVAL; | ||
| 2208 | } | ||
| 2209 | } /* end ehca_mrmw_map_hrc_reg_smr() */ | ||
| 2210 | |||
| 2211 | /*----------------------------------------------------------------------*/ | ||
| 2212 | |||
| 2213 | /* | ||
| 2214 | * MR destructor and constructor | ||
| 2215 | * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, | ||
| 2216 | * except struct ib_mr and spinlock | ||
| 2217 | */ | ||
| 2218 | void ehca_mr_deletenew(struct ehca_mr *mr) | ||
| 2219 | { | ||
| 2220 | mr->flags = 0; | ||
| 2221 | mr->num_pages = 0; | ||
| 2222 | mr->num_4k = 0; | ||
| 2223 | mr->acl = 0; | ||
| 2224 | mr->start = NULL; | ||
| 2225 | mr->fmr_page_size = 0; | ||
| 2226 | mr->fmr_max_pages = 0; | ||
| 2227 | mr->fmr_max_maps = 0; | ||
| 2228 | mr->fmr_map_cnt = 0; | ||
| 2229 | memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); | ||
| 2230 | memset(&mr->galpas, 0, sizeof(mr->galpas)); | ||
| 2231 | mr->nr_of_pages = 0; | ||
| 2232 | mr->pagearray = NULL; | ||
| 2233 | } /* end ehca_mr_deletenew() */ | ||
| 2234 | |||
| 2235 | int ehca_init_mrmw_cache(void) | ||
| 2236 | { | ||
| 2237 | mr_cache = kmem_cache_create("ehca_cache_mr", | ||
| 2238 | sizeof(struct ehca_mr), 0, | ||
| 2239 | SLAB_HWCACHE_ALIGN, | ||
| 2240 | NULL, NULL); | ||
| 2241 | if (!mr_cache) | ||
| 2242 | return -ENOMEM; | ||
| 2243 | mw_cache = kmem_cache_create("ehca_cache_mw", | ||
| 2244 | sizeof(struct ehca_mw), 0, | ||
| 2245 | SLAB_HWCACHE_ALIGN, | ||
| 2246 | NULL, NULL); | ||
| 2247 | if (!mw_cache) { | ||
| 2248 | kmem_cache_destroy(mr_cache); | ||
| 2249 | mr_cache = NULL; | ||
| 2250 | return -ENOMEM; | ||
| 2251 | } | ||
| 2252 | return 0; | ||
| 2253 | } | ||
| 2254 | |||
| 2255 | void ehca_cleanup_mrmw_cache(void) | ||
| 2256 | { | ||
| 2257 | if (mr_cache) | ||
| 2258 | kmem_cache_destroy(mr_cache); | ||
| 2259 | if (mw_cache) | ||
| 2260 | kmem_cache_destroy(mw_cache); | ||
| 2261 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h new file mode 100644 index 00000000000..d936e40a574 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h | |||
| @@ -0,0 +1,140 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * MR/MW declarations and inline functions | ||
| 5 | * | ||
| 6 | * Authors: Dietmar Decker <ddecker@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
#ifndef _EHCA_MRMW_H_
#define _EHCA_MRMW_H_

/* MR registration / reregistration helpers (defined in ehca_mrmw.c) */
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey,
		u32 *rkey);

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo);

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int mr_access_flags,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey);

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr);

/* shared-MR and max-MR (whole-memory) registration */
int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey,
		 u32 *rkey);

int ehca_reg_internal_maxmr(struct ehca_shca *shca,
			    struct ehca_pd *e_pd,
			    struct ehca_mr **maxmr);

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey);

int ehca_dereg_internal_maxmr(struct ehca_shca *shca);

/* input validation helpers */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size);

int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len);

/* fill up to 'number' 4K page addresses into kpage from pginfo's cursor */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage);

/* single-page variant of ehca_set_pagebuf */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage);

/* returns 1 iff (size, iova_start) describes a whole-memory max-MR */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start);

/* IB <-> HIPZ access control bit translation (used for MR and MW) */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl);

void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);

void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl);

/* HIPZ hcall return code -> IB errno mappers, one per hcall context */
int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);

int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);

int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);

int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);

int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);

int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);

int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);

/* zero all ehca_mr fields except struct ib_mr and spinlock */
void ehca_mr_deletenew(struct ehca_mr *mr);

#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c new file mode 100644 index 00000000000..2c3cdc6f7b3 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_pd.c | |||
| @@ -0,0 +1,114 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * PD functions | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * | ||
| 8 | * Copyright (c) 2005 IBM Corporation | ||
| 9 | * | ||
| 10 | * All rights reserved. | ||
| 11 | * | ||
| 12 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 13 | * BSD. | ||
| 14 | * | ||
| 15 | * OpenIB BSD License | ||
| 16 | * | ||
| 17 | * Redistribution and use in source and binary forms, with or without | ||
| 18 | * modification, are permitted provided that the following conditions are met: | ||
| 19 | * | ||
| 20 | * Redistributions of source code must retain the above copyright notice, this | ||
| 21 | * list of conditions and the following disclaimer. | ||
| 22 | * | ||
| 23 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 24 | * this list of conditions and the following disclaimer in the documentation | ||
| 25 | * and/or other materials | ||
| 26 | * provided with the distribution. | ||
| 27 | * | ||
| 28 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 29 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 30 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 31 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 32 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 33 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 34 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 35 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 36 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 37 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 38 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 39 | */ | ||
| 40 | |||
| 41 | #include <asm/current.h> | ||
| 42 | |||
| 43 | #include "ehca_tools.h" | ||
| 44 | #include "ehca_iverbs.h" | ||
| 45 | |||
| 46 | static struct kmem_cache *pd_cache; | ||
| 47 | |||
| 48 | struct ib_pd *ehca_alloc_pd(struct ib_device *device, | ||
| 49 | struct ib_ucontext *context, struct ib_udata *udata) | ||
| 50 | { | ||
| 51 | struct ehca_pd *pd; | ||
| 52 | |||
| 53 | pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL); | ||
| 54 | if (!pd) { | ||
| 55 | ehca_err(device, "device=%p context=%p out of memory", | ||
| 56 | device, context); | ||
| 57 | return ERR_PTR(-ENOMEM); | ||
| 58 | } | ||
| 59 | |||
| 60 | memset(pd, 0, sizeof(struct ehca_pd)); | ||
| 61 | pd->ownpid = current->tgid; | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Kernel PD: when device = -1, 0 | ||
| 65 | * User PD: when context != -1 | ||
| 66 | */ | ||
| 67 | if (!context) { | ||
| 68 | /* | ||
| 69 | * Kernel PDs after init reuses always | ||
| 70 | * the one created in ehca_shca_reopen() | ||
| 71 | */ | ||
| 72 | struct ehca_shca *shca = container_of(device, struct ehca_shca, | ||
| 73 | ib_device); | ||
| 74 | pd->fw_pd.value = shca->pd->fw_pd.value; | ||
| 75 | } else | ||
| 76 | pd->fw_pd.value = (u64)pd; | ||
| 77 | |||
| 78 | return &pd->ib_pd; | ||
| 79 | } | ||
| 80 | |||
| 81 | int ehca_dealloc_pd(struct ib_pd *pd) | ||
| 82 | { | ||
| 83 | u32 cur_pid = current->tgid; | ||
| 84 | struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); | ||
| 85 | |||
| 86 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 87 | my_pd->ownpid != cur_pid) { | ||
| 88 | ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x", | ||
| 89 | cur_pid, my_pd->ownpid); | ||
| 90 | return -EINVAL; | ||
| 91 | } | ||
| 92 | |||
| 93 | kmem_cache_free(pd_cache, | ||
| 94 | container_of(pd, struct ehca_pd, ib_pd)); | ||
| 95 | |||
| 96 | return 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | int ehca_init_pd_cache(void) | ||
| 100 | { | ||
| 101 | pd_cache = kmem_cache_create("ehca_cache_pd", | ||
| 102 | sizeof(struct ehca_pd), 0, | ||
| 103 | SLAB_HWCACHE_ALIGN, | ||
| 104 | NULL, NULL); | ||
| 105 | if (!pd_cache) | ||
| 106 | return -ENOMEM; | ||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | void ehca_cleanup_pd_cache(void) | ||
| 111 | { | ||
| 112 | if (pd_cache) | ||
| 113 | kmem_cache_destroy(pd_cache); | ||
| 114 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h new file mode 100644 index 00000000000..8707d297ce4 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_qes.h | |||
| @@ -0,0 +1,259 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Hardware request structures | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 8 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | |||
| 44 | #ifndef _EHCA_QES_H_ | ||
| 45 | #define _EHCA_QES_H_ | ||
| 46 | |||
| 47 | #include "ehca_tools.h" | ||
| 48 | |||
/* virtual scatter gather entry to specify remote addresses with length.
 * NOTE(review): part of the hardware request structures in this header —
 * field order and sizes appear to be firmware-defined; do not reorder. */
struct ehca_vsgentry {
	u64 vaddr;	/* virtual address of the segment */
	u32 lkey;	/* memory key (name suggests local key — confirm) */
	u32 length;	/* segment length in bytes */
};
| 55 | |||
| 56 | #define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7) | ||
| 57 | #define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3) | ||
| 58 | #define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12) | ||
| 59 | #define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31) | ||
| 60 | #define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47) | ||
| 61 | #define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55) | ||
| 62 | #define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63) | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Unreliable Datagram Address Vector Format | ||
| 66 | * see IBTA Vol1 chapter 8.3 Global Routing Header | ||
| 67 | */ | ||
| 68 | struct ehca_ud_av { | ||
| 69 | u8 sl; | ||
| 70 | u8 lnh; | ||
| 71 | u16 dlid; | ||
| 72 | u8 reserved1; | ||
| 73 | u8 reserved2; | ||
| 74 | u8 reserved3; | ||
| 75 | u8 slid_path_bits; | ||
| 76 | u8 reserved4; | ||
| 77 | u8 ipd; | ||
| 78 | u8 reserved5; | ||
| 79 | u8 pmtu; | ||
| 80 | u32 reserved6; | ||
| 81 | u64 reserved7; | ||
| 82 | union { | ||
| 83 | struct { | ||
| 84 | u64 word_0; /* always set to 6 */ | ||
| 85 | /*should be 0x1B for IB transport */ | ||
| 86 | u64 word_1; | ||
| 87 | u64 word_2; | ||
| 88 | u64 word_3; | ||
| 89 | u64 word_4; | ||
| 90 | } grh; | ||
| 91 | struct { | ||
| 92 | u32 wd_0; | ||
| 93 | u32 wd_1; | ||
| 94 | /* DWord_1 --> SGID */ | ||
| 95 | |||
| 96 | u32 sgid_wd3; | ||
| 97 | u32 sgid_wd2; | ||
| 98 | |||
| 99 | u32 sgid_wd1; | ||
| 100 | u32 sgid_wd0; | ||
| 101 | /* DWord_3 --> DGID */ | ||
| 102 | |||
| 103 | u32 dgid_wd3; | ||
| 104 | u32 dgid_wd2; | ||
| 105 | |||
| 106 | u32 dgid_wd1; | ||
| 107 | u32 dgid_wd0; | ||
| 108 | } grh_l; | ||
| 109 | }; | ||
| 110 | }; | ||
| 111 | |||
/* maximum number of sg entries allowed in a WQE */
#define MAX_WQE_SG_ENTRIES 252

/* WQE operation types (presumably for ehca_wqe.optype) */
#define WQE_OPTYPE_SEND 0x80
#define WQE_OPTYPE_RDMAREAD 0x40
#define WQE_OPTYPE_RDMAWRITE 0x20
#define WQE_OPTYPE_CMPSWAP 0x10
#define WQE_OPTYPE_FETCHADD 0x08
#define WQE_OPTYPE_BIND 0x04

/* work-request flags (presumably for ehca_wqe.wr_flag) */
#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
#define WQE_WRFLAG_FENCE 0x40
#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
#define WQE_WRFLAG_SOLIC_EVENT 0x10

/* extra WQE flags (presumably for ehca_wqe.wqef) */
#define WQEF_CACHE_HINT 0x80
#define WQEF_CACHE_HINT_RD_WR 0x40
#define WQEF_TIMED_WQE 0x20
#define WQEF_PURGE 0x08
#define WQEF_HIGH_NIBBLE 0xF0

/* memory-window bind access-control bits — TODO confirm target field */
#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
#define MW_BIND_ACCESSCTRL_R_READ 0x20
#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
| 136 | |||
/*
 * Work Queue Entry as placed on the send/receive queues.
 * NOTE(review): layout appears to be firmware-defined — do not reorder
 * or resize fields.  Which union member applies presumably depends on
 * the QP's service type (see the swqe/rwqe size computations in
 * ehca_qp.c) — TODO confirm.
 */
struct ehca_wqe {
	u64 work_request_id;		/* opaque ID, echoed back in the CQE */
	u8 optype;			/* WQE_OPTYPE_* */
	u8 wr_flag;			/* WQE_WRFLAG_* */
	u16 pkeyi;
	u8 wqef;			/* WQEF_* */
	u8 nr_of_data_seg;		/* presumably number of sg_list entries used */
	u16 wqe_provided_slid;
	u32 destination_qp_number;
	u32 resync_psn_sqp;
	u32 local_ee_context_qkey;
	u32 immediate_data;
	union {
		/* RC/UC send, RDMA and atomic operations */
		struct {
			u64 remote_virtual_adress;
			u32 rkey;
			u32 reserved;
			u64 atomic_1st_op_dma_len;
			u64 atomic_2nd_op;
			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];

		} nud;
		/* UD send referencing the address vector by pointer */
		struct {
			u64 ehca_ud_av_ptr;
			u64 reserved1;
			u64 reserved2;
			u64 reserved3;
			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
		} ud_avp;
		/* UD send with the address vector embedded inline */
		struct {
			struct ehca_ud_av ud_av;
			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
						     2];
		} ud_av;
		/* receive WQE */
		struct {
			u64 reserved0;
			u64 reserved1;
			u64 reserved2;
			u64 reserved3;
			struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
		} all_rcv;

		/* memory-window bind */
		struct {
			u64 reserved;
			u32 rkey;
			u32 old_rkey;
			u64 reserved1;
			u64 reserved2;
			u64 virtual_address;
			u32 reserved3;
			u32 length;
			u32 reserved4;
			u16 reserved5;
			u8 reserved6;
			u8 lr_ctl;
			u32 lkey;
			u32 reserved7;
			u64 reserved8;
			u64 reserved9;
			u64 reserved10;
			u64 reserved11;
		} bind;
		/* inline data descriptor — TODO confirm semantics of size/start */
		struct {
			u64 reserved12;
			u64 reserved13;
			u32 size;
			u32 start;
		} inline_data;
	} u;

};
| 208 | |||
/* completion-flag masks (presumably for ehca_cqe.w_completion_flags) */
#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
#define WC_IMM_DATA EHCA_BMASK_IBM(1,1)
#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2)
#define WC_SE_BIT EHCA_BMASK_IBM(3,3)
/* bit masks for the CQE status word — TODO confirm against firmware spec */
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10

/*
 * Completion Queue Entry as written by the hardware.
 * NOTE(review): layout appears to be firmware-defined — do not reorder
 * or resize fields.
 */
struct ehca_cqe {
	u64 work_request_id;	/* copied from the completed WQE */
	u8 optype;
	u8 w_completion_flags;	/* WC_* masks above */
	u16 reserved1;
	u32 nr_bytes_transferred;
	u32 immediate_data;
	u32 local_qp_number;
	u8 freed_resource_count;
	u8 service_level;
	u16 wqe_count;
	u32 qp_token;
	u32 qkey_ee_token;
	u32 remote_qp_number;
	u16 dlid;
	u16 rlid;
	u16 reserved2;
	u16 pkey_index;
	u32 cqe_timestamp;
	u32 wqe_timestamp;
	u8 wqe_timestamp_valid;
	u8 reserved3;
	u8 reserved4;
	u8 cqe_flags;
	u32 status;		/* WC_STATUS_* bits */
};
| 243 | |||
/* Event Queue Entry — a single opaque 64-bit word from the hardware */
struct ehca_eqe {
	u64 entry;
};

/*
 * Memory Region Table Entry.
 * NOTE(review): layout appears to be firmware-defined — do not reorder
 * or resize fields.
 */
struct ehca_mrte {
	u64 starting_va;
	u64 length; /* length of memory region in bytes*/
	u32 pd;
	u8 key_instance;
	u8 pagesize;
	u8 mr_control;
	u8 local_remote_access_ctrl;
	u8 reserved[0x20 - 0x18];	/* pad to offset 0x20 */
	u64 at_pointer[4];		/* presumably address-translation pointers — TODO confirm */
};
#endif /*_EHCA_QES_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c new file mode 100644 index 00000000000..4b27bedc6c2 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
| @@ -0,0 +1,1506 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * QP functions | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | |||
| 45 | #include <asm/current.h> | ||
| 46 | |||
| 47 | #include "ehca_classes.h" | ||
| 48 | #include "ehca_tools.h" | ||
| 49 | #include "ehca_qes.h" | ||
| 50 | #include "ehca_iverbs.h" | ||
| 51 | #include "hcp_if.h" | ||
| 52 | #include "hipz_fns.h" | ||
| 53 | |||
/* slab cache for struct ehca_qp allocations (see ehca_create_qp) */
static struct kmem_cache *qp_cache;

/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
				     IB_QP_MAX_QP_RD_ATOMIC | \
				     IB_QP_ACCESS_FLAGS | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)

/*
 * ehca (internal) qp state values
 * NOTE(review): numeric values appear to be firmware-defined (note the
 * gaps at 4 and 7, and ERR at 128) — do not renumber.
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX /* nr of transitions, this must be last!!! */
};
| 94 | |||
| 95 | /* | ||
| 96 | * ib2ehca_qp_state maps IB to ehca qp_state | ||
| 97 | * returns ehca qp state corresponding to given ib qp state | ||
| 98 | */ | ||
| 99 | static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state) | ||
| 100 | { | ||
| 101 | switch (ib_qp_state) { | ||
| 102 | case IB_QPS_RESET: | ||
| 103 | return EHCA_QPS_RESET; | ||
| 104 | case IB_QPS_INIT: | ||
| 105 | return EHCA_QPS_INIT; | ||
| 106 | case IB_QPS_RTR: | ||
| 107 | return EHCA_QPS_RTR; | ||
| 108 | case IB_QPS_RTS: | ||
| 109 | return EHCA_QPS_RTS; | ||
| 110 | case IB_QPS_SQD: | ||
| 111 | return EHCA_QPS_SQD; | ||
| 112 | case IB_QPS_SQE: | ||
| 113 | return EHCA_QPS_SQE; | ||
| 114 | case IB_QPS_ERR: | ||
| 115 | return EHCA_QPS_ERR; | ||
| 116 | default: | ||
| 117 | ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state); | ||
| 118 | return -EINVAL; | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | /* | ||
| 123 | * ehca2ib_qp_state maps ehca to IB qp_state | ||
| 124 | * returns ib qp state corresponding to given ehca qp state | ||
| 125 | */ | ||
| 126 | static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state | ||
| 127 | ehca_qp_state) | ||
| 128 | { | ||
| 129 | switch (ehca_qp_state) { | ||
| 130 | case EHCA_QPS_RESET: | ||
| 131 | return IB_QPS_RESET; | ||
| 132 | case EHCA_QPS_INIT: | ||
| 133 | return IB_QPS_INIT; | ||
| 134 | case EHCA_QPS_RTR: | ||
| 135 | return IB_QPS_RTR; | ||
| 136 | case EHCA_QPS_RTS: | ||
| 137 | return IB_QPS_RTS; | ||
| 138 | case EHCA_QPS_SQD: | ||
| 139 | return IB_QPS_SQD; | ||
| 140 | case EHCA_QPS_SQE: | ||
| 141 | return IB_QPS_SQE; | ||
| 142 | case EHCA_QPS_ERR: | ||
| 143 | return IB_QPS_ERR; | ||
| 144 | default: | ||
| 145 | ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state); | ||
| 146 | return -EINVAL; | ||
| 147 | } | ||
| 148 | } | ||
| 149 | |||
/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,	/* special QPs (SMI/GSI), see ib2ehcaqptype() */
	QPT_MAX
};
| 161 | |||
| 162 | /* | ||
| 163 | * ib2ehcaqptype maps Ib to ehca qp_type | ||
| 164 | * returns ehca qp type corresponding to ib qp type | ||
| 165 | */ | ||
| 166 | static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype) | ||
| 167 | { | ||
| 168 | switch (ibqptype) { | ||
| 169 | case IB_QPT_SMI: | ||
| 170 | case IB_QPT_GSI: | ||
| 171 | return QPT_SQP; | ||
| 172 | case IB_QPT_RC: | ||
| 173 | return QPT_RC; | ||
| 174 | case IB_QPT_UC: | ||
| 175 | return QPT_UC; | ||
| 176 | case IB_QPT_UD: | ||
| 177 | return QPT_UD; | ||
| 178 | default: | ||
| 179 | ehca_gen_err("Invalid ibqptype=%x", ibqptype); | ||
| 180 | return -EINVAL; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate, | ||
| 185 | int ib_tostate) | ||
| 186 | { | ||
| 187 | int index = -EINVAL; | ||
| 188 | switch (ib_tostate) { | ||
| 189 | case IB_QPS_RESET: | ||
| 190 | index = IB_QPST_ANY2RESET; | ||
| 191 | break; | ||
| 192 | case IB_QPS_INIT: | ||
| 193 | switch (ib_fromstate) { | ||
| 194 | case IB_QPS_RESET: | ||
| 195 | index = IB_QPST_RESET2INIT; | ||
| 196 | break; | ||
| 197 | case IB_QPS_INIT: | ||
| 198 | index = IB_QPST_INIT2INIT; | ||
| 199 | break; | ||
| 200 | } | ||
| 201 | break; | ||
| 202 | case IB_QPS_RTR: | ||
| 203 | if (ib_fromstate == IB_QPS_INIT) | ||
| 204 | index = IB_QPST_INIT2RTR; | ||
| 205 | break; | ||
| 206 | case IB_QPS_RTS: | ||
| 207 | switch (ib_fromstate) { | ||
| 208 | case IB_QPS_RTR: | ||
| 209 | index = IB_QPST_RTR2RTS; | ||
| 210 | break; | ||
| 211 | case IB_QPS_RTS: | ||
| 212 | index = IB_QPST_RTS2RTS; | ||
| 213 | break; | ||
| 214 | case IB_QPS_SQD: | ||
| 215 | index = IB_QPST_SQD2RTS; | ||
| 216 | break; | ||
| 217 | case IB_QPS_SQE: | ||
| 218 | index = IB_QPST_SQE2RTS; | ||
| 219 | break; | ||
| 220 | } | ||
| 221 | break; | ||
| 222 | case IB_QPS_SQD: | ||
| 223 | if (ib_fromstate == IB_QPS_RTS) | ||
| 224 | index = IB_QPST_RTS2SQD; | ||
| 225 | break; | ||
| 226 | case IB_QPS_SQE: | ||
| 227 | break; | ||
| 228 | case IB_QPS_ERR: | ||
| 229 | index = IB_QPST_ANY2ERR; | ||
| 230 | break; | ||
| 231 | default: | ||
| 232 | break; | ||
| 233 | } | ||
| 234 | return index; | ||
| 235 | } | ||
| 236 | |||
/* hcall service types, passed to firmware via parms.servicetype in ehca_create_qp() */
enum ehca_service_type {
	ST_RC = 0,
	ST_UC = 1,
	ST_RD = 2,
	ST_UD = 3
};
| 243 | |||
| 244 | /* | ||
| 245 | * ibqptype2servicetype returns hcp service type corresponding to given | ||
| 246 | * ib qp type used by create_qp() | ||
| 247 | */ | ||
| 248 | static inline int ibqptype2servicetype(enum ib_qp_type ibqptype) | ||
| 249 | { | ||
| 250 | switch (ibqptype) { | ||
| 251 | case IB_QPT_SMI: | ||
| 252 | case IB_QPT_GSI: | ||
| 253 | return ST_UD; | ||
| 254 | case IB_QPT_RC: | ||
| 255 | return ST_RC; | ||
| 256 | case IB_QPT_UC: | ||
| 257 | return ST_UC; | ||
| 258 | case IB_QPT_UD: | ||
| 259 | return ST_UD; | ||
| 260 | case IB_QPT_RAW_IPV6: | ||
| 261 | return -EINVAL; | ||
| 262 | case IB_QPT_RAW_ETY: | ||
| 263 | return -EINVAL; | ||
| 264 | default: | ||
| 265 | ehca_gen_err("Invalid ibqptype=%x", ibqptype); | ||
| 266 | return -EINVAL; | ||
| 267 | } | ||
| 268 | } | ||
| 269 | |||
| 270 | /* | ||
| 271 | * init_qp_queues initializes/constructs r/squeue and registers queue pages. | ||
| 272 | */ | ||
| 273 | static inline int init_qp_queues(struct ehca_shca *shca, | ||
| 274 | struct ehca_qp *my_qp, | ||
| 275 | int nr_sq_pages, | ||
| 276 | int nr_rq_pages, | ||
| 277 | int swqe_size, | ||
| 278 | int rwqe_size, | ||
| 279 | int nr_send_sges, int nr_receive_sges) | ||
| 280 | { | ||
| 281 | int ret, cnt, ipz_rc; | ||
| 282 | void *vpage; | ||
| 283 | u64 rpage, h_ret; | ||
| 284 | struct ib_device *ib_dev = &shca->ib_device; | ||
| 285 | struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle; | ||
| 286 | |||
| 287 | ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue, | ||
| 288 | nr_sq_pages, | ||
| 289 | EHCA_PAGESIZE, swqe_size, nr_send_sges); | ||
| 290 | if (!ipz_rc) { | ||
| 291 | ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x", | ||
| 292 | ipz_rc); | ||
| 293 | return -EBUSY; | ||
| 294 | } | ||
| 295 | |||
| 296 | ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue, | ||
| 297 | nr_rq_pages, | ||
| 298 | EHCA_PAGESIZE, rwqe_size, nr_receive_sges); | ||
| 299 | if (!ipz_rc) { | ||
| 300 | ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x", | ||
| 301 | ipz_rc); | ||
| 302 | ret = -EBUSY; | ||
| 303 | goto init_qp_queues0; | ||
| 304 | } | ||
| 305 | /* register SQ pages */ | ||
| 306 | for (cnt = 0; cnt < nr_sq_pages; cnt++) { | ||
| 307 | vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue); | ||
| 308 | if (!vpage) { | ||
| 309 | ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() " | ||
| 310 | "failed p_vpage= %p", vpage); | ||
| 311 | ret = -EINVAL; | ||
| 312 | goto init_qp_queues1; | ||
| 313 | } | ||
| 314 | rpage = virt_to_abs(vpage); | ||
| 315 | |||
| 316 | h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, | ||
| 317 | my_qp->ipz_qp_handle, | ||
| 318 | &my_qp->pf, 0, 0, | ||
| 319 | rpage, 1, | ||
| 320 | my_qp->galpas.kernel); | ||
| 321 | if (h_ret < H_SUCCESS) { | ||
| 322 | ehca_err(ib_dev, "SQ hipz_qp_register_rpage()" | ||
| 323 | " failed rc=%lx", h_ret); | ||
| 324 | ret = ehca2ib_return_code(h_ret); | ||
| 325 | goto init_qp_queues1; | ||
| 326 | } | ||
| 327 | } | ||
| 328 | |||
| 329 | ipz_qeit_reset(&my_qp->ipz_squeue); | ||
| 330 | |||
| 331 | /* register RQ pages */ | ||
| 332 | for (cnt = 0; cnt < nr_rq_pages; cnt++) { | ||
| 333 | vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue); | ||
| 334 | if (!vpage) { | ||
| 335 | ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() " | ||
| 336 | "failed p_vpage = %p", vpage); | ||
| 337 | ret = -EINVAL; | ||
| 338 | goto init_qp_queues1; | ||
| 339 | } | ||
| 340 | |||
| 341 | rpage = virt_to_abs(vpage); | ||
| 342 | |||
| 343 | h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, | ||
| 344 | my_qp->ipz_qp_handle, | ||
| 345 | &my_qp->pf, 0, 1, | ||
| 346 | rpage, 1,my_qp->galpas.kernel); | ||
| 347 | if (h_ret < H_SUCCESS) { | ||
| 348 | ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed " | ||
| 349 | "rc=%lx", h_ret); | ||
| 350 | ret = ehca2ib_return_code(h_ret); | ||
| 351 | goto init_qp_queues1; | ||
| 352 | } | ||
| 353 | if (cnt == (nr_rq_pages - 1)) { /* last page! */ | ||
| 354 | if (h_ret != H_SUCCESS) { | ||
| 355 | ehca_err(ib_dev, "RQ hipz_qp_register_rpage() " | ||
| 356 | "h_ret= %lx ", h_ret); | ||
| 357 | ret = ehca2ib_return_code(h_ret); | ||
| 358 | goto init_qp_queues1; | ||
| 359 | } | ||
| 360 | vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue); | ||
| 361 | if (vpage) { | ||
| 362 | ehca_err(ib_dev, "ipz_qpageit_get_inc() " | ||
| 363 | "should not succeed vpage=%p", vpage); | ||
| 364 | ret = -EINVAL; | ||
| 365 | goto init_qp_queues1; | ||
| 366 | } | ||
| 367 | } else { | ||
| 368 | if (h_ret != H_PAGE_REGISTERED) { | ||
| 369 | ehca_err(ib_dev, "RQ hipz_qp_register_rpage() " | ||
| 370 | "h_ret= %lx ", h_ret); | ||
| 371 | ret = ehca2ib_return_code(h_ret); | ||
| 372 | goto init_qp_queues1; | ||
| 373 | } | ||
| 374 | } | ||
| 375 | } | ||
| 376 | |||
| 377 | ipz_qeit_reset(&my_qp->ipz_rqueue); | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | |||
| 381 | init_qp_queues1: | ||
| 382 | ipz_queue_dtor(&my_qp->ipz_rqueue); | ||
| 383 | init_qp_queues0: | ||
| 384 | ipz_queue_dtor(&my_qp->ipz_squeue); | ||
| 385 | return ret; | ||
| 386 | } | ||
| 387 | |||
/*
 * ehca_create_qp - ib_device.create_qp entry point for the eHCA driver.
 *
 * Allocates and initializes an ehca_qp: reserves an idr token, asks the
 * firmware for QP resources, sizes and registers the send/receive work
 * queues, and — for userspace QPs — mmaps queue and firmware-handle
 * pages into the caller and fills the udata response.
 *
 * Returns the embedded &my_qp->ib_qp on success, ERR_PTR(-errno) on
 * failure.  Cleanup is a fall-through goto chain: each exit label undoes
 * one acquisition step.
 */
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	/*
	 * message-size tables for "direct access" (daqp/LL) QPs, indexed
	 * by the adjusted max sge count.
	 * NOTE(review): indexed by max_send_sge/max_recv_sge below without
	 * a range check — an sge count > 5 (rc) or > 4 (ud) would read out
	 * of bounds; confirm callers bound these values.
	 */
	static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
	static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
	struct ehca_qp *my_qp;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0;
	u8 daqp_completion, isdaqp;
	unsigned long flags;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * The upper bits of qp_type are (ab)used as driver-private flags:
	 * 0x60 = daqp completion bits, 0x80 = daqp (low-latency) QP.
	 * Strip them so the remaining value is a plain ib_qp_type.
	 */
	/* save daqp completion bits */
	daqp_completion = init_attr->qp_type & 0x60;
	/* save daqp bit */
	isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
	init_attr->qp_type = init_attr->qp_type & 0x1F;

	if (init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_SMI &&
	    init_attr->qp_type != IB_QPT_GSI &&
	    init_attr->qp_type != IB_QPT_UC &&
	    init_attr->qp_type != IB_QPT_RC) {
		ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	/* daqp mode is only supported for RC and UD, with <= 255 WRs */
	if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
	    && isdaqp) {
		ehca_err(pd->device, "unsupported LL QP Type=%x",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
		   (init_attr->cap.max_send_wr > 255 ||
		    init_attr->cap.max_recv_wr > 255 )) {
		ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
			 "or max_rq_wr=%x for QP Type=%x",
			 init_attr->cap.max_send_wr,
			 init_attr->cap.max_recv_wr,init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
		  init_attr->cap.max_send_wr > 255) {
		ehca_err(pd->device,
			 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
			 init_attr->cap.max_send_wr, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	/* non-NULL context marks this as a userspace QP (see mmap below) */
	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		return ERR_PTR(-ENOMEM);
	}

	memset(my_qp, 0, sizeof(struct ehca_qp));
	memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);

	my_qp->recv_cq =
		container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	my_qp->send_cq =
		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	my_qp->init_attr = *init_attr;

	/* reserve an idr token for this QP (retry while idr needs refill) */
	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
		/*
		 * NOTE(review): this jumps past the idr_remove in exit1,
		 * leaking the idr entry reserved above — looks like it
		 * should be create_qp_exit1; confirm and fix separately.
		 */
		goto create_qp_exit0;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		parms.sigtype = HCALL_SIGT_EVERY;
	else
		parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	/* UD-family WQEs need 2 extra sges for the inline address vector */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (IB_QPT_UD == init_attr->qp_type ||
	    IB_QPT_GSI == init_attr->qp_type ||
	    IB_QPT_SMI == init_attr->qp_type) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
	parms.daqp_ctrl = isdaqp | daqp_completion;
	parms.pd = my_pd->fw_pd;
	parms.max_recv_sge = max_recv_sge;
	parms.max_send_sge = max_send_sge;

	/* ask the firmware for the QP resources; fills parms.act_nr_* */
	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);

	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	/* compute WQE sizes per service type from the actual sge counts */
	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		if (isdaqp == 0) {
			swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_send_sges)]);
			rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_recv_sges)]);
		} else { /* for daqp we need to use msg size, not wqe size */
			swqe_size = da_rc_msg_size[max_send_sge];
			rwqe_size = da_rc_msg_size[max_recv_sge];
			parms.act_nr_send_sges = 1;
			parms.act_nr_recv_sges = 1;
		}
		break;
	case IB_QPT_UC:
		swqe_size = offsetof(struct ehca_wqe,
				     u.nud.sg_list[parms.act_nr_send_sges]);
		rwqe_size = offsetof(struct ehca_wqe,
				     u.nud.sg_list[parms.act_nr_recv_sges]);
		break;

	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		/* undo the +2 sge adjustment made before the hcall */
		parms.act_nr_recv_sges -= 2;
		parms.act_nr_send_sges -= 2;
		if (isdaqp) {
			swqe_size = da_ud_sq_msg_size[max_send_sge];
			rwqe_size = da_rc_msg_size[max_recv_sge];
			parms.act_nr_send_sges = 1;
			parms.act_nr_recv_sges = 1;
		} else {
			swqe_size = offsetof(struct ehca_wqe,
					     u.ud_av.sg_list[parms.act_nr_send_sges]);
			rwqe_size = offsetof(struct ehca_wqe,
					     u.ud_av.sg_list[parms.act_nr_recv_sges]);
		}

		/* special QPs: caps pass through; SMI is QP0, GSI is QP1 */
		if (IB_QPT_GSI == init_attr->qp_type ||
		    IB_QPT_SMI == init_attr->qp_type) {
			parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
			my_qp->real_qp_num =
				(init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}

	/* initializes r/squeue and registers queue pages */
	ret = init_qp_queues(shca, my_qp,
			     parms.nr_sq_pages, parms.nr_rq_pages,
			     swqe_size, rwqe_size,
			     parms.act_nr_send_sges, parms.act_nr_recv_sges);
	if (ret) {
		ehca_err(pd->device,
			 "Couldn't initialize r/squeue and pages ret=%x", ret);
		goto create_qp_exit2;
	}

	/* wire up the generic ib_qp embedded in my_qp */
	my_qp->ib_qp.pd = &my_pd->ib_pd;
	my_qp->ib_qp.device = my_pd->ib_pd.device;

	my_qp->ib_qp.recv_cq = init_attr->recv_cq;
	my_qp->ib_qp.send_cq = init_attr->send_cq;

	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
	my_qp->ib_qp.qp_type = init_attr->qp_type;

	my_qp->qp_type = init_attr->qp_type;
	my_qp->ib_qp.srq = init_attr->srq;

	my_qp->ib_qp.qp_context = init_attr->qp_context;
	my_qp->ib_qp.event_handler = init_attr->event_handler;

	/* report the actually granted capabilities back to the caller */
	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
	init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
	init_attr->cap.max_send_sge = parms.act_nr_send_sges;
	init_attr->cap.max_send_wr = parms.act_nr_send_wqes;

	/* NOTE: define_apq0() not supported yet */
	if (init_attr->qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
				 h_ret);
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit3;
		}
	}
	/*
	 * NOTE(review): my_qp->send_cq was already assigned above via
	 * container_of; this block re-derives and re-assigns the same
	 * value after registering the QP with the CQ — confirm whether
	 * the early assignment is redundant.
	 */
	if (init_attr->send_cq) {
		struct ehca_cq *cq = container_of(init_attr->send_cq,
						  struct ehca_cq, ib_cq);
		ret = ehca_cq_assign_qp(cq, my_qp);
		if (ret) {
			ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
				 ret);
			goto create_qp_exit3;
		}
		my_qp->send_cq = cq;
	}
	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
		struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
		struct ehca_create_qp_resp resp;
		struct vm_area_struct * vma;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;
		/* rqueue properties */
		resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
		resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
		resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
		resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
		resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
		/*
		 * mmap token encodes the qp token in the upper 32 bits and
		 * a per-resource tag (0x22 = rqueue, 0x23 = squeue) below —
		 * scheme presumably defined by ehca_mmap_nopage; confirm.
		 */
		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
				       ipz_rqueue->queue_length,
				       (void**)&resp.ipz_rqueue.queue,
				       &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap rqueue pages");
			goto create_qp_exit3;
		}
		my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
		/* squeue properties */
		resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
		resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
		resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
		resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
		resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
				       ipz_squeue->queue_length,
				       (void**)&resp.ipz_squeue.queue,
				       &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap squeue pages");
			goto create_qp_exit4;
		}
		my_qp->uspace_squeue = resp.ipz_squeue.queue;
		/* fw_handle */
		resp.galpas = my_qp->galpas;
		ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
					 (void**)&resp.galpas.kernel.fw_handle,
					 &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap fw_handle");
			goto create_qp_exit5;
		}
		my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit6;
		}
	}

	return &my_qp->ib_qp;

	/* error unwind: each label undoes one acquisition, falling through */
create_qp_exit6:
	ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);

create_qp_exit5:
	ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);

create_qp_exit4:
	ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);

create_qp_exit3:
	ipz_queue_dtor(&my_qp->ipz_rqueue);
	ipz_queue_dtor(&my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	return ERR_PTR(ret);
}
| 723 | |||
/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	void *squeue_start_p, *squeue_end_p;
	void *squeue_start_v, *squeue_end_v;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lx",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	/* mask off bit 63 of the returned absolute wqe address */
	bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	/*
	 * compute virtual start/end of the send queue so the loop below
	 * can wrap around the circular queue boundary
	 */
	squeue = &my_qp->ipz_squeue;
	squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
	squeue_end_p = squeue_start_p+squeue->queue_length;
	squeue_start_v = abs_to_virt((u64)squeue_start_p);
	squeue_end_v = abs_to_virt((u64)squeue_end_p);
	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
		 qp_num, squeue_start_v, squeue_end_v);

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe*)bad_send_wqe_v;
	*bad_wqe_cnt = 0;
	/* optype/wqef == 0xff marks the next free wqe (set by the caller) */
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
		if ((void*)wqe >= squeue_end_v) {
			wqe = squeue_start_v;
		}
	}
	/*
	 * bad wqe will be reprocessed and ignored when pol_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	/* clear the free-wqe marker written by the caller */
	wqe->wqef = 0;

	return 0;
}
| 789 | |||
| 790 | /* | ||
| 791 | * internal_modify_qp with circumvention to handle aqp0 properly | ||
| 792 | * smi_reset2init indicates if this is an internal reset-to-init-call for | ||
| 793 | * smi. This flag must always be zero if called from ehca_modify_qp()! | ||
| 794 | * This internal func was intorduced to avoid recursion of ehca_modify_qp()! | ||
| 795 | */ | ||
| 796 | static int internal_modify_qp(struct ib_qp *ibqp, | ||
| 797 | struct ib_qp_attr *attr, | ||
| 798 | int attr_mask, int smi_reset2init) | ||
| 799 | { | ||
| 800 | enum ib_qp_state qp_cur_state, qp_new_state; | ||
| 801 | int cnt, qp_attr_idx, ret = 0; | ||
| 802 | enum ib_qp_statetrans statetrans; | ||
| 803 | struct hcp_modify_qp_control_block *mqpcb; | ||
| 804 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); | ||
| 805 | struct ehca_shca *shca = | ||
| 806 | container_of(ibqp->pd->device, struct ehca_shca, ib_device); | ||
| 807 | u64 update_mask; | ||
| 808 | u64 h_ret; | ||
| 809 | int bad_wqe_cnt = 0; | ||
| 810 | int squeue_locked = 0; | ||
| 811 | unsigned long spl_flags = 0; | ||
| 812 | |||
| 813 | /* do query_qp to obtain current attr values */ | ||
| 814 | mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
| 815 | if (mqpcb == NULL) { | ||
| 816 | ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " | ||
| 817 | "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); | ||
| 818 | return -ENOMEM; | ||
| 819 | } | ||
| 820 | |||
| 821 | h_ret = hipz_h_query_qp(shca->ipz_hca_handle, | ||
| 822 | my_qp->ipz_qp_handle, | ||
| 823 | &my_qp->pf, | ||
| 824 | mqpcb, my_qp->galpas.kernel); | ||
| 825 | if (h_ret != H_SUCCESS) { | ||
| 826 | ehca_err(ibqp->device, "hipz_h_query_qp() failed " | ||
| 827 | "ehca_qp=%p qp_num=%x h_ret=%lx", | ||
| 828 | my_qp, ibqp->qp_num, h_ret); | ||
| 829 | ret = ehca2ib_return_code(h_ret); | ||
| 830 | goto modify_qp_exit1; | ||
| 831 | } | ||
| 832 | |||
| 833 | qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); | ||
| 834 | |||
| 835 | if (qp_cur_state == -EINVAL) { /* invalid qp state */ | ||
| 836 | ret = -EINVAL; | ||
| 837 | ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x " | ||
| 838 | "ehca_qp=%p qp_num=%x", | ||
| 839 | mqpcb->qp_state, my_qp, ibqp->qp_num); | ||
| 840 | goto modify_qp_exit1; | ||
| 841 | } | ||
| 842 | /* | ||
| 843 | * circumvention to set aqp0 initial state to init | ||
| 844 | * as expected by IB spec | ||
| 845 | */ | ||
| 846 | if (smi_reset2init == 0 && | ||
| 847 | ibqp->qp_type == IB_QPT_SMI && | ||
| 848 | qp_cur_state == IB_QPS_RESET && | ||
| 849 | (attr_mask & IB_QP_STATE) && | ||
| 850 | attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */ | ||
| 851 | struct ib_qp_attr smiqp_attr = { | ||
| 852 | .qp_state = IB_QPS_INIT, | ||
| 853 | .port_num = my_qp->init_attr.port_num, | ||
| 854 | .pkey_index = 0, | ||
| 855 | .qkey = 0 | ||
| 856 | }; | ||
| 857 | int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT | | ||
| 858 | IB_QP_PKEY_INDEX | IB_QP_QKEY; | ||
| 859 | int smirc = internal_modify_qp( | ||
| 860 | ibqp, &smiqp_attr, smiqp_attr_mask, 1); | ||
| 861 | if (smirc) { | ||
| 862 | ehca_err(ibqp->device, "SMI RESET -> INIT failed. " | ||
| 863 | "ehca_modify_qp() rc=%x", smirc); | ||
| 864 | ret = H_PARAMETER; | ||
| 865 | goto modify_qp_exit1; | ||
| 866 | } | ||
| 867 | qp_cur_state = IB_QPS_INIT; | ||
| 868 | ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded"); | ||
| 869 | } | ||
| 870 | /* is transmitted current state equal to "real" current state */ | ||
| 871 | if ((attr_mask & IB_QP_CUR_STATE) && | ||
| 872 | qp_cur_state != attr->cur_qp_state) { | ||
| 873 | ret = -EINVAL; | ||
| 874 | ehca_err(ibqp->device, | ||
| 875 | "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>" | ||
| 876 | " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x", | ||
| 877 | attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num); | ||
| 878 | goto modify_qp_exit1; | ||
| 879 | } | ||
| 880 | |||
| 881 | ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x " | ||
| 882 | "new qp_state=%x attribute_mask=%x", | ||
| 883 | my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask); | ||
| 884 | |||
| 885 | qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state; | ||
| 886 | if (!smi_reset2init && | ||
| 887 | !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type, | ||
| 888 | attr_mask)) { | ||
| 889 | ret = -EINVAL; | ||
| 890 | ehca_err(ibqp->device, | ||
| 891 | "Invalid qp transition new_state=%x cur_state=%x " | ||
| 892 | "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state, | ||
| 893 | qp_cur_state, my_qp, ibqp->qp_num, attr_mask); | ||
| 894 | goto modify_qp_exit1; | ||
| 895 | } | ||
| 896 | |||
| 897 | if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state))) | ||
| 898 | update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1); | ||
| 899 | else { | ||
| 900 | ret = -EINVAL; | ||
| 901 | ehca_err(ibqp->device, "Invalid new qp state=%x " | ||
| 902 | "ehca_qp=%p qp_num=%x", | ||
| 903 | qp_new_state, my_qp, ibqp->qp_num); | ||
| 904 | goto modify_qp_exit1; | ||
| 905 | } | ||
| 906 | |||
| 907 | /* retrieve state transition struct to get req and opt attrs */ | ||
| 908 | statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state); | ||
| 909 | if (statetrans < 0) { | ||
| 910 | ret = -EINVAL; | ||
| 911 | ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x " | ||
| 912 | "new_qp_state=%x State_xsition=%x ehca_qp=%p " | ||
| 913 | "qp_num=%x", qp_cur_state, qp_new_state, | ||
| 914 | statetrans, my_qp, ibqp->qp_num); | ||
| 915 | goto modify_qp_exit1; | ||
| 916 | } | ||
| 917 | |||
| 918 | qp_attr_idx = ib2ehcaqptype(ibqp->qp_type); | ||
| 919 | |||
| 920 | if (qp_attr_idx < 0) { | ||
| 921 | ret = qp_attr_idx; | ||
| 922 | ehca_err(ibqp->device, | ||
| 923 | "Invalid QP type=%x ehca_qp=%p qp_num=%x", | ||
| 924 | ibqp->qp_type, my_qp, ibqp->qp_num); | ||
| 925 | goto modify_qp_exit1; | ||
| 926 | } | ||
| 927 | |||
| 928 | ehca_dbg(ibqp->device, | ||
| 929 | "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x", | ||
| 930 | my_qp, ibqp->qp_num, statetrans); | ||
| 931 | |||
| 932 | /* sqe -> rts: set purge bit of bad wqe before actual trans */ | ||
| 933 | if ((my_qp->qp_type == IB_QPT_UD || | ||
| 934 | my_qp->qp_type == IB_QPT_GSI || | ||
| 935 | my_qp->qp_type == IB_QPT_SMI) && | ||
| 936 | statetrans == IB_QPST_SQE2RTS) { | ||
| 937 | /* mark next free wqe if kernel */ | ||
| 938 | if (my_qp->uspace_squeue == 0) { | ||
| 939 | struct ehca_wqe *wqe; | ||
| 940 | /* lock send queue */ | ||
| 941 | spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); | ||
| 942 | squeue_locked = 1; | ||
| 943 | /* mark next free wqe */ | ||
| 944 | wqe = (struct ehca_wqe*) | ||
| 945 | ipz_qeit_get(&my_qp->ipz_squeue); | ||
| 946 | wqe->optype = wqe->wqef = 0xff; | ||
| 947 | ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p", | ||
| 948 | ibqp->qp_num, wqe); | ||
| 949 | } | ||
| 950 | ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt); | ||
| 951 | if (ret) { | ||
| 952 | ehca_err(ibqp->device, "prepare_sqe_rts() failed " | ||
| 953 | "ehca_qp=%p qp_num=%x ret=%x", | ||
| 954 | my_qp, ibqp->qp_num, ret); | ||
| 955 | goto modify_qp_exit2; | ||
| 956 | } | ||
| 957 | } | ||
| 958 | |||
| 959 | /* | ||
| 960 | * enable RDMA_Atomic_Control if reset->init und reliable con | ||
| 961 | * this is necessary since gen2 does not provide that flag, | ||
| 962 | * but pHyp requires it | ||
| 963 | */ | ||
| 964 | if (statetrans == IB_QPST_RESET2INIT && | ||
| 965 | (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) { | ||
| 966 | mqpcb->rdma_atomic_ctrl = 3; | ||
| 967 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1); | ||
| 968 | } | ||
| 969 | /* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */ | ||
| 970 | if (statetrans == IB_QPST_INIT2RTR && | ||
| 971 | (ibqp->qp_type == IB_QPT_UC) && | ||
| 972 | !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) { | ||
| 973 | mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */ | ||
| 974 | update_mask |= | ||
| 975 | EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1); | ||
| 976 | } | ||
| 977 | |||
| 978 | if (attr_mask & IB_QP_PKEY_INDEX) { | ||
| 979 | mqpcb->prim_p_key_idx = attr->pkey_index; | ||
| 980 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1); | ||
| 981 | } | ||
| 982 | if (attr_mask & IB_QP_PORT) { | ||
| 983 | if (attr->port_num < 1 || attr->port_num > shca->num_ports) { | ||
| 984 | ret = -EINVAL; | ||
| 985 | ehca_err(ibqp->device, "Invalid port=%x. " | ||
| 986 | "ehca_qp=%p qp_num=%x num_ports=%x", | ||
| 987 | attr->port_num, my_qp, ibqp->qp_num, | ||
| 988 | shca->num_ports); | ||
| 989 | goto modify_qp_exit2; | ||
| 990 | } | ||
| 991 | mqpcb->prim_phys_port = attr->port_num; | ||
| 992 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1); | ||
| 993 | } | ||
| 994 | if (attr_mask & IB_QP_QKEY) { | ||
| 995 | mqpcb->qkey = attr->qkey; | ||
| 996 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1); | ||
| 997 | } | ||
| 998 | if (attr_mask & IB_QP_AV) { | ||
| 999 | int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate); | ||
| 1000 | int ehca_mult = ib_rate_to_mult(shca->sport[my_qp-> | ||
| 1001 | init_attr.port_num].rate); | ||
| 1002 | |||
| 1003 | mqpcb->dlid = attr->ah_attr.dlid; | ||
| 1004 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1); | ||
| 1005 | mqpcb->source_path_bits = attr->ah_attr.src_path_bits; | ||
| 1006 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1); | ||
| 1007 | mqpcb->service_level = attr->ah_attr.sl; | ||
| 1008 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1); | ||
| 1009 | |||
| 1010 | if (ah_mult < ehca_mult) | ||
| 1011 | mqpcb->max_static_rate = (ah_mult > 0) ? | ||
| 1012 | ((ehca_mult - 1) / ah_mult) : 0; | ||
| 1013 | else | ||
| 1014 | mqpcb->max_static_rate = 0; | ||
| 1015 | |||
| 1016 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1); | ||
| 1017 | |||
| 1018 | /* | ||
| 1019 | * only if GRH is TRUE we might consider SOURCE_GID_IDX | ||
| 1020 | * and DEST_GID otherwise phype will return H_ATTR_PARM!!! | ||
| 1021 | */ | ||
| 1022 | if (attr->ah_attr.ah_flags == IB_AH_GRH) { | ||
| 1023 | mqpcb->send_grh_flag = 1 << 31; | ||
| 1024 | update_mask |= | ||
| 1025 | EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1); | ||
| 1026 | mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index; | ||
| 1027 | update_mask |= | ||
| 1028 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1); | ||
| 1029 | |||
| 1030 | for (cnt = 0; cnt < 16; cnt++) | ||
| 1031 | mqpcb->dest_gid.byte[cnt] = | ||
| 1032 | attr->ah_attr.grh.dgid.raw[cnt]; | ||
| 1033 | |||
| 1034 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1); | ||
| 1035 | mqpcb->flow_label = attr->ah_attr.grh.flow_label; | ||
| 1036 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1); | ||
| 1037 | mqpcb->hop_limit = attr->ah_attr.grh.hop_limit; | ||
| 1038 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1); | ||
| 1039 | mqpcb->traffic_class = attr->ah_attr.grh.traffic_class; | ||
| 1040 | update_mask |= | ||
| 1041 | EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1); | ||
| 1042 | } | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | if (attr_mask & IB_QP_PATH_MTU) { | ||
| 1046 | mqpcb->path_mtu = attr->path_mtu; | ||
| 1047 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1); | ||
| 1048 | } | ||
| 1049 | if (attr_mask & IB_QP_TIMEOUT) { | ||
| 1050 | mqpcb->timeout = attr->timeout; | ||
| 1051 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1); | ||
| 1052 | } | ||
| 1053 | if (attr_mask & IB_QP_RETRY_CNT) { | ||
| 1054 | mqpcb->retry_count = attr->retry_cnt; | ||
| 1055 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1); | ||
| 1056 | } | ||
| 1057 | if (attr_mask & IB_QP_RNR_RETRY) { | ||
| 1058 | mqpcb->rnr_retry_count = attr->rnr_retry; | ||
| 1059 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1); | ||
| 1060 | } | ||
| 1061 | if (attr_mask & IB_QP_RQ_PSN) { | ||
| 1062 | mqpcb->receive_psn = attr->rq_psn; | ||
| 1063 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1); | ||
| 1064 | } | ||
| 1065 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { | ||
| 1066 | mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ? | ||
| 1067 | attr->max_dest_rd_atomic : 2; | ||
| 1068 | update_mask |= | ||
| 1069 | EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1); | ||
| 1070 | } | ||
| 1071 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { | ||
| 1072 | mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ? | ||
| 1073 | attr->max_rd_atomic : 2; | ||
| 1074 | update_mask |= | ||
| 1075 | EHCA_BMASK_SET | ||
| 1076 | (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1); | ||
| 1077 | } | ||
| 1078 | if (attr_mask & IB_QP_ALT_PATH) { | ||
| 1079 | int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate); | ||
| 1080 | int ehca_mult = ib_rate_to_mult( | ||
| 1081 | shca->sport[my_qp->init_attr.port_num].rate); | ||
| 1082 | |||
| 1083 | mqpcb->dlid_al = attr->alt_ah_attr.dlid; | ||
| 1084 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1); | ||
| 1085 | mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits; | ||
| 1086 | update_mask |= | ||
| 1087 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1); | ||
| 1088 | mqpcb->service_level_al = attr->alt_ah_attr.sl; | ||
| 1089 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1); | ||
| 1090 | |||
| 1091 | if (ah_mult < ehca_mult) | ||
| 1092 | mqpcb->max_static_rate = (ah_mult > 0) ? | ||
| 1093 | ((ehca_mult - 1) / ah_mult) : 0; | ||
| 1094 | else | ||
| 1095 | mqpcb->max_static_rate_al = 0; | ||
| 1096 | |||
| 1097 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1); | ||
| 1098 | |||
| 1099 | /* | ||
| 1100 | * only if GRH is TRUE we might consider SOURCE_GID_IDX | ||
| 1101 | * and DEST_GID otherwise phype will return H_ATTR_PARM!!! | ||
| 1102 | */ | ||
| 1103 | if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) { | ||
| 1104 | mqpcb->send_grh_flag_al = 1 << 31; | ||
| 1105 | update_mask |= | ||
| 1106 | EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1); | ||
| 1107 | mqpcb->source_gid_idx_al = | ||
| 1108 | attr->alt_ah_attr.grh.sgid_index; | ||
| 1109 | update_mask |= | ||
| 1110 | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1); | ||
| 1111 | |||
| 1112 | for (cnt = 0; cnt < 16; cnt++) | ||
| 1113 | mqpcb->dest_gid_al.byte[cnt] = | ||
| 1114 | attr->alt_ah_attr.grh.dgid.raw[cnt]; | ||
| 1115 | |||
| 1116 | update_mask |= | ||
| 1117 | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1); | ||
| 1118 | mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label; | ||
| 1119 | update_mask |= | ||
| 1120 | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1); | ||
| 1121 | mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit; | ||
| 1122 | update_mask |= | ||
| 1123 | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1); | ||
| 1124 | mqpcb->traffic_class_al = | ||
| 1125 | attr->alt_ah_attr.grh.traffic_class; | ||
| 1126 | update_mask |= | ||
| 1127 | EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1); | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | ||
| 1132 | mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer; | ||
| 1133 | update_mask |= | ||
| 1134 | EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1); | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | if (attr_mask & IB_QP_SQ_PSN) { | ||
| 1138 | mqpcb->send_psn = attr->sq_psn; | ||
| 1139 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1); | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | if (attr_mask & IB_QP_DEST_QPN) { | ||
| 1143 | mqpcb->dest_qp_nr = attr->dest_qp_num; | ||
| 1144 | update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1); | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | if (attr_mask & IB_QP_PATH_MIG_STATE) { | ||
| 1148 | mqpcb->path_migration_state = attr->path_mig_state; | ||
| 1149 | update_mask |= | ||
| 1150 | EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1); | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | if (attr_mask & IB_QP_CAP) { | ||
| 1154 | mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1; | ||
| 1155 | update_mask |= | ||
| 1156 | EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1); | ||
| 1157 | mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1; | ||
| 1158 | update_mask |= | ||
| 1159 | EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1); | ||
| 1160 | /* no support for max_send/recv_sge yet */ | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | if (ehca_debug_level) | ||
| 1164 | ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); | ||
| 1165 | |||
| 1166 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, | ||
| 1167 | my_qp->ipz_qp_handle, | ||
| 1168 | &my_qp->pf, | ||
| 1169 | update_mask, | ||
| 1170 | mqpcb, my_qp->galpas.kernel); | ||
| 1171 | |||
| 1172 | if (h_ret != H_SUCCESS) { | ||
| 1173 | ret = ehca2ib_return_code(h_ret); | ||
| 1174 | ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx " | ||
| 1175 | "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num); | ||
| 1176 | goto modify_qp_exit2; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | if ((my_qp->qp_type == IB_QPT_UD || | ||
| 1180 | my_qp->qp_type == IB_QPT_GSI || | ||
| 1181 | my_qp->qp_type == IB_QPT_SMI) && | ||
| 1182 | statetrans == IB_QPST_SQE2RTS) { | ||
| 1183 | /* doorbell to reprocessing wqes */ | ||
| 1184 | iosync(); /* serialize GAL register access */ | ||
| 1185 | hipz_update_sqa(my_qp, bad_wqe_cnt-1); | ||
| 1186 | ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt); | ||
| 1187 | } | ||
| 1188 | |||
| 1189 | if (statetrans == IB_QPST_RESET2INIT || | ||
| 1190 | statetrans == IB_QPST_INIT2INIT) { | ||
| 1191 | mqpcb->qp_enable = 1; | ||
| 1192 | mqpcb->qp_state = EHCA_QPS_INIT; | ||
| 1193 | update_mask = 0; | ||
| 1194 | update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1); | ||
| 1195 | |||
| 1196 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, | ||
| 1197 | my_qp->ipz_qp_handle, | ||
| 1198 | &my_qp->pf, | ||
| 1199 | update_mask, | ||
| 1200 | mqpcb, | ||
| 1201 | my_qp->galpas.kernel); | ||
| 1202 | |||
| 1203 | if (h_ret != H_SUCCESS) { | ||
| 1204 | ret = ehca2ib_return_code(h_ret); | ||
| 1205 | ehca_err(ibqp->device, "ENABLE in context of " | ||
| 1206 | "RESET_2_INIT failed! Maybe you didn't get " | ||
| 1207 | "a LID h_ret=%lx ehca_qp=%p qp_num=%x", | ||
| 1208 | h_ret, my_qp, ibqp->qp_num); | ||
| 1209 | goto modify_qp_exit2; | ||
| 1210 | } | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | if (statetrans == IB_QPST_ANY2RESET) { | ||
| 1214 | ipz_qeit_reset(&my_qp->ipz_rqueue); | ||
| 1215 | ipz_qeit_reset(&my_qp->ipz_squeue); | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | if (attr_mask & IB_QP_QKEY) | ||
| 1219 | my_qp->qkey = attr->qkey; | ||
| 1220 | |||
| 1221 | modify_qp_exit2: | ||
| 1222 | if (squeue_locked) { /* this means: sqe -> rts */ | ||
| 1223 | spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags); | ||
| 1224 | my_qp->sqerr_purgeflag = 1; | ||
| 1225 | } | ||
| 1226 | |||
| 1227 | modify_qp_exit1: | ||
| 1228 | kfree(mqpcb); | ||
| 1229 | |||
| 1230 | return ret; | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | ||
| 1234 | { | ||
| 1235 | struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); | ||
| 1236 | struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, | ||
| 1237 | ib_pd); | ||
| 1238 | u32 cur_pid = current->tgid; | ||
| 1239 | |||
| 1240 | if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && | ||
| 1241 | my_pd->ownpid != cur_pid) { | ||
| 1242 | ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x", | ||
| 1243 | cur_pid, my_pd->ownpid); | ||
| 1244 | return -EINVAL; | ||
| 1245 | } | ||
| 1246 | |||
| 1247 | return internal_modify_qp(ibqp, attr, attr_mask, 0); | ||
| 1248 | } | ||
| 1249 | |||
/*
 * Query QP attributes: fetch the hypervisor's modify-qp control block via
 * hipz_h_query_qp() and translate its fields into the generic ib_qp_attr.
 * Also returns the init attributes saved at create time, if requested.
 * Returns 0 on success or a negative errno.
 */
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	u32 cur_pid = current->tgid;
	int cnt, ret = 0;
	u64 h_ret;

	/* a userspace PD may only be queried by its owning process */
	if (my_pd->ib_pd.uobject  && my_pd->ib_pd.uobject->context  &&
	    my_pd->ownpid != cur_pid) {
		ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		return -EINVAL;
	}

	/* reject attribute bits this driver cannot report */
	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device,"Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	/* control block must be H_CB_ALIGNMENT sized/aligned for the hcall */
	qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
	if (!qpcb) {
		ehca_err(qp->device,"Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device,"hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lx",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	/* undo the +1 applied when the wr limits were programmed */
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION: 2 sges are reserved for the AV */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);

	qp_attr->port_num =
		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);

	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	/* NOTE(review): extracts alt_p_key_idx with the PRIM mask —
	 * presumably both fields share the same bit layout; confirm */
	qp_attr->alt_pkey_index =
		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);

	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag) {
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al) {
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	kfree(qpcb);

	return ret;
}
| 1407 | |||
/*
 * Destroy a queue pair.
 *
 * Teardown order: detach from the send CQ, remove the idr token, unmap any
 * userspace mappings, destroy the QP in the hypervisor, dispatch a port
 * event for GSI QPs, then free the queue memory and the QP object.
 * Returns 0 on success or a negative errno.
 */
int ehca_destroy_qp(struct ib_qp *ibqp)
{
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	u32 cur_pid = current->tgid;
	u32 qp_num = ibqp->qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	enum ib_qp_type qp_type;
	unsigned long flags;

	/* a userspace PD's QP may only be destroyed by the owning process */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    my_pd->ownpid != cur_pid) {
		ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		return -EINVAL;
	}

	/* detach from the send CQ first so no more completions reference us */
	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq,
					      my_qp->real_qp_num);
		if (ret) {
			ehca_err(ibqp->device, "Couldn't unassign qp from "
				 "send_cq ret=%x qp_num=%x cq_num=%x", ret,
				 my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/* un-mmap if vma alloc */
	if (my_qp->uspace_rqueue) {
		/* munmap failures are only logged; teardown continues */
		ret = ehca_munmap(my_qp->uspace_rqueue,
				  my_qp->ipz_rqueue.queue_length);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap rqueue "
				 "qp_num=%x", qp_num);
		ret = ehca_munmap(my_qp->uspace_squeue,
				  my_qp->ipz_squeue.queue_length);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap squeue "
				 "qp_num=%x", qp_num);
		ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
				 qp_num);
	}

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type  = my_qp->init_attr.qp_type;

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		/* destroying the GSI QP takes the port down; tell consumers */
		struct ib_event event;
		ehca_info(ibqp->device, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	ipz_queue_dtor(&my_qp->ipz_rqueue);
	ipz_queue_dtor(&my_qp->ipz_squeue);
	kmem_cache_free(qp_cache, my_qp);
	return 0;
}
| 1490 | |||
| 1491 | int ehca_init_qp_cache(void) | ||
| 1492 | { | ||
| 1493 | qp_cache = kmem_cache_create("ehca_cache_qp", | ||
| 1494 | sizeof(struct ehca_qp), 0, | ||
| 1495 | SLAB_HWCACHE_ALIGN, | ||
| 1496 | NULL, NULL); | ||
| 1497 | if (!qp_cache) | ||
| 1498 | return -ENOMEM; | ||
| 1499 | return 0; | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | void ehca_cleanup_qp_cache(void) | ||
| 1503 | { | ||
| 1504 | if (qp_cache) | ||
| 1505 | kmem_cache_destroy(qp_cache); | ||
| 1506 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c new file mode 100644 index 00000000000..b46bda1bf85 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
| @@ -0,0 +1,653 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * post_send/recv, poll_cq, req_notify | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | |||
| 44 | #include <asm-powerpc/system.h> | ||
| 45 | #include "ehca_classes.h" | ||
| 46 | #include "ehca_tools.h" | ||
| 47 | #include "ehca_qes.h" | ||
| 48 | #include "ehca_iverbs.h" | ||
| 49 | #include "hcp_if.h" | ||
| 50 | #include "hipz_fns.h" | ||
| 51 | |||
| 52 | static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, | ||
| 53 | struct ehca_wqe *wqe_p, | ||
| 54 | struct ib_recv_wr *recv_wr) | ||
| 55 | { | ||
| 56 | u8 cnt_ds; | ||
| 57 | if (unlikely((recv_wr->num_sge < 0) || | ||
| 58 | (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) { | ||
| 59 | ehca_gen_err("Invalid number of WQE SGE. " | ||
| 60 | "num_sqe=%x max_nr_of_sg=%x", | ||
| 61 | recv_wr->num_sge, ipz_rqueue->act_nr_of_sg); | ||
| 62 | return -EINVAL; /* invalid SG list length */ | ||
| 63 | } | ||
| 64 | |||
| 65 | /* clear wqe header until sglist */ | ||
| 66 | memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); | ||
| 67 | |||
| 68 | wqe_p->work_request_id = recv_wr->wr_id; | ||
| 69 | wqe_p->nr_of_data_seg = recv_wr->num_sge; | ||
| 70 | |||
| 71 | for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) { | ||
| 72 | wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr = | ||
| 73 | recv_wr->sg_list[cnt_ds].addr; | ||
| 74 | wqe_p->u.all_rcv.sg_list[cnt_ds].lkey = | ||
| 75 | recv_wr->sg_list[cnt_ds].lkey; | ||
| 76 | wqe_p->u.all_rcv.sg_list[cnt_ds].length = | ||
| 77 | recv_wr->sg_list[cnt_ds].length; | ||
| 78 | } | ||
| 79 | |||
| 80 | if (ehca_debug_level) { | ||
| 81 | ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue); | ||
| 82 | ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); | ||
| 83 | } | ||
| 84 | |||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | |||
| 88 | #if defined(DEBUG_GSI_SEND_WR) | ||
| 89 | |||
| 90 | /* need ib_mad struct */ | ||
| 91 | #include <rdma/ib_mad.h> | ||
| 92 | |||
| 93 | static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | ||
| 94 | { | ||
| 95 | int idx; | ||
| 96 | int j; | ||
| 97 | while (send_wr) { | ||
| 98 | struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; | ||
| 99 | struct ib_sge *sge = send_wr->sg_list; | ||
| 100 | ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " | ||
| 101 | "send_flags=%x opcode=%x",idx, send_wr->wr_id, | ||
| 102 | send_wr->num_sge, send_wr->send_flags, | ||
| 103 | send_wr->opcode); | ||
| 104 | if (mad_hdr) { | ||
| 105 | ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x " | ||
| 106 | "mgmt_class=%x class_version=%x method=%x " | ||
| 107 | "status=%x class_specific=%x tid=%lx " | ||
| 108 | "attr_id=%x resv=%x attr_mod=%x", | ||
| 109 | idx, mad_hdr->base_version, | ||
| 110 | mad_hdr->mgmt_class, | ||
| 111 | mad_hdr->class_version, mad_hdr->method, | ||
| 112 | mad_hdr->status, mad_hdr->class_specific, | ||
| 113 | mad_hdr->tid, mad_hdr->attr_id, | ||
| 114 | mad_hdr->resv, | ||
| 115 | mad_hdr->attr_mod); | ||
| 116 | } | ||
| 117 | for (j = 0; j < send_wr->num_sge; j++) { | ||
| 118 | u8 *data = (u8 *) abs_to_virt(sge->addr); | ||
| 119 | ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " | ||
| 120 | "lkey=%x", | ||
| 121 | idx, j, data, sge->length, sge->lkey); | ||
| 122 | /* assume length is n*16 */ | ||
| 123 | ehca_dmp(data, sge->length, "send_wr#%x sge#%x", | ||
| 124 | idx, j); | ||
| 125 | sge++; | ||
| 126 | } /* eof for j */ | ||
| 127 | idx++; | ||
| 128 | send_wr = send_wr->next; | ||
| 129 | } /* eof while send_wr */ | ||
| 130 | } | ||
| 131 | |||
| 132 | #endif /* DEBUG_GSI_SEND_WR */ | ||
| 133 | |||
/*
 * ehca_write_swqe - encode one ib_send_wr into a send WQE slot.
 *
 * Translates the generic verbs work request (opcode, flags, immediate data,
 * SG list, and per-QP-type fields) into the eHCA hardware WQE layout.
 * Returns 0 on success, -EINVAL for invalid SG length, opcode, QP type,
 * or a missing UD address handle.
 */
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sqe=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;

	/* map verbs opcode to the hardware operation type */
	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if (send_wr->send_flags & IB_SEND_SIGNALED)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance: high bit set in the WR's
		 * qkey means "use the QP's own qkey instead" */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (!send_wr->wr.ud.ah) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		/* special QPs always use a 256-byte MTU (pmtu encoding 1) */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here: UC shares the RC encoding */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}
	return 0;
}
| 269 | |||
/*
 * map_ib_wc_status - convert a raw eHCA CQE status word to an ib_wc_status.
 * If the error bit is clear the completion is successful; otherwise the low
 * six bits select the error class, with remote errors (0x07) further decoded
 * from bits 20:24. Codes here are hardware-defined (eHCA CQE format).
 */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			/* unknown error code: report a fatal error */
			*wc_status = IB_WC_FATAL_ERR;

		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
| 355 | |||
| 356 | int ehca_post_send(struct ib_qp *qp, | ||
| 357 | struct ib_send_wr *send_wr, | ||
| 358 | struct ib_send_wr **bad_send_wr) | ||
| 359 | { | ||
| 360 | struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); | ||
| 361 | struct ib_send_wr *cur_send_wr; | ||
| 362 | struct ehca_wqe *wqe_p; | ||
| 363 | int wqe_cnt = 0; | ||
| 364 | int ret = 0; | ||
| 365 | unsigned long spl_flags; | ||
| 366 | |||
| 367 | /* LOCK the QUEUE */ | ||
| 368 | spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); | ||
| 369 | |||
| 370 | /* loop processes list of send reqs */ | ||
| 371 | for (cur_send_wr = send_wr; cur_send_wr != NULL; | ||
| 372 | cur_send_wr = cur_send_wr->next) { | ||
| 373 | u64 start_offset = my_qp->ipz_squeue.current_q_offset; | ||
| 374 | /* get pointer next to free WQE */ | ||
| 375 | wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue); | ||
| 376 | if (unlikely(!wqe_p)) { | ||
| 377 | /* too many posted work requests: queue overflow */ | ||
| 378 | if (bad_send_wr) | ||
| 379 | *bad_send_wr = cur_send_wr; | ||
| 380 | if (wqe_cnt == 0) { | ||
| 381 | ret = -ENOMEM; | ||
| 382 | ehca_err(qp->device, "Too many posted WQEs " | ||
| 383 | "qp_num=%x", qp->qp_num); | ||
| 384 | } | ||
| 385 | goto post_send_exit0; | ||
| 386 | } | ||
| 387 | /* write a SEND WQE into the QUEUE */ | ||
| 388 | ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr); | ||
| 389 | /* | ||
| 390 | * if something failed, | ||
| 391 | * reset the free entry pointer to the start value | ||
| 392 | */ | ||
| 393 | if (unlikely(ret)) { | ||
| 394 | my_qp->ipz_squeue.current_q_offset = start_offset; | ||
| 395 | *bad_send_wr = cur_send_wr; | ||
| 396 | if (wqe_cnt == 0) { | ||
| 397 | ret = -EINVAL; | ||
| 398 | ehca_err(qp->device, "Could not write WQE " | ||
| 399 | "qp_num=%x", qp->qp_num); | ||
| 400 | } | ||
| 401 | goto post_send_exit0; | ||
| 402 | } | ||
| 403 | wqe_cnt++; | ||
| 404 | ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d", | ||
| 405 | my_qp, qp->qp_num, wqe_cnt); | ||
| 406 | } /* eof for cur_send_wr */ | ||
| 407 | |||
| 408 | post_send_exit0: | ||
| 409 | /* UNLOCK the QUEUE */ | ||
| 410 | spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags); | ||
| 411 | iosync(); /* serialize GAL register access */ | ||
| 412 | hipz_update_sqa(my_qp, wqe_cnt); | ||
| 413 | return ret; | ||
| 414 | } | ||
| 415 | |||
| 416 | int ehca_post_recv(struct ib_qp *qp, | ||
| 417 | struct ib_recv_wr *recv_wr, | ||
| 418 | struct ib_recv_wr **bad_recv_wr) | ||
| 419 | { | ||
| 420 | struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); | ||
| 421 | struct ib_recv_wr *cur_recv_wr; | ||
| 422 | struct ehca_wqe *wqe_p; | ||
| 423 | int wqe_cnt = 0; | ||
| 424 | int ret = 0; | ||
| 425 | unsigned long spl_flags; | ||
| 426 | |||
| 427 | /* LOCK the QUEUE */ | ||
| 428 | spin_lock_irqsave(&my_qp->spinlock_r, spl_flags); | ||
| 429 | |||
| 430 | /* loop processes list of send reqs */ | ||
| 431 | for (cur_recv_wr = recv_wr; cur_recv_wr != NULL; | ||
| 432 | cur_recv_wr = cur_recv_wr->next) { | ||
| 433 | u64 start_offset = my_qp->ipz_rqueue.current_q_offset; | ||
| 434 | /* get pointer next to free WQE */ | ||
| 435 | wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue); | ||
| 436 | if (unlikely(!wqe_p)) { | ||
| 437 | /* too many posted work requests: queue overflow */ | ||
| 438 | if (bad_recv_wr) | ||
| 439 | *bad_recv_wr = cur_recv_wr; | ||
| 440 | if (wqe_cnt == 0) { | ||
| 441 | ret = -ENOMEM; | ||
| 442 | ehca_err(qp->device, "Too many posted WQEs " | ||
| 443 | "qp_num=%x", qp->qp_num); | ||
| 444 | } | ||
| 445 | goto post_recv_exit0; | ||
| 446 | } | ||
| 447 | /* write a RECV WQE into the QUEUE */ | ||
| 448 | ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr); | ||
| 449 | /* | ||
| 450 | * if something failed, | ||
| 451 | * reset the free entry pointer to the start value | ||
| 452 | */ | ||
| 453 | if (unlikely(ret)) { | ||
| 454 | my_qp->ipz_rqueue.current_q_offset = start_offset; | ||
| 455 | *bad_recv_wr = cur_recv_wr; | ||
| 456 | if (wqe_cnt == 0) { | ||
| 457 | ret = -EINVAL; | ||
| 458 | ehca_err(qp->device, "Could not write WQE " | ||
| 459 | "qp_num=%x", qp->qp_num); | ||
| 460 | } | ||
| 461 | goto post_recv_exit0; | ||
| 462 | } | ||
| 463 | wqe_cnt++; | ||
| 464 | ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d", | ||
| 465 | my_qp, qp->qp_num, wqe_cnt); | ||
| 466 | } /* eof for cur_recv_wr */ | ||
| 467 | |||
| 468 | post_recv_exit0: | ||
| 469 | spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags); | ||
| 470 | iosync(); /* serialize GAL register access */ | ||
| 471 | hipz_update_rqa(my_qp, wqe_cnt); | ||
| 472 | return ret; | ||
| 473 | } | ||
| 474 | |||
/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!!
 * Indices not listed stay zero, which ehca_poll_cq_one() treats as an
 * invalid CQE opcode.
 * NOTE(review): the table has 255 entries, so a hardware optype of 0xFF
 * would read out of bounds — confirm the valid optype range against the
 * eHCA CQE specification.
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
| 490 | |||
/*
 * ehca_poll_cq_one - internal function to poll one entry of cq.
 *
 * Reads the next valid CQE, skipping purged CQEs (either because the QP is
 * gone or its sqerr purge flag is set), and fills in *wc from the CQE
 * fields. Returns 0 when a completion was delivered, -EAGAIN when the CQ
 * is empty. Caller must hold the CQ spinlock.
 */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	int cqe_count = 0;	/* CQEs consumed, incl. skipped ones */

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
			 "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
		goto  poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		int purgeflag;
		unsigned long spl_flags;
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}
		/* read the purge flag under the QP's send lock */
		spin_lock_irqsave(&qp->spinlock_s, spl_flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);

		if (purgeflag) {
			ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
				 "src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this to avoid double cqes of bad wqe
			 * that caused sqe and turn off purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}

	/* tracing cqe */
	if (ehca_debug_level) {
		ehca_dbg(cq->device,
			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
			 my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		/* table entry was 0 -> hardware optype unknown to us */
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* update also queue adder to throw away this entry!!! */
		goto poll_cq_one_exit0;
	}
	/* eval ib_wc_status */
	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	/* copy remaining CQE fields into the work completion */
	wc->qp_num = cqe->local_qp_number;
	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

	if (wc->status != IB_WC_SUCCESS)
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
			 cqe->status, cqe->local_qp_number,
			 cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
	/* tell hardware how many CQEs we consumed (incl. skipped ones) */
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
| 606 | |||
| 607 | int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) | ||
| 608 | { | ||
| 609 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); | ||
| 610 | int nr; | ||
| 611 | struct ib_wc *current_wc = wc; | ||
| 612 | int ret = 0; | ||
| 613 | unsigned long spl_flags; | ||
| 614 | |||
| 615 | if (num_entries < 1) { | ||
| 616 | ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p " | ||
| 617 | "cq_num=%x", num_entries, my_cq, my_cq->cq_number); | ||
| 618 | ret = -EINVAL; | ||
| 619 | goto poll_cq_exit0; | ||
| 620 | } | ||
| 621 | |||
| 622 | spin_lock_irqsave(&my_cq->spinlock, spl_flags); | ||
| 623 | for (nr = 0; nr < num_entries; nr++) { | ||
| 624 | ret = ehca_poll_cq_one(cq, current_wc); | ||
| 625 | if (ret) | ||
| 626 | break; | ||
| 627 | current_wc++; | ||
| 628 | } /* eof for nr */ | ||
| 629 | spin_unlock_irqrestore(&my_cq->spinlock, spl_flags); | ||
| 630 | if (ret == -EAGAIN || !ret) | ||
| 631 | ret = nr; | ||
| 632 | |||
| 633 | poll_cq_exit0: | ||
| 634 | return ret; | ||
| 635 | } | ||
| 636 | |||
| 637 | int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) | ||
| 638 | { | ||
| 639 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); | ||
| 640 | |||
| 641 | switch (cq_notify) { | ||
| 642 | case IB_CQ_SOLICITED: | ||
| 643 | hipz_set_cqx_n0(my_cq, 1); | ||
| 644 | break; | ||
| 645 | case IB_CQ_NEXT_COMP: | ||
| 646 | hipz_set_cqx_n1(my_cq, 1); | ||
| 647 | break; | ||
| 648 | default: | ||
| 649 | return -EINVAL; | ||
| 650 | } | ||
| 651 | |||
| 652 | return 0; | ||
| 653 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c new file mode 100644 index 00000000000..9f16e9c7939 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * SQP functions | ||
| 5 | * | ||
| 6 | * Authors: Khadija Souissi <souissi@de.ibm.com> | ||
| 7 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | |||
| 43 | #include <linux/module.h> | ||
| 44 | #include <linux/err.h> | ||
| 45 | #include "ehca_classes.h" | ||
| 46 | #include "ehca_tools.h" | ||
| 47 | #include "ehca_qes.h" | ||
| 48 | #include "ehca_iverbs.h" | ||
| 49 | #include "hcp_if.h" | ||
| 50 | |||
| 51 | |||
| 52 | /** | ||
| 53 | * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue | ||
| 54 | * pair is created successfully, the corresponding port gets active. | ||
| 55 | * | ||
| 56 | * Define Special Queue pair 0 (SMI QP) is still not supported. | ||
| 57 | * | ||
| 58 | * @qp_init_attr: Queue pair init attributes with port and queue pair type | ||
| 59 | */ | ||
| 60 | |||
u64 ehca_define_sqp(struct ehca_shca *shca,
		    struct ehca_qp *ehca_qp,
		    struct ib_qp_init_attr *qp_init_attr)
{
	u32 pma_qp_nr, bma_qp_nr;
	u64 ret;
	u8 port = qp_init_attr->port_num;
	int counter;

	/* port starts down; it becomes active asynchronously once the
	 * firmware has set up AQP1 (observed via the port state below) */
	shca->sport[port - 1].port_state = IB_PORT_DOWN;

	switch (qp_init_attr->qp_type) {
	case IB_QPT_SMI:
		/* function not supported yet */
		break;
	case IB_QPT_GSI:
		/* ask firmware to define AQP1 on this port; it also returns
		 * the PMA and BMA QP numbers (currently unused here) */
		ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
					 ehca_qp->ipz_qp_handle,
					 ehca_qp->galpas.kernel,
					 (u32) qp_init_attr->port_num,
					 &pma_qp_nr, &bma_qp_nr);

		if (ret != H_SUCCESS) {
			ehca_err(&shca->ib_device,
				 "Can't define AQP1 for port %x. rc=%lx",
				 port, ret);
			return ret;
		}
		break;
	default:
		ehca_err(&shca->ib_device, "invalid qp_type=%x",
			 qp_init_attr->qp_type);
		return H_PARAMETER;
	}

	/* poll up to ehca_port_act_time seconds for the port to come up.
	 * NOTE(review): msleep_interruptible() returns immediately while a
	 * signal is pending, turning this into a busy loop — confirm whether
	 * plain msleep() is intended here. */
	for (counter = 0;
	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
		     counter < ehca_port_act_time;
	     counter++) {
		ehca_dbg(&shca->ib_device, "... wait until port %x is active",
			 port);
		msleep_interruptible(1000);
	}

	if (counter == ehca_port_act_time) {
		ehca_err(&shca->ib_device, "Port %x is not active.", port);
		return H_HARDWARE;
	}

	return H_SUCCESS;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h new file mode 100644 index 00000000000..9f56bb846d9 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_tools.h | |||
| @@ -0,0 +1,172 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * auxiliary functions | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Khadija Souissi <souissik@de.ibm.com> | ||
| 9 | * Waleri Fomin <fomin@de.ibm.com> | ||
| 10 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 11 | * | ||
| 12 | * Copyright (c) 2005 IBM Corporation | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | |||
| 44 | #ifndef EHCA_TOOLS_H | ||
| 45 | #define EHCA_TOOLS_H | ||
| 46 | |||
| 47 | #include <linux/kernel.h> | ||
| 48 | #include <linux/spinlock.h> | ||
| 49 | #include <linux/delay.h> | ||
| 50 | #include <linux/idr.h> | ||
| 51 | #include <linux/kthread.h> | ||
| 52 | #include <linux/mm.h> | ||
| 53 | #include <linux/mman.h> | ||
| 54 | #include <linux/module.h> | ||
| 55 | #include <linux/moduleparam.h> | ||
| 56 | #include <linux/vmalloc.h> | ||
| 57 | #include <linux/version.h> | ||
| 58 | #include <linux/notifier.h> | ||
| 59 | #include <linux/cpu.h> | ||
| 60 | #include <linux/device.h> | ||
| 61 | |||
| 62 | #include <asm/abs_addr.h> | ||
| 63 | #include <asm/ibmebus.h> | ||
| 64 | #include <asm/io.h> | ||
| 65 | #include <asm/pgtable.h> | ||
| 66 | |||
/* defined elsewhere in the driver; non-zero enables the debug macros below */
extern int ehca_debug_level;

/*
 * Device-bound log helpers.  Every message is prefixed with the processor
 * (paca) index and the name of the emitting function.  ehca_dbg only
 * prints when ehca_debug_level is set.
 */
#define ehca_dbg(ib_dev, format, arg...) \
	do { \
		if (unlikely(ehca_debug_level)) \
			dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
				   "PU%04x EHCA_DBG:%s " format "\n", \
				   get_paca()->paca_index, __FUNCTION__, \
				   ## arg); \
	} while (0)

#define ehca_info(ib_dev, format, arg...) \
	dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
		 get_paca()->paca_index, __FUNCTION__, ## arg)

#define ehca_warn(ib_dev, format, arg...) \
	dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
		 get_paca()->paca_index, __FUNCTION__, ## arg)

#define ehca_err(ib_dev, format, arg...) \
	dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
		get_paca()->paca_index, __FUNCTION__, ## arg)

/* use this one only if no ib_dev available */
#define ehca_gen_dbg(format, arg...) \
	do { \
		if (unlikely(ehca_debug_level)) \
			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
			       get_paca()->paca_index, __FUNCTION__, ## arg); \
	} while (0)

/* NOTE(review): unlike ehca_warn this warning is suppressed unless
 * ehca_debug_level is set, and it prints at KERN_INFO rather than
 * KERN_WARNING -- confirm this is intentional */
#define ehca_gen_warn(format, arg...) \
	do { \
		if (unlikely(ehca_debug_level)) \
			printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
			       get_paca()->paca_index, __FUNCTION__, ## arg); \
	} while (0)

#define ehca_gen_err(format, arg...) \
	printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
	       get_paca()->paca_index, __FUNCTION__, ## arg)

/**
 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
 * Each line has the following layout:
 * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
 */
#define ehca_dmp(adr, len, format, args...) \
	do { \
		unsigned int x; \
		unsigned int l = (unsigned int)(len); \
		unsigned char *deb = (unsigned char*)(adr); \
		for (x = 0; x < l; x += 16) { \
			printk("EHCA_DMP:%s" format \
			       " adr=%p ofs=%04x %016lx %016lx\n", \
			       __FUNCTION__, ##args, deb, x, \
			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
			deb += 16; \
		} \
	} while (0)
| 127 | |||
/* define a bitmask, little endian version: shift position in the upper
 * 16 bits, field width in the lower 16 bits */
#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))

/* define a bitmask, the ibm way (big endian bit numbering), covering
 * bits 'from'..'to'.  Arguments are fully parenthesized so expression
 * arguments like EHCA_BMASK_IBM(a, b + 1) expand correctly (the
 * original left 'to' bare in '63-to'). */
#define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

/* internal helper, don't use: extract the shift position from a mask */
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

/* internal helper, don't use: extract the unshifted value mask */
#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

/**
 * EHCA_BMASK_SET - return value shifted and masked by mask
 * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
 * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
 * in variable
 */
#define EHCA_BMASK_SET(mask, value) \
	((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))

/**
 * EHCA_BMASK_GET - extract a parameter from value by mask
 */
#define EHCA_BMASK_GET(mask, value) \
	(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
| 154 | |||
| 155 | |||
| 156 | /* Converts ehca to ib return code */ | ||
| 157 | static inline int ehca2ib_return_code(u64 ehca_rc) | ||
| 158 | { | ||
| 159 | switch (ehca_rc) { | ||
| 160 | case H_SUCCESS: | ||
| 161 | return 0; | ||
| 162 | case H_BUSY: | ||
| 163 | return -EBUSY; | ||
| 164 | case H_NO_MEM: | ||
| 165 | return -ENOMEM; | ||
| 166 | default: | ||
| 167 | return -EINVAL; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 171 | |||
| 172 | #endif /* EHCA_TOOLS_H */ | ||
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c new file mode 100644 index 00000000000..e08764e4aef --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c | |||
| @@ -0,0 +1,392 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * userspace support verbs | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #include <asm/current.h> | ||
| 44 | |||
| 45 | #include "ehca_classes.h" | ||
| 46 | #include "ehca_iverbs.h" | ||
| 47 | #include "ehca_mrmw.h" | ||
| 48 | #include "ehca_tools.h" | ||
| 49 | #include "hcp_if.h" | ||
| 50 | |||
| 51 | struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device, | ||
| 52 | struct ib_udata *udata) | ||
| 53 | { | ||
| 54 | struct ehca_ucontext *my_context; | ||
| 55 | |||
| 56 | my_context = kzalloc(sizeof *my_context, GFP_KERNEL); | ||
| 57 | if (!my_context) { | ||
| 58 | ehca_err(device, "Out of memory device=%p", device); | ||
| 59 | return ERR_PTR(-ENOMEM); | ||
| 60 | } | ||
| 61 | |||
| 62 | return &my_context->ib_ucontext; | ||
| 63 | } | ||
| 64 | |||
| 65 | int ehca_dealloc_ucontext(struct ib_ucontext *context) | ||
| 66 | { | ||
| 67 | kfree(container_of(context, struct ehca_ucontext, ib_ucontext)); | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
/*
 * nopage handler for mmapped CQ/QP queue memory: translate a faulting
 * user address back to the kernel page that backs the queue entry.
 *
 * The mmap offset is an encoded cookie: bits 63..32 hold the idr handle
 * of the CQ/QP, bits 31..28 the queue type (1=CQ, 2=QP) and bits 27..24
 * the resource type (2=recv/cq queue, 3=send queue).  Anything that does
 * not resolve to a valid, caller-owned queue page yields SIGBUS.
 */
struct page *ehca_nopage(struct vm_area_struct *vma,
			 unsigned long address, int *type)
{
	struct page *mypage = NULL;
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;
	u64 offset;
	void *vaddr;

	switch (q_type) {
	case 1: /* CQ */
		/* idr lookup under the idr lock; the CQ may be destroyed
		 * concurrently by another thread */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq) {
			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}

		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) {
			/* fault in the page of the CQ entry queue holding
			 * the given offset */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp) {
			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}

		/* ownership is tracked on the QP's protection domain */
		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) { /* rqueue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		} else if (rsrc_type == 3) { /* squeue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return NOPAGE_SIGBUS;
	}

	/* mypage stays NULL for unknown rsrc_types -> SIGBUS */
	if (!mypage) {
		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
		return NOPAGE_SIGBUS;
	}
	get_page(mypage);

	return mypage;
}
| 165 | |||
/* vm_ops for mmapped queue memory: pages are faulted in on demand by
 * ehca_nopage() */
static struct vm_operations_struct ehcau_vm_ops = {
	.nopage = ehca_nopage,
};
| 169 | |||
/*
 * mmap entry point for userspace verbs: map either the firmware doorbell
 * page (galpa) or the queue memory of a CQ/QP into the caller's address
 * space.
 *
 * vm_pgoff carries an encoded cookie (see ehca_nopage): bits 63..32 hold
 * the idr handle, bits 31..28 the queue type (1=CQ, 2=QP) and bits 27..24
 * the resource type (1=galpa fw handle, 2=recv/cq queue, 3=send queue).
 */
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	u32 ret;
	u64 vsize, physical;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;

		if (cq->ownpid != cur_pid) {
			/* NOTE(review): -ENOMEM is an odd code for an
			 * ownership failure -- confirm callers rely on it */
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return -ENOMEM;
		}

		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
			/* map the single firmware doorbell page uncached */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = cq->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(cq->ib_cq.device,
				 "vsize=%lx physical=%lx", vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(cq->ib_cq.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* cq queue_addr */
			/* queue pages are faulted in lazily via ehca_nopage */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(cq->ib_cq.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;

		/* ownership is tracked on the QP's protection domain */
		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return -ENOMEM;
		}

		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = qp->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
				 vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(qp->ib_qp.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* qp rqueue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		case 3: /* qp squeue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(qp->ib_qp.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}
| 319 | |||
| 320 | int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped, | ||
| 321 | struct vm_area_struct **vma) | ||
| 322 | { | ||
| 323 | down_write(¤t->mm->mmap_sem); | ||
| 324 | *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE, | ||
| 325 | MAP_SHARED | MAP_ANONYMOUS, | ||
| 326 | foffset); | ||
| 327 | up_write(¤t->mm->mmap_sem); | ||
| 328 | if (!(*mapped)) { | ||
| 329 | ehca_gen_err("couldn't mmap foffset=%lx length=%lx", | ||
| 330 | foffset, length); | ||
| 331 | return -EINVAL; | ||
| 332 | } | ||
| 333 | |||
| 334 | *vma = find_vma(current->mm, (u64)*mapped); | ||
| 335 | if (!(*vma)) { | ||
| 336 | down_write(¤t->mm->mmap_sem); | ||
| 337 | do_munmap(current->mm, 0, length); | ||
| 338 | up_write(¤t->mm->mmap_sem); | ||
| 339 | ehca_gen_err("couldn't find vma queue=%p", *mapped); | ||
| 340 | return -EINVAL; | ||
| 341 | } | ||
| 342 | (*vma)->vm_flags |= VM_RESERVED; | ||
| 343 | (*vma)->vm_ops = &ehcau_vm_ops; | ||
| 344 | |||
| 345 | return 0; | ||
| 346 | } | ||
| 347 | |||
| 348 | int ehca_mmap_register(u64 physical, void **mapped, | ||
| 349 | struct vm_area_struct **vma) | ||
| 350 | { | ||
| 351 | int ret; | ||
| 352 | unsigned long vsize; | ||
| 353 | /* ehca hw supports only 4k page */ | ||
| 354 | ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma); | ||
| 355 | if (ret) { | ||
| 356 | ehca_gen_err("could'nt mmap physical=%lx", physical); | ||
| 357 | return ret; | ||
| 358 | } | ||
| 359 | |||
| 360 | (*vma)->vm_flags |= VM_RESERVED; | ||
| 361 | vsize = (*vma)->vm_end - (*vma)->vm_start; | ||
| 362 | if (vsize != EHCA_PAGESIZE) { | ||
| 363 | ehca_gen_err("invalid vsize=%lx", | ||
| 364 | (*vma)->vm_end - (*vma)->vm_start); | ||
| 365 | return -EINVAL; | ||
| 366 | } | ||
| 367 | |||
| 368 | (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot); | ||
| 369 | (*vma)->vm_flags |= VM_IO | VM_RESERVED; | ||
| 370 | |||
| 371 | ret = remap_pfn_range((*vma), (*vma)->vm_start, | ||
| 372 | physical >> PAGE_SHIFT, vsize, | ||
| 373 | (*vma)->vm_page_prot); | ||
| 374 | if (ret) { | ||
| 375 | ehca_gen_err("remap_pfn_range() failed ret=%x", ret); | ||
| 376 | return -ENOMEM; | ||
| 377 | } | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | |||
| 381 | } | ||
| 382 | |||
| 383 | int ehca_munmap(unsigned long addr, size_t len) { | ||
| 384 | int ret = 0; | ||
| 385 | struct mm_struct *mm = current->mm; | ||
| 386 | if (mm) { | ||
| 387 | down_write(&mm->mmap_sem); | ||
| 388 | ret = do_munmap(mm, addr, len); | ||
| 389 | up_write(&mm->mmap_sem); | ||
| 390 | } | ||
| 391 | return ret; | ||
| 392 | } | ||
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c new file mode 100644 index 00000000000..3fb46e67df8 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
| @@ -0,0 +1,874 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Firmware Infiniband Interface code for POWER | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Gerd Bayer <gerd.bayer@de.ibm.com> | ||
| 9 | * Waleri Fomin <fomin@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #include <asm/hvcall.h> | ||
| 45 | #include "ehca_tools.h" | ||
| 46 | #include "hcp_if.h" | ||
| 47 | #include "hcp_phyp.h" | ||
| 48 | #include "hipz_fns.h" | ||
| 49 | #include "ipz_pt_fn.h" | ||
| 50 | |||
/* bit layouts (IBM bit numbering) of the input registers of the
 * H_ALLOC_RESOURCE hcall when allocating a queue pair */
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

/* requested maxima passed in to the hcall */
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

/* actual values granted, decoded from the hcall output registers */
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

/* direct access qp controls */
#define DAQP_CTRL_ENABLE    0x01
#define DAQP_CTRL_SEND_COMP 0x20
#define DAQP_CTRL_RECV_COMP 0x40
| 77 | |||
| 78 | static u32 get_longbusy_msecs(int longbusy_rc) | ||
| 79 | { | ||
| 80 | switch (longbusy_rc) { | ||
| 81 | case H_LONG_BUSY_ORDER_1_MSEC: | ||
| 82 | return 1; | ||
| 83 | case H_LONG_BUSY_ORDER_10_MSEC: | ||
| 84 | return 10; | ||
| 85 | case H_LONG_BUSY_ORDER_100_MSEC: | ||
| 86 | return 100; | ||
| 87 | case H_LONG_BUSY_ORDER_1_SEC: | ||
| 88 | return 1000; | ||
| 89 | case H_LONG_BUSY_ORDER_10_SEC: | ||
| 90 | return 10000; | ||
| 91 | case H_LONG_BUSY_ORDER_100_SEC: | ||
| 92 | return 100000; | ||
| 93 | default: | ||
| 94 | return 1; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
/*
 * Issue a 7-argument hcall with no output registers, retrying up to 5
 * times while the hypervisor reports a long-busy condition (sleeping
 * for the hinted interval between attempts).  Gives up with H_BUSY
 * after the 5th long-busy result.
 */
static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;

	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
		     "arg5=%lx arg6=%lx arg7=%lx",
		     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (H_IS_LONG_BUSY(ret)) {
			/* hypervisor asks us to retry; honor its delay hint */
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		/* any other result (success or hard error) is final */
		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%lx"
				     " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
				     " arg5=%lx arg6=%lx arg7=%lx ",
				     opcode, ret,
				     arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7);

		ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);
		return ret;

	}

	return H_BUSY;
}
| 139 | |||
/*
 * Issue a 9-argument hcall with 9 output registers (written to @outs),
 * using the same bounded long-busy retry policy as
 * ehca_plpar_hcall_norets(): at most 5 attempts, then H_BUSY.
 */
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;

	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
		     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
		     arg8, arg9);

	for (i = 0; i < 5; i++) {
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (H_IS_LONG_BUSY(ret)) {
			/* hypervisor asks us to retry; honor its delay hint */
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		/* any other result (success or hard error) is final */
		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%lx"
				     " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
				     " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
				     " arg9=%lx"
				     " out1=%lx out2=%lx out3=%lx out4=%lx"
				     " out5=%lx out6=%lx out7=%lx out8=%lx"
				     " out9=%lx",
				     opcode, ret,
				     arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9,
				     outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);

		ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
			     "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
			     "out9=%lx",
			     opcode, ret, outs[0], outs[1], outs[2], outs[3],
			     outs[4], outs[5], outs[6], outs[7], outs[8]);
		return ret;

	}

	return H_BUSY;
}
/*
 * Allocate an event queue (resource type 3) via H_ALLOC_RESOURCE.
 * On return the firmware EQ handle, the actual number of EQ entries,
 * the number of queue pages and the interrupt source token (@eq_ist)
 * have been decoded from the hcall output registers.
 *
 * NOTE(review): the outputs are decoded unconditionally, even when the
 * hcall failed -- callers must check the returned status.
 */
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type */
	allocate_controls = 3ULL;

	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,  /* r4 */
				allocate_controls,      /* r5 */
				number_of_entries,      /* r6 */
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resource - ret=%lx ", ret);

	return ret;
}
| 234 | |||
| 235 | u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle, | ||
| 236 | struct ipz_eq_handle eq_handle, | ||
| 237 | const u64 event_mask) | ||
| 238 | { | ||
| 239 | return ehca_plpar_hcall_norets(H_RESET_EVENTS, | ||
| 240 | adapter_handle.handle, /* r4 */ | ||
| 241 | eq_handle.handle, /* r5 */ | ||
| 242 | event_mask, /* r6 */ | ||
| 243 | 0, 0, 0, 0); | ||
| 244 | } | ||
| 245 | |||
/*
 * Allocate CQ resources in HW/FW via H_ALLOC_RESOURCE (resource type 2).
 * Fills in the CQ handle and the actual entry/page counts granted by
 * firmware, and constructs the galpa mappings on success.
 */
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				2, /* r5 */
				param->eq_handle.handle, /* r6 */
				cq->token, /* r7 */
				param->nr_cqe, /* r8 */
				0, 0, 0, 0);
	/* outputs are copied even on failure; callers check ret first */
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	/* map the firmware-provided galpa addresses only on success */
	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lx", ret);

	return ret;
}
| 272 | |||
/*
 * Allocate QP resources in HW/FW via H_ALLOC_RESOURCE (resource type 1).
 * Builds the allocate-control word and the r10 limits register from the
 * requested capabilities, issues the hcall, and unpacks the granted
 * attributes back into *parms.
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_qp *qp,
			     struct ehca_alloc_qp_parms *parms)
{
	u64 ret;
	u64 allocate_controls;
	u64 max_r10_reg;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	/* +1: firmware needs room for one extra (unusable) WQE per queue */
	u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
	u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
	int daqp_ctrl = parms->daqp_ctrl;

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
			       (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       max_nr_send_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 max_nr_receive_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->max_send_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->max_recv_sge);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				allocate_controls, /* r5 */
				qp->send_cq->ipz_cq_handle.handle,
				qp->recv_cq->ipz_cq_handle.handle,
				parms->ipz_eq_handle.handle,
				((u64)qp->token << 32) | parms->pd.value,
				max_r10_reg, /* r10 */
				parms->ud_av_l_key_ctl, /* r11 */
				0);
	qp->ipz_qp_handle.handle = outs[0];
	qp->real_qp_num = (u32)outs[1];
	/*
	 * NOTE(review): this first assignment extracts the granted
	 * outstanding-SEND-WR count but stores it in act_nr_send_sges,
	 * which is overwritten a few lines below from outs[3]. It looks
	 * like it was meant for a separate "act_nr_send_wqes" field —
	 * confirm against struct ehca_alloc_qp_parms.
	 */
	parms->act_nr_send_sges =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->act_nr_recv_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->act_nr_send_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->act_nr_recv_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->nr_sq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->nr_rq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	/*
	 * NOTE(review): outs[6] is passed twice here; the CQ variant uses
	 * outs[5], outs[6] — verify which output words carry the two galpa
	 * addresses for QPs.
	 */
	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lx", ret);

	return ret;
}
| 342 | |||
| 343 | u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, | ||
| 344 | const u8 port_id, | ||
| 345 | struct hipz_query_port *query_port_response_block) | ||
| 346 | { | ||
| 347 | u64 ret; | ||
| 348 | u64 r_cb = virt_to_abs(query_port_response_block); | ||
| 349 | |||
| 350 | if (r_cb & (EHCA_PAGESIZE-1)) { | ||
| 351 | ehca_gen_err("response block not page aligned"); | ||
| 352 | return H_PARAMETER; | ||
| 353 | } | ||
| 354 | |||
| 355 | ret = ehca_plpar_hcall_norets(H_QUERY_PORT, | ||
| 356 | adapter_handle.handle, /* r4 */ | ||
| 357 | port_id, /* r5 */ | ||
| 358 | r_cb, /* r6 */ | ||
| 359 | 0, 0, 0, 0); | ||
| 360 | |||
| 361 | if (ehca_debug_level) | ||
| 362 | ehca_dmp(query_port_response_block, 64, "response_block"); | ||
| 363 | |||
| 364 | return ret; | ||
| 365 | } | ||
| 366 | |||
| 367 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, | ||
| 368 | struct hipz_query_hca *query_hca_rblock) | ||
| 369 | { | ||
| 370 | u64 r_cb = virt_to_abs(query_hca_rblock); | ||
| 371 | |||
| 372 | if (r_cb & (EHCA_PAGESIZE-1)) { | ||
| 373 | ehca_gen_err("response_block=%p not page aligned", | ||
| 374 | query_hca_rblock); | ||
| 375 | return H_PARAMETER; | ||
| 376 | } | ||
| 377 | |||
| 378 | return ehca_plpar_hcall_norets(H_QUERY_HCA, | ||
| 379 | adapter_handle.handle, /* r4 */ | ||
| 380 | r_cb, /* r5 */ | ||
| 381 | 0, 0, 0, 0, 0); | ||
| 382 | } | ||
| 383 | |||
| 384 | u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle, | ||
| 385 | const u8 pagesize, | ||
| 386 | const u8 queue_type, | ||
| 387 | const u64 resource_handle, | ||
| 388 | const u64 logical_address_of_page, | ||
| 389 | u64 count) | ||
| 390 | { | ||
| 391 | return ehca_plpar_hcall_norets(H_REGISTER_RPAGES, | ||
| 392 | adapter_handle.handle, /* r4 */ | ||
| 393 | queue_type | pagesize << 8, /* r5 */ | ||
| 394 | resource_handle, /* r6 */ | ||
| 395 | logical_address_of_page, /* r7 */ | ||
| 396 | count, /* r8 */ | ||
| 397 | 0, 0); | ||
| 398 | } | ||
| 399 | |||
| 400 | u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle, | ||
| 401 | const struct ipz_eq_handle eq_handle, | ||
| 402 | struct ehca_pfeq *pfeq, | ||
| 403 | const u8 pagesize, | ||
| 404 | const u8 queue_type, | ||
| 405 | const u64 logical_address_of_page, | ||
| 406 | const u64 count) | ||
| 407 | { | ||
| 408 | if (count != 1) { | ||
| 409 | ehca_gen_err("Ppage counter=%lx", count); | ||
| 410 | return H_PARAMETER; | ||
| 411 | } | ||
| 412 | return hipz_h_register_rpage(adapter_handle, | ||
| 413 | pagesize, | ||
| 414 | queue_type, | ||
| 415 | eq_handle.handle, | ||
| 416 | logical_address_of_page, count); | ||
| 417 | } | ||
| 418 | |||
| 419 | u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle, | ||
| 420 | u32 ist) | ||
| 421 | { | ||
| 422 | u64 ret; | ||
| 423 | ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE, | ||
| 424 | adapter_handle.handle, /* r4 */ | ||
| 425 | ist, /* r5 */ | ||
| 426 | 0, 0, 0, 0, 0); | ||
| 427 | |||
| 428 | if (ret != H_SUCCESS && ret != H_BUSY) | ||
| 429 | ehca_gen_err("Could not query interrupt state."); | ||
| 430 | |||
| 431 | return ret; | ||
| 432 | } | ||
| 433 | |||
| 434 | u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle, | ||
| 435 | const struct ipz_cq_handle cq_handle, | ||
| 436 | struct ehca_pfcq *pfcq, | ||
| 437 | const u8 pagesize, | ||
| 438 | const u8 queue_type, | ||
| 439 | const u64 logical_address_of_page, | ||
| 440 | const u64 count, | ||
| 441 | const struct h_galpa gal) | ||
| 442 | { | ||
| 443 | if (count != 1) { | ||
| 444 | ehca_gen_err("Page counter=%lx", count); | ||
| 445 | return H_PARAMETER; | ||
| 446 | } | ||
| 447 | |||
| 448 | return hipz_h_register_rpage(adapter_handle, pagesize, queue_type, | ||
| 449 | cq_handle.handle, logical_address_of_page, | ||
| 450 | count); | ||
| 451 | } | ||
| 452 | |||
| 453 | u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle, | ||
| 454 | const struct ipz_qp_handle qp_handle, | ||
| 455 | struct ehca_pfqp *pfqp, | ||
| 456 | const u8 pagesize, | ||
| 457 | const u8 queue_type, | ||
| 458 | const u64 logical_address_of_page, | ||
| 459 | const u64 count, | ||
| 460 | const struct h_galpa galpa) | ||
| 461 | { | ||
| 462 | if (count != 1) { | ||
| 463 | ehca_gen_err("Page counter=%lx", count); | ||
| 464 | return H_PARAMETER; | ||
| 465 | } | ||
| 466 | |||
| 467 | return hipz_h_register_rpage(adapter_handle,pagesize,queue_type, | ||
| 468 | qp_handle.handle,logical_address_of_page, | ||
| 469 | count); | ||
| 470 | } | ||
| 471 | |||
/*
 * Disable a QP and retrieve the logical addresses of the next SQ/RQ
 * WQEs to be processed (H_DISABLE_AND_GETC). Either output pointer may
 * be NULL if the caller is not interested in that queue.
 */
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle, /* r4 */
				dis_and_get_function_code, /* r5 */
				qp_handle.handle, /* r6 */
				0, 0, 0, 0, 0, 0);
	/* outs[0]/outs[1] hold the next SQ/RQ WQE addresses */
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void*)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void*)outs[1];

	return ret;
}
| 494 | |||
/*
 * Modify QP attributes via H_MODIFY_QP. update_mask selects which
 * fields of the control block (mqpcb) firmware should apply; the
 * control block is passed by absolute address.
 */
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle, /* r5 */
				update_mask, /* r6 */
				virt_to_abs(mqpcb), /* r7 */
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lx", ret);

	return ret;
}
| 516 | |||
| 517 | u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle, | ||
| 518 | const struct ipz_qp_handle qp_handle, | ||
| 519 | struct ehca_pfqp *pfqp, | ||
| 520 | struct hcp_modify_qp_control_block *qqpcb, | ||
| 521 | struct h_galpa gal) | ||
| 522 | { | ||
| 523 | return ehca_plpar_hcall_norets(H_QUERY_QP, | ||
| 524 | adapter_handle.handle, /* r4 */ | ||
| 525 | qp_handle.handle, /* r5 */ | ||
| 526 | virt_to_abs(qqpcb), /* r6 */ | ||
| 527 | 0, 0, 0, 0); | ||
| 528 | } | ||
| 529 | |||
/*
 * Destroy a QP: tear down the galpa mappings, disable the QP via
 * H_DISABLE_AND_GETC (function code 1), then free the firmware
 * resource with H_FREE_RESOURCE. The order of these steps matters.
 */
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle, /* r4 */
				/* function code */
				1, /* r5 */
				qp->ipz_qp_handle.handle, /* r6 */
				0, 0, 0, 0, 0, 0);
	/* disable failure is logged but does not abort the free below */
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%lx", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle, /* r4 */
				      qp->ipz_qp_handle.handle, /* r5 */
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%lx", ret);

	return ret;
}
| 560 | |||
| 561 | u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle, | ||
| 562 | const struct ipz_qp_handle qp_handle, | ||
| 563 | struct h_galpa gal, | ||
| 564 | u32 port) | ||
| 565 | { | ||
| 566 | return ehca_plpar_hcall_norets(H_DEFINE_AQP0, | ||
| 567 | adapter_handle.handle, /* r4 */ | ||
| 568 | qp_handle.handle, /* r5 */ | ||
| 569 | port, /* r6 */ | ||
| 570 | 0, 0, 0, 0); | ||
| 571 | } | ||
| 572 | |||
/*
 * Designate the given QP as AQP1 (GSI QP) for the specified port and
 * return the firmware-assigned PMA and BMA QP numbers.
 */
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 * pma_qp_nr,
		       u32 * bma_qp_nr)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle, /* r5 */
				port, /* r6 */
				0, 0, 0, 0, 0, 0);
	/* outputs are written even on failure; callers check ret first */
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lx", ret);

	return ret;
}
| 595 | |||
| 596 | u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle, | ||
| 597 | const struct ipz_qp_handle qp_handle, | ||
| 598 | struct h_galpa gal, | ||
| 599 | u16 mcg_dlid, | ||
| 600 | u64 subnet_prefix, u64 interface_id) | ||
| 601 | { | ||
| 602 | u64 ret; | ||
| 603 | |||
| 604 | ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP, | ||
| 605 | adapter_handle.handle, /* r4 */ | ||
| 606 | qp_handle.handle, /* r5 */ | ||
| 607 | mcg_dlid, /* r6 */ | ||
| 608 | interface_id, /* r7 */ | ||
| 609 | subnet_prefix, /* r8 */ | ||
| 610 | 0, 0); | ||
| 611 | |||
| 612 | if (ret == H_NOT_ENOUGH_RESOURCES) | ||
| 613 | ehca_gen_err("Not enough resources. ret=%lx", ret); | ||
| 614 | |||
| 615 | return ret; | ||
| 616 | } | ||
| 617 | |||
| 618 | u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle, | ||
| 619 | const struct ipz_qp_handle qp_handle, | ||
| 620 | struct h_galpa gal, | ||
| 621 | u16 mcg_dlid, | ||
| 622 | u64 subnet_prefix, u64 interface_id) | ||
| 623 | { | ||
| 624 | return ehca_plpar_hcall_norets(H_DETACH_MCQP, | ||
| 625 | adapter_handle.handle, /* r4 */ | ||
| 626 | qp_handle.handle, /* r5 */ | ||
| 627 | mcg_dlid, /* r6 */ | ||
| 628 | interface_id, /* r7 */ | ||
| 629 | subnet_prefix, /* r8 */ | ||
| 630 | 0, 0); | ||
| 631 | } | ||
| 632 | |||
| 633 | u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle, | ||
| 634 | struct ehca_cq *cq, | ||
| 635 | u8 force_flag) | ||
| 636 | { | ||
| 637 | u64 ret; | ||
| 638 | |||
| 639 | ret = hcp_galpas_dtor(&cq->galpas); | ||
| 640 | if (ret) { | ||
| 641 | ehca_gen_err("Could not destruct cp->galpas"); | ||
| 642 | return H_RESOURCE; | ||
| 643 | } | ||
| 644 | |||
| 645 | ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
| 646 | adapter_handle.handle, /* r4 */ | ||
| 647 | cq->ipz_cq_handle.handle, /* r5 */ | ||
| 648 | force_flag != 0 ? 1L : 0L, /* r6 */ | ||
| 649 | 0, 0, 0, 0); | ||
| 650 | |||
| 651 | if (ret == H_RESOURCE) | ||
| 652 | ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret); | ||
| 653 | |||
| 654 | return ret; | ||
| 655 | } | ||
| 656 | |||
| 657 | u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle, | ||
| 658 | struct ehca_eq *eq) | ||
| 659 | { | ||
| 660 | u64 ret; | ||
| 661 | |||
| 662 | ret = hcp_galpas_dtor(&eq->galpas); | ||
| 663 | if (ret) { | ||
| 664 | ehca_gen_err("Could not destruct eq->galpas"); | ||
| 665 | return H_RESOURCE; | ||
| 666 | } | ||
| 667 | |||
| 668 | ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
| 669 | adapter_handle.handle, /* r4 */ | ||
| 670 | eq->ipz_eq_handle.handle, /* r5 */ | ||
| 671 | 0, 0, 0, 0, 0); | ||
| 672 | |||
| 673 | if (ret == H_RESOURCE) | ||
| 674 | ehca_gen_err("Resource in use. ret=%lx ", ret); | ||
| 675 | |||
| 676 | return ret; | ||
| 677 | } | ||
| 678 | |||
/*
 * Allocate MR resources in HW/FW via H_ALLOC_RESOURCE (resource type 5)
 * for a region of [vaddr, vaddr+length). Returns the MR handle and the
 * local/remote keys in *outparms.
 */
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				5, /* r5 */
				vaddr, /* r6 */
				length, /* r7 */
				/* access control goes in the high word of r8 */
				(((u64)access_ctrl) << 32ULL), /* r8 */
				pd.value, /* r9 */
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}
| 704 | |||
| 705 | u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, | ||
| 706 | const struct ehca_mr *mr, | ||
| 707 | const u8 pagesize, | ||
| 708 | const u8 queue_type, | ||
| 709 | const u64 logical_address_of_page, | ||
| 710 | const u64 count) | ||
| 711 | { | ||
| 712 | u64 ret; | ||
| 713 | |||
| 714 | if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { | ||
| 715 | ehca_gen_err("logical_address_of_page not on a 4k boundary " | ||
| 716 | "adapter_handle=%lx mr=%p mr_handle=%lx " | ||
| 717 | "pagesize=%x queue_type=%x " | ||
| 718 | "logical_address_of_page=%lx count=%lx", | ||
| 719 | adapter_handle.handle, mr, | ||
| 720 | mr->ipz_mr_handle.handle, pagesize, queue_type, | ||
| 721 | logical_address_of_page, count); | ||
| 722 | ret = H_PARAMETER; | ||
| 723 | } else | ||
| 724 | ret = hipz_h_register_rpage(adapter_handle, pagesize, | ||
| 725 | queue_type, | ||
| 726 | mr->ipz_mr_handle.handle, | ||
| 727 | logical_address_of_page, count); | ||
| 728 | return ret; | ||
| 729 | } | ||
| 730 | |||
/*
 * Query MR attributes via H_QUERY_MR: length, virtual address, access
 * control bits and the l/r keys, unpacked from the hcall output words.
 */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle, /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	/* access control in the high word of outs[4] */
	outparms->acl = outs[4] >> 32;
	/* lkey/rkey packed into the high/low words of outs[5] */
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}
| 750 | |||
| 751 | u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle, | ||
| 752 | const struct ehca_mr *mr) | ||
| 753 | { | ||
| 754 | return ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
| 755 | adapter_handle.handle, /* r4 */ | ||
| 756 | mr->ipz_mr_handle.handle, /* r5 */ | ||
| 757 | 0, 0, 0, 0, 0); | ||
| 758 | } | ||
| 759 | |||
/*
 * Re-register a physical MR via H_REREGISTER_PMR with a new virtual
 * address, length, access control and PD; mr_addr_cb is the absolute
 * address of the control block describing the new page list.
 */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle, /* r4 */
				mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in, /* r6 */
				length, /* r7 */
				/* r8: access control (high) | PD (low) */
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb, /* r9 */
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}
| 787 | |||
/*
 * Register a shared MR via H_REGISTER_SMR, based on an existing MR
 * (orig_mr) but with its own virtual address, access rights and PD.
 */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle, /* r4 */
				orig_mr->ipz_mr_handle.handle, /* r5 */
				vaddr_in, /* r6 */
				/* access control in the high word of r7 */
				(((u64)access_ctrl) << 32ULL), /* r7 */
				pd.value, /* r8 */
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}
| 812 | |||
/*
 * Allocate MW resources in HW/FW via H_ALLOC_RESOURCE (resource type 6)
 * within the given PD; returns the MW handle and rkey in *outparms.
 */
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	u64 outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle, /* r4 */
				6, /* r5 */
				pd.value, /* r6 */
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}
| 831 | |||
| 832 | u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle, | ||
| 833 | const struct ehca_mw *mw, | ||
| 834 | struct ehca_mw_hipzout_parms *outparms) | ||
| 835 | { | ||
| 836 | u64 ret; | ||
| 837 | u64 outs[PLPAR_HCALL9_BUFSIZE]; | ||
| 838 | |||
| 839 | ret = ehca_plpar_hcall9(H_QUERY_MW, outs, | ||
| 840 | adapter_handle.handle, /* r4 */ | ||
| 841 | mw->ipz_mw_handle.handle, /* r5 */ | ||
| 842 | 0, 0, 0, 0, 0, 0, 0); | ||
| 843 | outparms->rkey = (u32)outs[3]; | ||
| 844 | |||
| 845 | return ret; | ||
| 846 | } | ||
| 847 | |||
| 848 | u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle, | ||
| 849 | const struct ehca_mw *mw) | ||
| 850 | { | ||
| 851 | return ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
| 852 | adapter_handle.handle, /* r4 */ | ||
| 853 | mw->ipz_mw_handle.handle, /* r5 */ | ||
| 854 | 0, 0, 0, 0, 0); | ||
| 855 | } | ||
| 856 | |||
| 857 | u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle, | ||
| 858 | const u64 ressource_handle, | ||
| 859 | void *rblock, | ||
| 860 | unsigned long *byte_count) | ||
| 861 | { | ||
| 862 | u64 r_cb = virt_to_abs(rblock); | ||
| 863 | |||
| 864 | if (r_cb & (EHCA_PAGESIZE-1)) { | ||
| 865 | ehca_gen_err("rblock not page aligned."); | ||
| 866 | return H_PARAMETER; | ||
| 867 | } | ||
| 868 | |||
| 869 | return ehca_plpar_hcall_norets(H_ERROR_DATA, | ||
| 870 | adapter_handle.handle, | ||
| 871 | ressource_handle, | ||
| 872 | r_cb, | ||
| 873 | 0, 0, 0, 0); | ||
| 874 | } | ||
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h new file mode 100644 index 00000000000..587ebd47095 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_if.h | |||
| @@ -0,0 +1,261 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Firmware Infiniband Interface code for POWER | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Gerd Bayer <gerd.bayer@de.ibm.com> | ||
| 9 | * Waleri Fomin <fomin@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #ifndef __HCP_IF_H__ | ||
| 45 | #define __HCP_IF_H__ | ||
| 46 | |||
| 47 | #include "ehca_classes.h" | ||
| 48 | #include "ehca_tools.h" | ||
| 49 | #include "hipz_hw.h" | ||
| 50 | |||
| 51 | /* | ||
| 52 | * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initalize | ||
| 53 | * resources, create the empty EQPT (ring). | ||
| 54 | */ | ||
| 55 | u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, | ||
| 56 | struct ehca_pfeq *pfeq, | ||
| 57 | const u32 neq_control, | ||
| 58 | const u32 number_of_entries, | ||
| 59 | struct ipz_eq_handle *eq_handle, | ||
| 60 | u32 * act_nr_of_entries, | ||
| 61 | u32 * act_pages, | ||
| 62 | u32 * eq_ist); | ||
| 63 | |||
| 64 | u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle, | ||
| 65 | struct ipz_eq_handle eq_handle, | ||
| 66 | const u64 event_mask); | ||
| 67 | /* | ||
| 68 | * hipz_h_allocate_resource_cq allocates CQ resources in HW and FW, initialize | ||
| 69 | * resources, create the empty CQPT (ring). | ||
| 70 | */ | ||
| 71 | u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, | ||
| 72 | struct ehca_cq *cq, | ||
| 73 | struct ehca_alloc_cq_parms *param); | ||
| 74 | |||
| 75 | |||
| 76 | /* | ||
| 77 | * hipz_h_alloc_resource_qp allocates QP resources in HW and FW, | ||
| 78 | * initialize resources, create empty QPPTs (2 rings). | ||
| 79 | */ | ||
| 80 | u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, | ||
| 81 | struct ehca_qp *qp, | ||
| 82 | struct ehca_alloc_qp_parms *parms); | ||
| 83 | |||
| 84 | u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, | ||
| 85 | const u8 port_id, | ||
| 86 | struct hipz_query_port *query_port_response_block); | ||
| 87 | |||
| 88 | u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, | ||
| 89 | struct hipz_query_hca *query_hca_rblock); | ||
| 90 | |||
| 91 | /* | ||
| 92 | * hipz_h_register_rpage internal function in hcp_if.h for all | ||
| 93 | * hcp_H_REGISTER_RPAGE calls. | ||
| 94 | */ | ||
| 95 | u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle, | ||
| 96 | const u8 pagesize, | ||
| 97 | const u8 queue_type, | ||
| 98 | const u64 resource_handle, | ||
| 99 | const u64 logical_address_of_page, | ||
| 100 | u64 count); | ||
| 101 | |||
| 102 | u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle, | ||
| 103 | const struct ipz_eq_handle eq_handle, | ||
| 104 | struct ehca_pfeq *pfeq, | ||
| 105 | const u8 pagesize, | ||
| 106 | const u8 queue_type, | ||
| 107 | const u64 logical_address_of_page, | ||
| 108 | const u64 count); | ||
| 109 | |||
| 110 | u64 hipz_h_query_int_state(const struct ipz_adapter_handle | ||
| 111 | hcp_adapter_handle, | ||
| 112 | u32 ist); | ||
| 113 | |||
| 114 | u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle, | ||
| 115 | const struct ipz_cq_handle cq_handle, | ||
| 116 | struct ehca_pfcq *pfcq, | ||
| 117 | const u8 pagesize, | ||
| 118 | const u8 queue_type, | ||
| 119 | const u64 logical_address_of_page, | ||
| 120 | const u64 count, | ||
| 121 | const struct h_galpa gal); | ||
| 122 | |||
/*
 * hipz_h_register_rpage_qp registers QP resource pages in HW and FW
 * (QP counterpart of hipz_h_register_rpage_mr below).
 */
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa);

/*
 * hipz_h_disable_and_get_wqe disables the QP and reports, via the two
 * out-pointers, the next send/receive WQE addresses still to be processed.
 * NOTE(review): the exact meaning of dis_and_get_function_code is defined
 * by the hcall implementation -- confirm against hcp_if.c.
 */
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe_tb_processed,
			       void **log_addr_next_rq_wqe_tb_processed,
			       int dis_and_get_function_code);
/* completion signalling modes (names suggest: no CQE / per-WQE / always) */
enum hcall_sigt {
	HCALL_SIGT_NO_CQE = 0,
	HCALL_SIGT_BY_WQE = 1,
	HCALL_SIGT_EVERY = 2
};

/* hipz_h_modify_qp modifies QP attributes selected by update_mask in mqpcb */
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal);

/* hipz_h_query_qp reads current QP attributes into qqpcb */
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal);

/* hipz_h_destroy_qp destroys QP resources in HW and FW */
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp);

/* hipz_h_define_aqp0 sets up the special QP 0 for the given port */
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port);

/*
 * hipz_h_define_aqp1 sets up the special QP 1 for the given port and
 * returns the assigned PMA/BMA QP numbers through the out-pointers.
 */
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 * pma_qp_nr,
		       u32 * bma_qp_nr);

/*
 * hipz_h_attach_mcqp attaches the QP to the multicast group identified by
 * mcg_dlid and the (subnet_prefix, interface_id) GID pair.
 */
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id);

/* hipz_h_detach_mcqp reverses hipz_h_attach_mcqp for the same group */
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id);

/* hipz_h_destroy_cq destroys CQ resources in HW and FW */
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag);

/* hipz_h_destroy_eq destroys EQ resources in HW and FW */
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq);

/*
 * hipz_h_alloc_resource_mr allocates MR resources in HW and FW, initialize
 * resources.
 */
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms);

/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count);

/* hipz_h_query_mr queries MR in HW and FW */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms);

/* hipz_h_free_resource_mr frees MR resources in HW and FW */
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr);

/* hipz_h_reregister_pmr reregisters MR in HW and FW */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms);

/* hipz_h_register_smr register shared MR in HW and FW */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms);

/*
 * hipz_h_alloc_resource_mw allocates MW resources in HW and FW, initialize
 * resources.
 */
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms);

/* hipz_h_query_mw queries MW in HW and FW */
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms);

/* hipz_h_free_resource_mw frees MW resources in HW and FW */
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw);

/*
 * hipz_h_error_data fetches error information for the given resource
 * handle into rblock; byte_count carries the buffer/result length.
 * NOTE(review): "ressource" spelling is part of the existing interface.
 */
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 ressource_handle,
		      void *rblock,
		      unsigned long *byte_count);

#endif /* __HCP_IF_H__ */
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c new file mode 100644 index 00000000000..0b1a4772c78 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * load store abstraction for ehca register access with tracing | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #include "ehca_classes.h" | ||
| 43 | #include "hipz_hw.h" | ||
| 44 | |||
| 45 | int hcall_map_page(u64 physaddr, u64 *mapaddr) | ||
| 46 | { | ||
| 47 | *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE)); | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | int hcall_unmap_page(u64 mapaddr) | ||
| 52 | { | ||
| 53 | iounmap((volatile void __iomem*)mapaddr); | ||
| 54 | return 0; | ||
| 55 | } | ||
| 56 | |||
| 57 | int hcp_galpas_ctor(struct h_galpas *galpas, | ||
| 58 | u64 paddr_kernel, u64 paddr_user) | ||
| 59 | { | ||
| 60 | int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); | ||
| 61 | if (ret) | ||
| 62 | return ret; | ||
| 63 | |||
| 64 | galpas->user.fw_handle = paddr_user; | ||
| 65 | |||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | int hcp_galpas_dtor(struct h_galpas *galpas) | ||
| 70 | { | ||
| 71 | if (galpas->kernel.fw_handle) { | ||
| 72 | int ret = hcall_unmap_page(galpas->kernel.fw_handle); | ||
| 73 | if (ret) | ||
| 74 | return ret; | ||
| 75 | } | ||
| 76 | |||
| 77 | galpas->user.fw_handle = galpas->kernel.fw_handle = 0; | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | } | ||
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h new file mode 100644 index 00000000000..5305c2a3ed9 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_phyp.h | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * Firmware calls | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 8 | * Waleri Fomin <fomin@de.ibm.com> | ||
| 9 | * Gerd Bayer <gerd.bayer@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #ifndef __HCP_PHYP_H__ | ||
| 45 | #define __HCP_PHYP_H__ | ||
| 46 | |||
| 47 | |||
/*
 * eHCA page (mapped into memory)
 * resource to access eHCA register pages in CPU address space
 */
struct h_galpa {
	u64 fw_handle;
	/* for pSeries this is a 64bit memory address where
	   I/O memory is mapped into CPU address space (kv) */
};

/*
 * resource to access eHCA address space registers, all types.
 * The kernel handle is filled in by hcp_galpas_ctor() with the
 * ioremap'ed address of the register page; the user handle is the raw
 * address passed by the caller (see hcp_phyp.c).
 */
struct h_galpas {
	u32 pid;		/* PID of userspace galpa checking */
	struct h_galpa user;	/* user space accessible resource,
				   set to 0 if unused */
	struct h_galpa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
};
| 68 | |||
| 69 | static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset) | ||
| 70 | { | ||
| 71 | u64 addr = galpa.fw_handle + offset; | ||
| 72 | return *(volatile u64 __force *)addr; | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value) | ||
| 76 | { | ||
| 77 | u64 addr = galpa.fw_handle + offset; | ||
| 78 | *(volatile u64 __force *)addr = value; | ||
| 79 | } | ||
| 80 | |||
/* map the kernel page and record the user handle (see hcp_phyp.c) */
int hcp_galpas_ctor(struct h_galpas *galpas,
		    u64 paddr_kernel, u64 paddr_user);

/* unmap the kernel page and clear both handles */
int hcp_galpas_dtor(struct h_galpas *galpas);

/* ioremap one eHCA page at physaddr; result returned via *mapaddr */
int hcall_map_page(u64 physaddr, u64 * mapaddr);

/* iounmap a page previously mapped with hcall_map_page() */
int hcall_unmap_page(u64 mapaddr);

#endif /* __HCP_PHYP_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/infiniband/hw/ehca/hipz_fns.h new file mode 100644 index 00000000000..9dac93d0214 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_fns.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * HW abstraction register functions | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 8 | * | ||
| 9 | * Copyright (c) 2005 IBM Corporation | ||
| 10 | * | ||
| 11 | * All rights reserved. | ||
| 12 | * | ||
| 13 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 14 | * BSD. | ||
| 15 | * | ||
| 16 | * OpenIB BSD License | ||
| 17 | * | ||
| 18 | * Redistribution and use in source and binary forms, with or without | ||
| 19 | * modification, are permitted provided that the following conditions are met: | ||
| 20 | * | ||
| 21 | * Redistributions of source code must retain the above copyright notice, this | ||
| 22 | * list of conditions and the following disclaimer. | ||
| 23 | * | ||
| 24 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 25 | * this list of conditions and the following disclaimer in the documentation | ||
| 26 | * and/or other materials | ||
| 27 | * provided with the distribution. | ||
| 28 | * | ||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 30 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 33 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 36 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 39 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #ifndef __HIPZ_FNS_H__ | ||
| 43 | #define __HIPZ_FNS_H__ | ||
| 44 | |||
| 45 | #include "ehca_classes.h" | ||
| 46 | #include "hipz_hw.h" | ||
| 47 | |||
| 48 | #include "hipz_fns_core.h" | ||
| 49 | |||
/*
 * Typed register accessors: each macro turns a member name of the
 * matching register-map struct (hipz_eqtemm / hipz_qpedmm / hipz_mrmwmm,
 * see hipz_hw.h) into its byte offset via the *_OFFSET() helpers before
 * delegating to the raw hipz_galpa_load()/hipz_galpa_store().
 */
#define hipz_galpa_store_eq(gal, offset, value) \
	hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)

#define hipz_galpa_load_eq(gal, offset) \
	hipz_galpa_load(gal, EQTEMM_OFFSET(offset))

#define hipz_galpa_store_qped(gal, offset, value) \
	hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)

#define hipz_galpa_load_qped(gal, offset) \
	hipz_galpa_load(gal, QPEDMM_OFFSET(offset))

#define hipz_galpa_store_mrmw(gal, offset, value) \
	hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)

#define hipz_galpa_load_mrmw(gal, offset) \
	hipz_galpa_load(gal, MRMWMM_OFFSET(offset))

#endif
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h new file mode 100644 index 00000000000..20898a15344 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * HW abstraction register functions | ||
| 5 | * | ||
| 6 | * Authors: Christoph Raisch <raisch@de.ibm.com> | ||
| 7 | * Heiko J Schick <schickhj@de.ibm.com> | ||
| 8 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | ||
| 9 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2005 IBM Corporation | ||
| 12 | * | ||
| 13 | * All rights reserved. | ||
| 14 | * | ||
| 15 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 16 | * BSD. | ||
| 17 | * | ||
| 18 | * OpenIB BSD License | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions are met: | ||
| 22 | * | ||
| 23 | * Redistributions of source code must retain the above copyright notice, this | ||
| 24 | * list of conditions and the following disclaimer. | ||
| 25 | * | ||
| 26 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 27 | * this list of conditions and the following disclaimer in the documentation | ||
| 28 | * and/or other materials | ||
| 29 | * provided with the distribution. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 32 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 33 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 34 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 35 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 36 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 37 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 38 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 39 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 40 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 41 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 42 | */ | ||
| 43 | |||
| 44 | #ifndef __HIPZ_FNS_CORE_H__ | ||
| 45 | #define __HIPZ_FNS_CORE_H__ | ||
| 46 | |||
| 47 | #include "hcp_phyp.h" | ||
| 48 | #include "hipz_hw.h" | ||
| 49 | |||
/*
 * Typed register accessors for the CQ and QP register areas: translate a
 * struct member name into its byte offset (CQTEMM_OFFSET/QPTEMM_OFFSET,
 * see hipz_hw.h) before delegating to the raw galpa load/store.
 * Whitespace normalized to match the CQ pair; expansion unchanged.
 */
#define hipz_galpa_store_cq(gal, offset, value) \
	hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)

#define hipz_galpa_load_cq(gal, offset) \
	hipz_galpa_load(gal, CQTEMM_OFFSET(offset))

#define hipz_galpa_store_qp(gal, offset, value) \
	hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)

#define hipz_galpa_load_qp(gal, offset) \
	hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
| 60 | |||
| 61 | static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) | ||
| 62 | { | ||
| 63 | /* ringing doorbell :-) */ | ||
| 64 | hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa, | ||
| 65 | EHCA_BMASK_SET(QPX_SQADDER, nr_wqes)); | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes) | ||
| 69 | { | ||
| 70 | /* ringing doorbell :-) */ | ||
| 71 | hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa, | ||
| 72 | EHCA_BMASK_SET(QPX_RQADDER, nr_wqes)); | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes) | ||
| 76 | { | ||
| 77 | hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca, | ||
| 78 | EHCA_BMASK_SET(CQX_FECADDER, nr_cqes)); | ||
| 79 | } | ||
| 80 | |||
/*
 * Set the CQx_N0 "generate solicited completion event" control bit.
 * The trailing read of cqx_n0 into an otherwise unused local is
 * deliberate: it reads the register back after the store.
 * NOTE(review): presumably a write-flush/ordering idiom for the mapped
 * register page -- confirm before removing.
 */
static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
{
	u64 cqx_n0_reg;

	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
			    EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
					   value));
	cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
}
| 90 | |||
/*
 * Set the CQx_N1 "generate completion event" control bit.  As in
 * hipz_set_cqx_n0(), the read-back into an unused local is deliberate.
 * NOTE(review): presumably a write-flush/ordering idiom for the mapped
 * register page -- confirm before removing.
 */
static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
{
	u64 cqx_n1_reg;

	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
			    EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
	cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
}
| 99 | |||
#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h new file mode 100644 index 00000000000..3fc92b031c5 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_hw.h | |||
| @@ -0,0 +1,388 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * eHCA register definitions | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 8 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #ifndef __HIPZ_HW_H__ | ||
| 44 | #define __HIPZ_HW_H__ | ||
| 45 | |||
| 46 | #include "ehca_tools.h" | ||
| 47 | |||
/*
 * QP Table Entry Memory Map: register layout of one QP's hardware page.
 * Field order mirrors the register offsets (0x.. markers below); fields
 * must not be reordered or resized.  Members are accessed by name via
 * the hipz_galpa_*_qp() macros (QPTEMM_OFFSET).
 */
struct hipz_qptemm {
	u64 qpx_hcr;
	u64 qpx_c;
	u64 qpx_herr;
	u64 qpx_aer;
/* 0x20*/
	u64 qpx_sqa;		/* send-queue adder (doorbell), see QPX_SQADDER */
	u64 qpx_sqc;
	u64 qpx_rqa;		/* receive-queue adder (doorbell), see QPX_RQADDER */
	u64 qpx_rqc;
/* 0x40*/
	u64 qpx_st;
	u64 qpx_pmstate;
	u64 qpx_pmfa;
	u64 qpx_pkey;
/* 0x60*/
	u64 qpx_pkeya;
	u64 qpx_pkeyb;
	u64 qpx_pkeyc;
	u64 qpx_pkeyd;
/* 0x80*/
	u64 qpx_qkey;
	u64 qpx_dqp;
	u64 qpx_dlidp;
	u64 qpx_portp;
/* 0xa0*/
	u64 qpx_slidp;
	u64 qpx_slidpp;
	u64 qpx_dlida;
	u64 qpx_porta;
/* 0xc0*/
	u64 qpx_slida;
	u64 qpx_slidpa;
	u64 qpx_slvl;
	u64 qpx_ipd;
/* 0xe0*/
	u64 qpx_mtu;
	u64 qpx_lato;
	u64 qpx_rlimit;
	u64 qpx_rnrlimit;
/* 0x100*/
	u64 qpx_t;
	u64 qpx_sqhp;
	u64 qpx_sqptp;
	u64 qpx_nspsn;
/* 0x120*/
	u64 qpx_nspsnhwm;
	u64 reserved1;
	u64 qpx_sdsi;
	u64 qpx_sdsbc;
/* 0x140*/
	u64 qpx_sqwsize;
	u64 qpx_sqwts;
	u64 qpx_lsn;
	u64 qpx_nssn;
/* 0x160 */
	u64 qpx_mor;
	u64 qpx_cor;
	u64 qpx_sqsize;
	u64 qpx_erc;
/* 0x180*/
	u64 qpx_rnrrc;
	u64 qpx_ernrwt;
	u64 qpx_rnrresp;
	u64 qpx_lmsna;
/* 0x1a0 */
	u64 qpx_sqhpc;
	u64 qpx_sqcptp;
	u64 qpx_sigt;
	u64 qpx_wqecnt;
/* 0x1c0*/
	u64 qpx_rqhp;
	u64 qpx_rqptp;
	u64 qpx_rqsize;
	u64 qpx_nrr;
/* 0x1e0*/
	u64 qpx_rdmac;
	u64 qpx_nrpsn;
	u64 qpx_lapsn;
	u64 qpx_lcr;
/* 0x200*/
	u64 qpx_rwc;
	u64 qpx_rwva;
	u64 qpx_rdsi;
	u64 qpx_rdsbc;
/* 0x220*/
	u64 qpx_rqwsize;
	u64 qpx_crmsn;
	u64 qpx_rdd;
	u64 qpx_larpsn;
/* 0x240*/
	u64 qpx_pd;
	u64 qpx_scqn;
	u64 qpx_rcqn;
	u64 qpx_aeqn;
/* 0x260*/
	u64 qpx_aaelog;
	u64 qpx_ram;
	u64 qpx_rdmaqe0;
	u64 qpx_rdmaqe1;
/* 0x280*/
	u64 qpx_rdmaqe2;
	u64 qpx_rdmaqe3;
	u64 qpx_nrpsnhwm;
/* 0x298*/
	u64 reserved[(0x400 - 0x298) / 8];
/* 0x400 extended data */
	u64 reserved_ext[(0x500 - 0x400) / 8];
/* 0x500 */
	u64 reserved2[(0x1000 - 0x500) / 8];
/* 0x1000 */
};

/* bit fields (low 16 bits) of the SQ/RQ doorbell adder registers */
#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
#define QPX_RQADDER EHCA_BMASK_IBM(48,63)

/* byte offset of a member within the QP register page */
#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
| 166 | |||
/*
 * MRMWPT Entry Memory Map: register layout of an MR/MW hardware page.
 * Layout mirrors hardware offsets; accessed via hipz_galpa_*_mrmw()
 * (MRMWMM_OFFSET).  Fields must not be reordered or resized.
 */
struct hipz_mrmwmm {
	/* 0x00 */
	u64 mrx_hcr;

	u64 mrx_c;
	u64 mrx_herr;
	u64 mrx_aer;
	/* 0x20 */
	u64 mrx_pp;
	u64 reserved1;
	u64 reserved2;
	u64 reserved3;
	/* 0x40 */
	u64 reserved4[(0x200 - 0x40) / 8];
	/* 0x200 */
	u64 mrx_ctl[64];

};

/* byte offset of a member within the MR/MW register page */
#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
| 188 | |||
/*
 * QP extended-data register layout (offsets start at 0x400 within the
 * page).  Accessed via hipz_galpa_*_qped() (QPEDMM_OFFSET).  Fields
 * must not be reordered or resized.
 */
struct hipz_qpedmm {
	/* 0x00 */
	u64 reserved0[(0x400) / 8];
	/* 0x400 */
	u64 qpedx_phh;
	u64 qpedx_ppsgp;
	/* 0x410 */
	u64 qpedx_ppsgu;
	u64 qpedx_ppdgp;
	/* 0x420 */
	u64 qpedx_ppdgu;
	u64 qpedx_aph;
	/* 0x430 */
	u64 qpedx_apsgp;
	u64 qpedx_apsgu;
	/* 0x440 */
	u64 qpedx_apdgp;
	u64 qpedx_apdgu;
	/* 0x450 */
	u64 qpedx_apav;
	u64 qpedx_apsav;
	/* 0x460 */
	u64 qpedx_hcr;
	u64 reserved1[4];
	/* 0x488 */
	u64 qpedx_rrl0;
	/* 0x490 */
	u64 qpedx_rrrkey0;
	u64 qpedx_rrva0;
	/* 0x4a0 */
	u64 reserved2;
	u64 qpedx_rrl1;
	/* 0x4b0 */
	u64 qpedx_rrrkey1;
	u64 qpedx_rrva1;
	/* 0x4c0 */
	u64 reserved3;
	u64 qpedx_rrl2;
	/* 0x4d0 */
	u64 qpedx_rrrkey2;
	u64 qpedx_rrva2;
	/* 0x4e0 */
	u64 reserved4;
	u64 qpedx_rrl3;
	/* 0x4f0 */
	u64 qpedx_rrrkey3;
	u64 qpedx_rrva3;
};

/* byte offset of a member within the QP extended-data register page */
#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
| 239 | |||
/*
 * CQ Table Entry Memory Map: register layout of one CQ's hardware page.
 * Accessed via hipz_galpa_*_cq() (CQTEMM_OFFSET).  Fields must not be
 * reordered or resized.
 */
struct hipz_cqtemm {
	u64 cqx_hcr;
	u64 cqx_c;
	u64 cqx_herr;
	u64 cqx_aer;
/* 0x20 */
	u64 cqx_ptp;
	u64 cqx_tp;
	u64 cqx_fec;		/* free entry count, see CQX_FEC_CQE_CNT */
	u64 cqx_feca;		/* free entry count adder, see hipz_update_feca() */
/* 0x40 */
	u64 cqx_ep;
	u64 cqx_eq;
/* 0x50 */
	u64 reserved1;
	u64 cqx_n0;		/* solicited completion event control, see hipz_set_cqx_n0() */
/* 0x60 */
	u64 cqx_n1;		/* completion event control, see hipz_set_cqx_n1() */
	u64 reserved2[(0x1000 - 0x60) / 8];
/* 0x1000 */
};

/* bit fields of the CQ registers above */
#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32,63)
#define CQX_FECADDER              EHCA_BMASK_IBM(32,63)
#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)

/* byte offset of a member within the CQ register page */
#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
| 269 | |||
/*
 * EQ Table Entry Memory Map: register layout of one EQ's hardware page.
 * Accessed via hipz_galpa_*_eq() (EQTEMM_OFFSET).  Fields must not be
 * reordered or resized.
 */
struct hipz_eqtemm {
	u64 eqx_hcr;
	u64 eqx_c;

	u64 eqx_herr;
	u64 eqx_aer;
/* 0x20 */
	u64 eqx_ptp;
	u64 eqx_tp;
	u64 eqx_ssba;
	u64 eqx_psba;

/* 0x40 */
	u64 eqx_cec;
	u64 eqx_meql;
	u64 eqx_xisbi;
	u64 eqx_xisc;
/* 0x60 */
	u64 eqx_it;

};

/* byte offset of a member within the EQ register page */
#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)

/*
 * access control defines for MR/MW -- passed as the access_ctrl
 * argument of the MR/MW hcalls declared in hcp_if.h
 */
#define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
#define HIPZ_ACCESSCTRL_R_WRITE  0x00400000
#define HIPZ_ACCESSCTRL_R_READ   0x00200000
#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
#define HIPZ_ACCESSCTRL_MW_BIND  0x00080000
| 301 | |||
/*
 * query hca response block: firmware-defined reply layout for the
 * query-HCA hcall.  Packed so the fields line up with the firmware
 * response buffer -- do not reorder or resize.  cur_* fields report
 * currently allocated resources, max_* fields the supported limits.
 */
struct hipz_query_hca {
	u32 cur_reliable_dg;
	u32 cur_qp;
	u32 cur_cq;
	u32 cur_eq;
	u32 cur_mr;
	u32 cur_mw;
	u32 cur_ee_context;
	u32 cur_mcast_grp;
	u32 cur_qp_attached_mcast_grp;
	u32 reserved1;
	u32 cur_ipv6_qp;
	u32 cur_eth_qp;
	u32 cur_hp_mr;
	u32 reserved2[3];
	u32 max_rd_domain;
	u32 max_qp;
	u32 max_cq;
	u32 max_eq;
	u32 max_mr;
	u32 max_hp_mr;
	u32 max_mw;
	u32 max_mrwpte;
	u32 max_special_mrwpte;
	u32 max_rd_ee_context;
	u32 max_mcast_grp;
	u32 max_total_mcast_qp_attach;
	u32 max_mcast_qp_attach;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 internal_clock_frequency;
	u32 max_pd;
	u32 max_ah;
	u32 max_cqe;
	u32 max_wqes_wq;
	u32 max_partitions;
	u32 max_rr_ee_context;
	u32 max_rr_qp;
	u32 max_rr_hca;
	u32 max_act_wqs_ee_context;
	u32 max_act_wqs_qp;
	u32 max_sge;
	u32 max_sge_rd;
	u32 memory_page_size_supported;
	u64 max_mr_size;
	u32 local_ca_ack_delay;
	u32 num_ports;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 node_guid;
	u64 hca_cap_indicators;
	u32 data_counter_register_size;
	u32 max_shared_rq;
	u32 max_isns_eq;
	u32 max_neq;
} __attribute__ ((packed));
| 360 | |||
/*
 * query port response block: firmware-defined reply layout for the
 * query-port hcall.  Packed to match the firmware response buffer --
 * do not reorder or resize (note the large reserved2 gap before the
 * GUID table).
 */
struct hipz_query_port {
	u32 state;
	u32 bad_pkey_cntr;
	u32 lmc;
	u32 lid;
	u32 subnet_timeout;
	u32 qkey_viol_cntr;
	u32 sm_sl;
	u32 sm_lid;
	u32 capability_mask;
	u32 init_type_reply;
	u32 pkey_tbl_len;
	u32 gid_tbl_len;
	u64 gid_prefix;
	u32 port_nr;
	u16 pkey_entries[16];
	u8 reserved1[32];
	u32 trent_size;
	u32 trbuf_size;
	u64 max_msg_sz;
	u32 max_mtu;
	u32 vl_cap;
	u8 reserved2[1900];
	u64 guid_entries[255];
} __attribute__ ((packed));
| 387 | |||
| 388 | #endif | ||
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c new file mode 100644 index 00000000000..e028ff1588c --- /dev/null +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c | |||
| @@ -0,0 +1,149 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * internal queue handling | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 8 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 13 | * BSD. | ||
| 14 | * | ||
| 15 | * OpenIB BSD License | ||
| 16 | * | ||
| 17 | * Redistribution and use in source and binary forms, with or without | ||
| 18 | * modification, are permitted provided that the following conditions are met: | ||
| 19 | * | ||
| 20 | * Redistributions of source code must retain the above copyright notice, this | ||
| 21 | * list of conditions and the following disclaimer. | ||
| 22 | * | ||
| 23 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 24 | * this list of conditions and the following disclaimer in the documentation | ||
| 25 | * and/or other materials | ||
| 26 | * provided with the distribution. | ||
| 27 | * | ||
| 28 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 29 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 30 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 31 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 32 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 33 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 34 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 35 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 36 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 37 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 38 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 39 | */ | ||
| 40 | |||
| 41 | #include "ehca_tools.h" | ||
| 42 | #include "ipz_pt_fn.h" | ||
| 43 | |||
| 44 | void *ipz_qpageit_get_inc(struct ipz_queue *queue) | ||
| 45 | { | ||
| 46 | void *ret = ipz_qeit_get(queue); | ||
| 47 | queue->current_q_offset += queue->pagesize; | ||
| 48 | if (queue->current_q_offset > queue->queue_length) { | ||
| 49 | queue->current_q_offset -= queue->pagesize; | ||
| 50 | ret = NULL; | ||
| 51 | } | ||
| 52 | if (((u64)ret) % EHCA_PAGESIZE) { | ||
| 53 | ehca_gen_err("ERROR!! not at PAGE-Boundary"); | ||
| 54 | return NULL; | ||
| 55 | } | ||
| 56 | return ret; | ||
| 57 | } | ||
| 58 | |||
| 59 | void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) | ||
| 60 | { | ||
| 61 | void *ret = ipz_qeit_get(queue); | ||
| 62 | u64 last_entry_in_q = queue->queue_length - queue->qe_size; | ||
| 63 | |||
| 64 | queue->current_q_offset += queue->qe_size; | ||
| 65 | if (queue->current_q_offset > last_entry_in_q) { | ||
| 66 | queue->current_q_offset = 0; | ||
| 67 | queue->toggle_state = (~queue->toggle_state) & 1; | ||
| 68 | } | ||
| 69 | |||
| 70 | return ret; | ||
| 71 | } | ||
| 72 | |||
| 73 | int ipz_queue_ctor(struct ipz_queue *queue, | ||
| 74 | const u32 nr_of_pages, | ||
| 75 | const u32 pagesize, const u32 qe_size, const u32 nr_of_sg) | ||
| 76 | { | ||
| 77 | int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT; | ||
| 78 | int f; | ||
| 79 | |||
| 80 | if (pagesize > PAGE_SIZE) { | ||
| 81 | ehca_gen_err("FATAL ERROR: pagesize=%x is greater " | ||
| 82 | "than kernel page size", pagesize); | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | if (!pages_per_kpage) { | ||
| 86 | ehca_gen_err("FATAL ERROR: invalid kernel page size. " | ||
| 87 | "pages_per_kpage=%x", pages_per_kpage); | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | queue->queue_length = nr_of_pages * pagesize; | ||
| 91 | queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); | ||
| 92 | if (!queue->queue_pages) { | ||
| 93 | ehca_gen_err("ERROR!! didn't get the memory"); | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *)); | ||
| 97 | /* | ||
| 98 | * allocate pages for queue: | ||
| 99 | * outer loop allocates whole kernel pages (page aligned) and | ||
| 100 | * inner loop divides a kernel page into smaller hca queue pages | ||
| 101 | */ | ||
| 102 | f = 0; | ||
| 103 | while (f < nr_of_pages) { | ||
| 104 | u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); | ||
| 105 | int k; | ||
| 106 | if (!kpage) | ||
| 107 | goto ipz_queue_ctor_exit0; /*NOMEM*/ | ||
| 108 | for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) { | ||
| 109 | (queue->queue_pages)[f] = (struct ipz_page *)kpage; | ||
| 110 | kpage += EHCA_PAGESIZE; | ||
| 111 | f++; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | queue->current_q_offset = 0; | ||
| 116 | queue->qe_size = qe_size; | ||
| 117 | queue->act_nr_of_sg = nr_of_sg; | ||
| 118 | queue->pagesize = pagesize; | ||
| 119 | queue->toggle_state = 1; | ||
| 120 | return 1; | ||
| 121 | |||
| 122 | ipz_queue_ctor_exit0: | ||
| 123 | ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x", | ||
| 124 | queue, f, nr_of_pages); | ||
| 125 | for (f = 0; f < nr_of_pages; f += pages_per_kpage) { | ||
| 126 | if (!(queue->queue_pages)[f]) | ||
| 127 | break; | ||
| 128 | free_page((unsigned long)(queue->queue_pages)[f]); | ||
| 129 | } | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | int ipz_queue_dtor(struct ipz_queue *queue) | ||
| 134 | { | ||
| 135 | int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT; | ||
| 136 | int g; | ||
| 137 | int nr_pages; | ||
| 138 | |||
| 139 | if (!queue || !queue->queue_pages) { | ||
| 140 | ehca_gen_dbg("queue or queue_pages is NULL"); | ||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | nr_pages = queue->queue_length / queue->pagesize; | ||
| 144 | for (g = 0; g < nr_pages; g += pages_per_kpage) | ||
| 145 | free_page((unsigned long)(queue->queue_pages)[g]); | ||
| 146 | vfree(queue->queue_pages); | ||
| 147 | |||
| 148 | return 1; | ||
| 149 | } | ||
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h new file mode 100644 index 00000000000..2f13509d525 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h | |||
| @@ -0,0 +1,247 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer eHCA Infiniband device driver for Linux on POWER | ||
| 3 | * | ||
| 4 | * internal queue handling | ||
| 5 | * | ||
| 6 | * Authors: Waleri Fomin <fomin@de.ibm.com> | ||
| 7 | * Reinhard Ernst <rernst@de.ibm.com> | ||
| 8 | * Christoph Raisch <raisch@de.ibm.com> | ||
| 9 | * | ||
| 10 | * Copyright (c) 2005 IBM Corporation | ||
| 11 | * | ||
| 12 | * All rights reserved. | ||
| 13 | * | ||
| 14 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
| 15 | * BSD. | ||
| 16 | * | ||
| 17 | * OpenIB BSD License | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions are met: | ||
| 21 | * | ||
| 22 | * Redistributions of source code must retain the above copyright notice, this | ||
| 23 | * list of conditions and the following disclaimer. | ||
| 24 | * | ||
| 25 | * Redistributions in binary form must reproduce the above copyright notice, | ||
| 26 | * this list of conditions and the following disclaimer in the documentation | ||
| 27 | * and/or other materials | ||
| 28 | * provided with the distribution. | ||
| 29 | * | ||
| 30 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 31 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 32 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 33 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 34 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 35 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 36 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 37 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
| 38 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 39 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 40 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | */ | ||
| 42 | |||
#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__

/* eHCA hardware queue pages are fixed at 4 KB regardless of PAGE_SIZE */
#define EHCA_PAGESHIFT 12
#define EHCA_PAGESIZE 4096UL
#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
/* u64 entries per page table page (4096 / sizeof(u64)) */
#define EHCA_PT_ENTRIES 512UL

#include "ehca_tools.h"
#include "ehca_qes.h"

/* struct generic ehca page */
struct ipz_page {
	u8 entries[EHCA_PAGESIZE];
};
| 58 | |||
/* struct generic queue in linux kernel virtual memory (kv) */
struct ipz_queue {
	u64 current_q_offset;	/* current queue entry (byte offset) */

	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;		/* queue entry size */
	u32 act_nr_of_sg;	/* actual number of scatter/gather elements */
	u32 queue_length;	/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;	/* toggle flag - per page */
	u32 dummy3;		/* 64 bit alignment */
};
| 71 | |||
| 72 | /* | ||
| 73 | * return current Queue Entry for a certain q_offset | ||
| 74 | * returns address (kv) of Queue Entry | ||
| 75 | */ | ||
| 76 | static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset) | ||
| 77 | { | ||
| 78 | struct ipz_page *current_page; | ||
| 79 | if (q_offset >= queue->queue_length) | ||
| 80 | return NULL; | ||
| 81 | current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT]; | ||
| 82 | return ¤t_page->entries[q_offset & (EHCA_PAGESIZE - 1)]; | ||
| 83 | } | ||
| 84 | |||
/*
 * return current Queue Entry
 * returns address (kv) of Queue Entry at the iterator's position,
 * or NULL if current_q_offset has run past the queue length
 */
static inline void *ipz_qeit_get(struct ipz_queue *queue)
{
	return ipz_qeit_calc(queue, queue->current_q_offset);
}
| 93 | |||
| 94 | /* | ||
| 95 | * return current Queue Page , increment Queue Page iterator from | ||
| 96 | * page to page in struct ipz_queue, last increment will return 0! and | ||
| 97 | * NOT wrap | ||
| 98 | * returns address (kv) of Queue Page | ||
| 99 | * warning don't use in parallel with ipz_QE_get_inc() | ||
| 100 | */ | ||
| 101 | void *ipz_qpageit_get_inc(struct ipz_queue *queue); | ||
| 102 | |||
| 103 | /* | ||
| 104 | * return current Queue Entry, increment Queue Entry iterator by one | ||
| 105 | * step in struct ipz_queue, will wrap in ringbuffer | ||
| 106 | * returns address (kv) of Queue Entry BEFORE increment | ||
| 107 | * warning don't use in parallel with ipz_qpageit_get_inc() | ||
| 108 | * warning unpredictable results may occur if steps>act_nr_of_queue_entries | ||
| 109 | */ | ||
| 110 | static inline void *ipz_qeit_get_inc(struct ipz_queue *queue) | ||
| 111 | { | ||
| 112 | void *ret = ipz_qeit_get(queue); | ||
| 113 | queue->current_q_offset += queue->qe_size; | ||
| 114 | if (queue->current_q_offset >= queue->queue_length) { | ||
| 115 | queue->current_q_offset = 0; | ||
| 116 | /* toggle the valid flag */ | ||
| 117 | queue->toggle_state = (~queue->toggle_state) & 1; | ||
| 118 | } | ||
| 119 | |||
| 120 | return ret; | ||
| 121 | } | ||
| 122 | |||
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns 0 and does not increment, if wrong valid state
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 */
static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
{
	struct ehca_cqe *cqe = ipz_qeit_get(queue);
	/* bit 7 of cqe_flags carries the hardware valid/toggle bit —
	 * presumably set by the adapter; verify against ehca_qes.h */
	u32 cqe_flags = cqe->cqe_flags;

	/* entry is only valid while its toggle bit matches our state */
	if ((cqe_flags >> 7) != (queue->toggle_state & 1))
		return NULL;

	ipz_qeit_get_inc(queue);
	return cqe;
}
| 142 | |||
/*
 * returns and resets Queue Entry iterator
 * returns address (kv) of first Queue Entry
 * note: does NOT touch toggle_state
 */
static inline void *ipz_qeit_reset(struct ipz_queue *queue)
{
	queue->current_q_offset = 0;
	return ipz_qeit_get(queue);
}
| 152 | |||
/* struct generic page table */
struct ipz_pt {
	u64 entries[EHCA_PT_ENTRIES];
};

/* struct page table for a queue, only to be used in pf */
struct ipz_qpt {
	/* queue page tables (kv), use u64 because we know the element length */
	u64 *qpts;
	u32 n_qpts;		/* number of page table pages */
	u32 n_ptes;		/* number of page table entries */
	u64 *current_pte_addr;	/* iterator into the PTE ring */
};
| 166 | |||
| 167 | /* | ||
| 168 | * constructor for a ipz_queue_t, placement new for ipz_queue_t, | ||
 * new for all dependent data structures
| 170 | * all QP Tables are the same | ||
| 171 | * flow: | ||
| 172 | * allocate+pin queue | ||
| 173 | * see ipz_qpt_ctor() | ||
| 174 | * returns true if ok, false if out of memory | ||
| 175 | */ | ||
| 176 | int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages, | ||
| 177 | const u32 pagesize, const u32 qe_size, | ||
| 178 | const u32 nr_of_sg); | ||
| 179 | |||
| 180 | /* | ||
| 181 | * destructor for a ipz_queue_t | ||
| 182 | * -# free queue | ||
| 183 | * see ipz_queue_ctor() | ||
 * returns true if ok, false if queue was NULL-ptr or free failed
| 185 | */ | ||
| 186 | int ipz_queue_dtor(struct ipz_queue *queue); | ||
| 187 | |||
| 188 | /* | ||
| 189 | * constructor for a ipz_qpt_t, | ||
 * placement new for struct ipz_queue, new for all dependent data structures
| 191 | * all QP Tables are the same, | ||
| 192 | * flow: | ||
| 193 | * -# allocate+pin queue | ||
| 194 | * -# initialise ptcb | ||
| 195 | * -# allocate+pin PTs | ||
| 196 | * -# link PTs to a ring, according to HCA Arch, set bit62 id needed | ||
| 197 | * -# the ring must have room for exactly nr_of_PTEs | ||
| 198 | * see ipz_qpt_ctor() | ||
| 199 | */ | ||
| 200 | void ipz_qpt_ctor(struct ipz_qpt *qpt, | ||
| 201 | const u32 nr_of_qes, | ||
| 202 | const u32 pagesize, | ||
| 203 | const u32 qe_size, | ||
| 204 | const u8 lowbyte, const u8 toggle, | ||
| 205 | u32 * act_nr_of_QEs, u32 * act_nr_of_pages); | ||
| 206 | |||
| 207 | /* | ||
| 208 | * return current Queue Entry, increment Queue Entry iterator by one | ||
| 209 | * step in struct ipz_queue, will wrap in ringbuffer | ||
| 210 | * returns address (kv) of Queue Entry BEFORE increment | ||
| 211 | * warning don't use in parallel with ipz_qpageit_get_inc() | ||
| 212 | * warning unpredictable results may occur if steps>act_nr_of_queue_entries | ||
| 213 | * fix EQ page problems | ||
| 214 | */ | ||
| 215 | void *ipz_qeit_eq_get_inc(struct ipz_queue *queue); | ||
| 216 | |||
/*
 * return current Event Queue Entry, increment Queue Entry iterator
 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns 0 and does not increment, if wrong valid state
 * warning don't use in parallel with ipz_queue_QPageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 */
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	/* bit 7 of the EQE's first byte is the hardware valid/toggle bit */
	u32 qe = *(u8 *) ret;
	if ((qe >> 7) != (queue->toggle_state & 1))
		return NULL;
	ipz_qeit_eq_get_inc(queue); /* this is a good one */
	return ret;
}
| 234 | |||
/*
 * returns address (GX) of first queue entry
 * PTEs are stored big-endian as seen by the adapter, hence the
 * be64_to_cpu conversion
 */
static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
{
	return be64_to_cpu(qpt->qpts[0]);
}
| 240 | |||
/* returns address (kv) of first page of queue page table */
static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
{
	return qpt->qpts;
}
| 246 | |||
| 247 | #endif /* __IPZ_PT_FN_H__ */ | ||
