Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 236
1 file changed, 158 insertions(+), 78 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index cc64400d41ac..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -132,14 +132,35 @@ static int num_ib_ports(struct mlx4_dev *dev)
 }
 
 static int mlx4_ib_query_device(struct ib_device *ibdev,
-				struct ib_device_attr *props)
+				struct ib_device_attr *props,
+				struct ib_udata *uhw)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
 	int have_ib_ports;
+	struct mlx4_uverbs_ex_query_device cmd;
+	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+	struct mlx4_clock_params clock_params;
 
+	if (uhw->inlen) {
+		if (uhw->inlen < sizeof(cmd))
+			return -EINVAL;
+
+		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
+		if (err)
+			return err;
+
+		if (cmd.comp_mask)
+			return -EINVAL;
+
+		if (cmd.reserved)
+			return -EINVAL;
+	}
+
+	resp.response_length = offsetof(typeof(resp), response_length) +
+		sizeof(resp.response_length);
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
@@ -229,7 +250,25 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
+	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
+	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 
+	if (!mlx4_is_slave(dev->dev))
+		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+
+	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+		resp.response_length += sizeof(resp.hca_core_clock_offset);
+		if (!err && !mlx4_is_slave(dev->dev)) {
+			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+		}
+	}
+
+	if (uhw->outlen) {
+		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+		if (err)
+			goto out;
+	}
 out:
 	kfree(in_mad);
 	kfree(out_mad);
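Note on the hunk above: this is the standard extended-verbs negotiation. The kernel rejects input bits it does not understand (non-zero comp_mask or reserved), then grows resp.response_length one optional field at a time and copies back only what it actually filled in. A minimal userspace-side sketch of consuming such a response; the struct layout and mask value mirror the field names used above but are illustrative assumptions, not the verbatim uapi:

	#include <stddef.h>
	#include <stdint.h>

	/* Assumed mirror of the kernel's response layout (names taken
	 * from the code above; exact uapi packing is an assumption). */
	struct query_device_resp {
		uint32_t comp_mask;
		uint32_t response_length;
		uint64_t hca_core_clock_offset;
	};

	#define QUERY_DEVICE_RESP_MASK_TIMESTAMP (1u << 0)	/* assumed value */

	/* The clock offset is only meaningful when the kernel both wrote
	 * the field (response_length covers it) and flagged it valid. */
	static int clock_offset_valid(const struct query_device_resp *resp)
	{
		size_t need = offsetof(struct query_device_resp,
				       hca_core_clock_offset) +
			      sizeof(resp->hca_core_clock_offset);

		return resp->response_length >= need &&
		       (resp->comp_mask & QUERY_DEVICE_RESP_MASK_TIMESTAMP);
	}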
@@ -712,8 +751,24 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 				       dev->dev->caps.num_uars,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else
+	} else if (vma->vm_pgoff == 3) {
+		struct mlx4_clock_params params;
+		int ret = mlx4_get_internal_clock_params(dev->dev, &params);
+
+		if (ret)
+			return ret;
+
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       (pci_resource_start(dev->dev->persist->pdev,
+							   params.bar) +
+					params.offset)
+				       >> PAGE_SHIFT,
+				       PAGE_SIZE, vma->vm_page_prot))
+			return -EAGAIN;
+	} else {
 		return -EINVAL;
+	}
 
 	return 0;
 }
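Note: vm_pgoff selects which page the uverbs file hands out; offset 3 is the new read-only, uncached window onto the page holding the free-running HCA core clock. A hedged sketch of the userspace side, where uverbs_fd and clock_offset (the hca_core_clock_offset returned by the extended query_device above) are assumed to come from the usual libibverbs plumbing:

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/mman.h>

	static volatile uint32_t *map_hca_clock(int uverbs_fd, size_t clock_offset)
	{
		long page = sysconf(_SC_PAGESIZE);
		/* file offset 3 * page_size makes the kernel take the
		 * vm_pgoff == 3 branch added above */
		void *p = mmap(NULL, page, PROT_READ, MAP_SHARED,
			       uverbs_fd, 3 * page);

		if (p == MAP_FAILED)
			return NULL;
		/* the kernel already reduced the offset mod PAGE_SIZE */
		return (volatile uint32_t *)((char *)p + clock_offset);
	}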
@@ -758,6 +813,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx4_ib_xrcd *xrcd;
+	struct ib_cq_init_attr cq_attr = {};
 	int err;
 
 	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
@@ -777,7 +833,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
 		goto err2;
 	}
 
-	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
+	cq_attr.cqe = 1;
+	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
 	if (IS_ERR(xrcd->cq)) {
 		err = PTR_ERR(xrcd->cq);
 		goto err3;
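Note: this tracks the tree-wide change of the ib_create_cq() signature in the same series; the CQE count and completion vector move into struct ib_cq_init_attr so future attributes do not require touching every caller again. The updated calling convention, as used above:

	struct ib_cq_init_attr cq_attr = {
		.cqe         = 1,	/* one entry suffices for this internal CQ */
		.comp_vector = 0,
	};
	struct ib_cq *cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);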
@@ -1090,7 +1147,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 
 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-			   MLX4_CMD_NATIVE);
+			   MLX4_CMD_WRAPPED);
 	if (ret == -ENOMEM)
 		pr_err("mcg table is full. Fail to register network rule.\n");
 	else if (ret == -ENXIO)
@@ -1107,7 +1164,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	int err;
 	err = mlx4_cmd(dev, reg_id, 0, 0,
 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-		       MLX4_CMD_NATIVE);
+		       MLX4_CMD_WRAPPED);
 	if (err)
 		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
 		       reg_id);
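Note on the two hunks above: switching the flow-steering attach/detach from MLX4_CMD_NATIVE to MLX4_CMD_WRAPPED sends these commands through the mlx4_core command wrapper, so that under SR-IOV a rule installed by a VF is validated and resource-tracked by the PF's wrapper handlers instead of being issued directly to firmware; natively issued steering commands are only usable from the PF.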
@@ -1185,7 +1242,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
-		i++;
 		if (is_bonded) {
 			/* Application always sees one port so the mirror rule
 			 * must be on port #2
@@ -1200,6 +1256,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 			j++;
 		}
 
+		i++;
 	}
 
 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
@@ -1207,7 +1264,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
-		i++;
+
 		if (is_bonded) {
 			flow_attr->port = 2;
 			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
@@ -1218,6 +1275,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 			j++;
 		}
 		/* function to create mirror rule */
+		i++;
 	}
 
 	return &mflow->ibflow;
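Note: the three relocated i++ statements fix bonded-mode bookkeeping. The mirror rule that is forced onto port #2 must land in the same reg_id[i] slot as the rule it mirrors, so the index may only advance once both registrations for that slot are done. The per-slot pairing this relies on is shaped roughly like the following (a sketch inferred from the accesses in this function; the real definition lives in mlx4_ib.h):

	struct mlx4_flow_reg_id {
		u64 id;		/* rule on the port the application sees */
		u64 mirror;	/* bonded mode: companion rule on port #2 */
	};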
@@ -2041,77 +2099,75 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-	char name[80];
-	int eq_per_port = 0;
-	int added_eqs = 0;
-	int total_eqs = 0;
-	int i, j, eq;
+	int i, j, eq = 0, total_eqs = 0;
 
-	/* Legacy mode or comp_pool is not large enough */
-	if (dev->caps.comp_pool == 0 ||
-	    dev->caps.num_ports > dev->caps.comp_pool)
-		return;
-
-	eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
-	/* Init eq table */
-	added_eqs = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-		added_eqs += eq_per_port;
-
-	total_eqs = dev->caps.num_comp_vectors + added_eqs;
-
-	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
 	if (!ibdev->eq_table)
 		return;
 
-	ibdev->eq_added = added_eqs;
-
-	eq = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-		for (j = 0; j < eq_per_port; j++) {
-			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-				 i, j, dev->persist->pdev->bus->name);
-			/* Set IRQ for specific name (per ring) */
-			if (mlx4_assign_eq(dev, name, NULL,
-					   &ibdev->eq_table[eq])) {
-				/* Use legacy (same as mlx4_en driver) */
-				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-				ibdev->eq_table[eq] =
-					(eq % dev->caps.num_comp_vectors);
-			}
-			eq++;
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+		     j++, total_eqs++) {
+			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+				continue;
+			ibdev->eq_table[eq] = total_eqs;
+			if (!mlx4_assign_eq(dev, i,
+					    &ibdev->eq_table[eq]))
+				eq++;
+			else
+				ibdev->eq_table[eq] = -1;
 		}
 	}
 
-	/* Fill the reset of the vector with legacy EQ */
-	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-		ibdev->eq_table[eq++] = i;
+	for (i = eq; i < dev->caps.num_comp_vectors;
+	     ibdev->eq_table[i++] = -1)
+		;
 
 	/* Advertise the new number of EQs to clients */
-	ibdev->ib_dev.num_comp_vectors = total_eqs;
+	ibdev->ib_dev.num_comp_vectors = eq;
 }
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
 	int i;
+	int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-	/* no additional eqs were added */
+	/* no eqs were allocated */
 	if (!ibdev->eq_table)
 		return;
 
 	/* Reset the advertised EQ number */
-	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+	ibdev->ib_dev.num_comp_vectors = 0;
 
-	/* Free only the added eqs */
-	for (i = 0; i < ibdev->eq_added; i++) {
-		/* Don't free legacy eqs if used */
-		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-			continue;
+	for (i = 0; i < total_eqs; i++)
 		mlx4_release_eq(dev, ibdev->eq_table[i]);
-	}
 
 	kfree(ibdev->eq_table);
+	ibdev->eq_table = NULL;
+}
+
+static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = mlx4_ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+
+	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
+		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+	else
+		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+
+	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+	return 0;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
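Note: the EQ rework above sizes eq_table by num_comp_vectors, marks unassigned slots with -1, and advertises only the EQs that were actually assigned. Separately, mlx4_port_immutable() implements the new get_port_immutable callback that the IB core invokes once per port at registration time, so per-port properties that can never change, such as the protocol the port speaks, are computed once rather than re-queried on hot paths. Roughly how the core consumes it (a simplified sketch; the real core caches the result in the device's port_immutable array):

	struct ib_port_immutable immutable;
	int err = device->get_port_immutable(device, port_num, &immutable);

	if (!err && (immutable.core_cap_flags & RDMA_CORE_PORT_IBA_ROCE))
		pr_debug("port %u is RoCE\n", port_num);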
@@ -2123,6 +2179,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	struct mlx4_ib_iboe *iboe;
 	int ib_num_ports = 0;
 	int num_req_counters;
+	int allocated;
+	u32 counter_index;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -2241,6 +2299,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
 	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
 	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
+	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
 
 	if (!mlx4_is_slave(ibdev->dev)) {
 		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
@@ -2278,6 +2337,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
 	}
 
+	ibdev->ib_dev.uverbs_ex_cmd_mask |=
+		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ);
+
 	mlx4_ib_alloc_eqs(dev, ibdev);
 
 	spin_lock_init(&iboe->lock);
@@ -2288,19 +2351,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
 	for (i = 0; i < num_req_counters; ++i) {
 		mutex_init(&ibdev->qp1_proxy_lock[i]);
+		allocated = 0;
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 						IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+			/* if failed to allocate a new counter, use default */
 			if (err)
-				ibdev->counters[i] = -1;
-		} else {
-			ibdev->counters[i] = -1;
+				counter_index =
+					mlx4_get_default_counter_index(dev,
+								       i + 1);
+			else
+				allocated = 1;
+		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
+			counter_index = mlx4_get_default_counter_index(dev,
+								       i + 1);
 		}
+		ibdev->counters[i].index = counter_index;
+		ibdev->counters[i].allocated = allocated;
+		pr_info("counter index %d for port %d allocated %d\n",
+			counter_index, i + 1, allocated);
 	}
 	if (mlx4_is_bonded(dev))
-		for (i = 1; i < ibdev->num_ports ; ++i)
-			ibdev->counters[i] = ibdev->counters[0];
-
+		for (i = 1; i < ibdev->num_ports ; ++i) {
+			ibdev->counters[i].index = ibdev->counters[0].index;
+			ibdev->counters[i].allocated = 0;
+		}
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 		ib_num_ports++;
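Note: each port's counter is now an (index, allocated) pair. 'allocated' records whether this driver obtained the counter from mlx4_counter_alloc() and therefore owns it, or merely borrowed the firmware default; only owned counters may be freed on teardown, which is also why the bonded-port aliases above are stored with allocated = 0. The field accesses imply a structure along these lines (a sketch; the authoritative definition is in mlx4_ib.h):

	struct mlx4_ib_counter {
		u32 index;	/* HW counter index attached to this port's QPs */
		int allocated;	/* 1 = ours to free, 0 = shared default */
	};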
@@ -2440,10 +2515,12 @@ err_steer_qp_release:
 	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
 			      ibdev->steer_qpn_count);
 err_counter:
-	for (; i; --i)
-		if (ibdev->counters[i - 1] != -1)
-			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
-
+	for (i = 0; i < ibdev->num_ports; ++i) {
+		if (ibdev->counters[i].index != -1 &&
+		    ibdev->counters[i].allocated)
+			mlx4_counter_free(ibdev->dev,
+					  ibdev->counters[i].index);
+	}
 err_map:
 	iounmap(ibdev->uar_map);
 
@@ -2560,8 +2637,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 
 	iounmap(ibdev->uar_map);
 	for (p = 0; p < ibdev->num_ports; ++p)
-		if (ibdev->counters[p] != -1)
-			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+		if (ibdev->counters[p].index != -1 &&
+		    ibdev->counters[p].allocated)
+			mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
@@ -2592,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
 	if (!dm) {
 		pr_err("failed to allocate memory for tunneling qp update\n");
-		goto out;
+		return;
 	}
 
 	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
 			pr_err("failed to allocate memory for tunneling qp update work struct\n");
-			for (i = 0; i < dev->caps.num_ports; i++) {
-				if (dm[i])
-					kfree(dm[i]);
-			}
+			while (--i >= 0)
+				kfree(dm[i]);
 			goto out;
 		}
-	}
-	/* initialize or tear down tunnel QPs for the slave */
-	for (i = 0; i < ports; i++) {
 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
 		dm[i]->port = first_port + i + 1;
 		dm[i]->slave = slave;
 		dm[i]->do_init = do_init;
 		dm[i]->dev = ibdev;
-		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-		if (!ibdev->sriov.is_going_down)
+	}
+	/* initialize or tear down tunnel QPs for the slave */
+	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+	if (!ibdev->sriov.is_going_down) {
+		for (i = 0; i < ports; i++)
 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
-		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+		for (i = 0; i < ports; i++)
+			kfree(dm[i]);
 	}
 out:
 	kfree(dm);
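Note: this rewrite of do_slave_init() closes two holes: the error path used to walk dev->caps.num_ports entries (reading past what was allocated when ports is smaller, and reusing i as both loop and cleanup index), and the going_down_lock was taken and released once per port, so a device teardown could slip in between queue_work() calls. All work items are now fully initialized first, then a single locked go/no-go decision either queues them all or frees them all. The skeleton of that pattern (a sketch with placeholder names, not the exact driver code):

	spin_lock_irqsave(&lock, flags);
	if (!going_down) {
		for (i = 0; i < ports; i++)
			/* ownership of dm[i] passes to the workqueue */
			queue_work(wq[i], &dm[i]->work);
		spin_unlock_irqrestore(&lock, flags);
	} else {
		spin_unlock_irqrestore(&lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);	/* never queued, still ours */
	}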