58 files changed, 2414 insertions, 911 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6ef660c1332f..28058ae33d38 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -129,7 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) | |||
129 | dev_put(dev); | 129 | dev_put(dev); |
130 | break; | 130 | break; |
131 | 131 | ||
132 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 132 | #if IS_ENABLED(CONFIG_IPV6) |
133 | case AF_INET6: | 133 | case AF_INET6: |
134 | rcu_read_lock(); | 134 | rcu_read_lock(); |
135 | for_each_netdev_rcu(&init_net, dev) { | 135 | for_each_netdev_rcu(&init_net, dev) { |
@@ -243,7 +243,7 @@ out: | |||
243 | return ret; | 243 | return ret; |
244 | } | 244 | } |
245 | 245 | ||
246 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 246 | #if IS_ENABLED(CONFIG_IPV6) |
247 | static int addr6_resolve(struct sockaddr_in6 *src_in, | 247 | static int addr6_resolve(struct sockaddr_in6 *src_in, |
248 | struct sockaddr_in6 *dst_in, | 248 | struct sockaddr_in6 *dst_in, |
249 | struct rdma_dev_addr *addr) | 249 | struct rdma_dev_addr *addr) |
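Both hunks above replace the open-coded "built-in or module" test with IS_ENABLED(). For reference, IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> evaluates to 1 when the option is =y or =m, so the two forms below are interchangeable; this is a generic illustration, not code from the patch:

#include <linux/kconfig.h>

/* Old style: spell out both the built-in and the modular case. */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* ... IPv6-only code ... */
#endif

/* New style: one macro covers =y and =m; IS_BUILTIN() and IS_MODULE()
 * exist for the cases where the distinction matters. */
#if IS_ENABLED(CONFIG_IPV6)
/* ... IPv6-only code ... */
#endif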
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c889aaef3416..d67999f6e34a 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -3848,24 +3848,28 @@ static int __init ib_cm_init(void) | |||
3848 | INIT_LIST_HEAD(&cm.timewait_list); | 3848 | INIT_LIST_HEAD(&cm.timewait_list); |
3849 | 3849 | ||
3850 | ret = class_register(&cm_class); | 3850 | ret = class_register(&cm_class); |
3851 | if (ret) | 3851 | if (ret) { |
3852 | return -ENOMEM; | 3852 | ret = -ENOMEM; |
3853 | goto error1; | ||
3854 | } | ||
3853 | 3855 | ||
3854 | cm.wq = create_workqueue("ib_cm"); | 3856 | cm.wq = create_workqueue("ib_cm"); |
3855 | if (!cm.wq) { | 3857 | if (!cm.wq) { |
3856 | ret = -ENOMEM; | 3858 | ret = -ENOMEM; |
3857 | goto error1; | 3859 | goto error2; |
3858 | } | 3860 | } |
3859 | 3861 | ||
3860 | ret = ib_register_client(&cm_client); | 3862 | ret = ib_register_client(&cm_client); |
3861 | if (ret) | 3863 | if (ret) |
3862 | goto error2; | 3864 | goto error3; |
3863 | 3865 | ||
3864 | return 0; | 3866 | return 0; |
3865 | error2: | 3867 | error3: |
3866 | destroy_workqueue(cm.wq); | 3868 | destroy_workqueue(cm.wq); |
3867 | error1: | 3869 | error2: |
3868 | class_unregister(&cm_class); | 3870 | class_unregister(&cm_class); |
3871 | error1: | ||
3872 | idr_destroy(&cm.local_id_table); | ||
3869 | return ret; | 3873 | return ret; |
3870 | } | 3874 | } |
3871 | 3875 | ||
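The reworked ib_cm_init() error path restores the usual goto-ladder rule: each label undoes only what had already succeeded before the jump, and idr_destroy() sits in the deepest label because the idr is presumably initialized earlier in the function, outside this hunk. A generic sketch of the pattern with placeholder names, not code from the patch:

        ret = setup_a();                /* e.g. class_register()   */
        if (ret)
                goto err_a;

        ret = setup_b();                /* e.g. create_workqueue() */
        if (ret)
                goto err_b;

        return 0;

err_b:
        teardown_a();                   /* undo setup_a() only */
err_a:
        teardown_earlier_state();       /* undo anything done before setup_a() */
        return ret;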
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index 7da9b2102341..be068f47e47e 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h | |||
@@ -44,18 +44,6 @@ | |||
44 | 44 | ||
45 | #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ | 45 | #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ |
46 | 46 | ||
47 | #define CM_REQ_ATTR_ID cpu_to_be16(0x0010) | ||
48 | #define CM_MRA_ATTR_ID cpu_to_be16(0x0011) | ||
49 | #define CM_REJ_ATTR_ID cpu_to_be16(0x0012) | ||
50 | #define CM_REP_ATTR_ID cpu_to_be16(0x0013) | ||
51 | #define CM_RTU_ATTR_ID cpu_to_be16(0x0014) | ||
52 | #define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) | ||
53 | #define CM_DREP_ATTR_ID cpu_to_be16(0x0016) | ||
54 | #define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) | ||
55 | #define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) | ||
56 | #define CM_LAP_ATTR_ID cpu_to_be16(0x0019) | ||
57 | #define CM_APR_ATTR_ID cpu_to_be16(0x001A) | ||
58 | |||
59 | enum cm_msg_sequence { | 47 | enum cm_msg_sequence { |
60 | CM_MSG_SEQUENCE_REQ, | 48 | CM_MSG_SEQUENCE_REQ, |
61 | CM_MSG_SEQUENCE_LAP, | 49 | CM_MSG_SEQUENCE_LAP, |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 2e826f9702c6..5a335b5447c6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -99,6 +99,10 @@ struct rdma_bind_list { | |||
99 | unsigned short port; | 99 | unsigned short port; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | enum { | ||
103 | CMA_OPTION_AFONLY, | ||
104 | }; | ||
105 | |||
102 | /* | 106 | /* |
103 | * Device removal can occur at anytime, so we need extra handling to | 107 | * Device removal can occur at anytime, so we need extra handling to |
104 | * serialize notifying the user of device removal with other callbacks. | 108 | * serialize notifying the user of device removal with other callbacks. |
@@ -137,9 +141,11 @@ struct rdma_id_private { | |||
137 | u32 qkey; | 141 | u32 qkey; |
138 | u32 qp_num; | 142 | u32 qp_num; |
139 | pid_t owner; | 143 | pid_t owner; |
144 | u32 options; | ||
140 | u8 srq; | 145 | u8 srq; |
141 | u8 tos; | 146 | u8 tos; |
142 | u8 reuseaddr; | 147 | u8 reuseaddr; |
148 | u8 afonly; | ||
143 | }; | 149 | }; |
144 | 150 | ||
145 | struct cma_multicast { | 151 | struct cma_multicast { |
@@ -1297,8 +1303,10 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, | |||
1297 | } else { | 1303 | } else { |
1298 | cma_set_ip_ver(cma_data, 4); | 1304 | cma_set_ip_ver(cma_data, 4); |
1299 | cma_set_ip_ver(cma_mask, 0xF); | 1305 | cma_set_ip_ver(cma_mask, 0xF); |
1300 | cma_data->dst_addr.ip4.addr = ip4_addr; | 1306 | if (!cma_any_addr(addr)) { |
1301 | cma_mask->dst_addr.ip4.addr = htonl(~0); | 1307 | cma_data->dst_addr.ip4.addr = ip4_addr; |
1308 | cma_mask->dst_addr.ip4.addr = htonl(~0); | ||
1309 | } | ||
1302 | } | 1310 | } |
1303 | break; | 1311 | break; |
1304 | case AF_INET6: | 1312 | case AF_INET6: |
@@ -1312,9 +1320,11 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, | |||
1312 | } else { | 1320 | } else { |
1313 | cma_set_ip_ver(cma_data, 6); | 1321 | cma_set_ip_ver(cma_data, 6); |
1314 | cma_set_ip_ver(cma_mask, 0xF); | 1322 | cma_set_ip_ver(cma_mask, 0xF); |
1315 | cma_data->dst_addr.ip6 = ip6_addr; | 1323 | if (!cma_any_addr(addr)) { |
1316 | memset(&cma_mask->dst_addr.ip6, 0xFF, | 1324 | cma_data->dst_addr.ip6 = ip6_addr; |
1317 | sizeof cma_mask->dst_addr.ip6); | 1325 | memset(&cma_mask->dst_addr.ip6, 0xFF, |
1326 | sizeof cma_mask->dst_addr.ip6); | ||
1327 | } | ||
1318 | } | 1328 | } |
1319 | break; | 1329 | break; |
1320 | default: | 1330 | default: |
@@ -1499,7 +1509,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv) | |||
1499 | 1509 | ||
1500 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; | 1510 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; |
1501 | svc_id = cma_get_service_id(id_priv->id.ps, addr); | 1511 | svc_id = cma_get_service_id(id_priv->id.ps, addr); |
1502 | if (cma_any_addr(addr)) | 1512 | if (cma_any_addr(addr) && !id_priv->afonly) |
1503 | ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); | 1513 | ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); |
1504 | else { | 1514 | else { |
1505 | cma_set_compare_data(id_priv->id.ps, addr, &compare_data); | 1515 | cma_set_compare_data(id_priv->id.ps, addr, &compare_data); |
@@ -1573,6 +1583,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, | |||
1573 | list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); | 1583 | list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); |
1574 | atomic_inc(&id_priv->refcount); | 1584 | atomic_inc(&id_priv->refcount); |
1575 | dev_id_priv->internal_id = 1; | 1585 | dev_id_priv->internal_id = 1; |
1586 | dev_id_priv->afonly = id_priv->afonly; | ||
1576 | 1587 | ||
1577 | ret = rdma_listen(id, id_priv->backlog); | 1588 | ret = rdma_listen(id, id_priv->backlog); |
1578 | if (ret) | 1589 | if (ret) |
@@ -2098,6 +2109,26 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) | |||
2098 | } | 2109 | } |
2099 | EXPORT_SYMBOL(rdma_set_reuseaddr); | 2110 | EXPORT_SYMBOL(rdma_set_reuseaddr); |
2100 | 2111 | ||
2112 | int rdma_set_afonly(struct rdma_cm_id *id, int afonly) | ||
2113 | { | ||
2114 | struct rdma_id_private *id_priv; | ||
2115 | unsigned long flags; | ||
2116 | int ret; | ||
2117 | |||
2118 | id_priv = container_of(id, struct rdma_id_private, id); | ||
2119 | spin_lock_irqsave(&id_priv->lock, flags); | ||
2120 | if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { | ||
2121 | id_priv->options |= (1 << CMA_OPTION_AFONLY); | ||
2122 | id_priv->afonly = afonly; | ||
2123 | ret = 0; | ||
2124 | } else { | ||
2125 | ret = -EINVAL; | ||
2126 | } | ||
2127 | spin_unlock_irqrestore(&id_priv->lock, flags); | ||
2128 | return ret; | ||
2129 | } | ||
2130 | EXPORT_SYMBOL(rdma_set_afonly); | ||
2131 | |||
2101 | static void cma_bind_port(struct rdma_bind_list *bind_list, | 2132 | static void cma_bind_port(struct rdma_bind_list *bind_list, |
2102 | struct rdma_id_private *id_priv) | 2133 | struct rdma_id_private *id_priv) |
2103 | { | 2134 | { |
@@ -2187,22 +2218,24 @@ static int cma_check_port(struct rdma_bind_list *bind_list, | |||
2187 | struct hlist_node *node; | 2218 | struct hlist_node *node; |
2188 | 2219 | ||
2189 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; | 2220 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; |
2190 | if (cma_any_addr(addr) && !reuseaddr) | ||
2191 | return -EADDRNOTAVAIL; | ||
2192 | |||
2193 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | 2221 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { |
2194 | if (id_priv == cur_id) | 2222 | if (id_priv == cur_id) |
2195 | continue; | 2223 | continue; |
2196 | 2224 | ||
2197 | if ((cur_id->state == RDMA_CM_LISTEN) || | 2225 | if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && |
2198 | !reuseaddr || !cur_id->reuseaddr) { | 2226 | cur_id->reuseaddr) |
2199 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; | 2227 | continue; |
2200 | if (cma_any_addr(cur_addr)) | ||
2201 | return -EADDRNOTAVAIL; | ||
2202 | 2228 | ||
2203 | if (!cma_addr_cmp(addr, cur_addr)) | 2229 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; |
2204 | return -EADDRINUSE; | 2230 | if (id_priv->afonly && cur_id->afonly && |
2205 | } | 2231 | (addr->sa_family != cur_addr->sa_family)) |
2232 | continue; | ||
2233 | |||
2234 | if (cma_any_addr(addr) || cma_any_addr(cur_addr)) | ||
2235 | return -EADDRNOTAVAIL; | ||
2236 | |||
2237 | if (!cma_addr_cmp(addr, cur_addr)) | ||
2238 | return -EADDRINUSE; | ||
2206 | } | 2239 | } |
2207 | return 0; | 2240 | return 0; |
2208 | } | 2241 | } |
@@ -2278,7 +2311,7 @@ static int cma_get_port(struct rdma_id_private *id_priv) | |||
2278 | static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, | 2311 | static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, |
2279 | struct sockaddr *addr) | 2312 | struct sockaddr *addr) |
2280 | { | 2313 | { |
2281 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 2314 | #if IS_ENABLED(CONFIG_IPV6) |
2282 | struct sockaddr_in6 *sin6; | 2315 | struct sockaddr_in6 *sin6; |
2283 | 2316 | ||
2284 | if (addr->sa_family != AF_INET6) | 2317 | if (addr->sa_family != AF_INET6) |
@@ -2371,6 +2404,14 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
2371 | } | 2404 | } |
2372 | 2405 | ||
2373 | memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); | 2406 | memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); |
2407 | if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { | ||
2408 | if (addr->sa_family == AF_INET) | ||
2409 | id_priv->afonly = 1; | ||
2410 | #if IS_ENABLED(CONFIG_IPV6) | ||
2411 | else if (addr->sa_family == AF_INET6) | ||
2412 | id_priv->afonly = init_net.ipv6.sysctl.bindv6only; | ||
2413 | #endif | ||
2414 | } | ||
2374 | ret = cma_get_port(id_priv); | 2415 | ret = cma_get_port(id_priv); |
2375 | if (ret) | 2416 | if (ret) |
2376 | goto err2; | 2417 | goto err2; |
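Taken together, the rdma_bind_addr() and cma_check_port() changes give the RDMA CM a dual-stack default modeled on the socket API: AF_INET ids are always family-specific, AF_INET6 ids follow the net.ipv6.bindv6only sysctl, and an explicit rdma_set_afonly() call (recorded via the CMA_OPTION_AFONLY bit) overrides both. The analogous socket-level knob, shown only for comparison and not part of this patch:

int v6only = 1;

/* On an AF_INET6 socket, IPV6_V6ONLY restricts the listener to IPv6 traffic;
 * rdma_set_afonly() plays the same role for an rdma_cm_id. */
setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));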
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index fbbfa24cf572..a8905abc56e4 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -94,6 +94,12 @@ struct ib_sa_path_query { | |||
94 | struct ib_sa_query sa_query; | 94 | struct ib_sa_query sa_query; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | struct ib_sa_guidinfo_query { | ||
98 | void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); | ||
99 | void *context; | ||
100 | struct ib_sa_query sa_query; | ||
101 | }; | ||
102 | |||
97 | struct ib_sa_mcmember_query { | 103 | struct ib_sa_mcmember_query { |
98 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); | 104 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); |
99 | void *context; | 105 | void *context; |
@@ -347,6 +353,34 @@ static const struct ib_field service_rec_table[] = { | |||
347 | .size_bits = 2*64 }, | 353 | .size_bits = 2*64 }, |
348 | }; | 354 | }; |
349 | 355 | ||
356 | #define GUIDINFO_REC_FIELD(field) \ | ||
357 | .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ | ||
358 | .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ | ||
359 | .field_name = "sa_guidinfo_rec:" #field | ||
360 | |||
361 | static const struct ib_field guidinfo_rec_table[] = { | ||
362 | { GUIDINFO_REC_FIELD(lid), | ||
363 | .offset_words = 0, | ||
364 | .offset_bits = 0, | ||
365 | .size_bits = 16 }, | ||
366 | { GUIDINFO_REC_FIELD(block_num), | ||
367 | .offset_words = 0, | ||
368 | .offset_bits = 16, | ||
369 | .size_bits = 8 }, | ||
370 | { GUIDINFO_REC_FIELD(res1), | ||
371 | .offset_words = 0, | ||
372 | .offset_bits = 24, | ||
373 | .size_bits = 8 }, | ||
374 | { GUIDINFO_REC_FIELD(res2), | ||
375 | .offset_words = 1, | ||
376 | .offset_bits = 0, | ||
377 | .size_bits = 32 }, | ||
378 | { GUIDINFO_REC_FIELD(guid_info_list), | ||
379 | .offset_words = 2, | ||
380 | .offset_bits = 0, | ||
381 | .size_bits = 512 }, | ||
382 | }; | ||
383 | |||
350 | static void free_sm_ah(struct kref *kref) | 384 | static void free_sm_ah(struct kref *kref) |
351 | { | 385 | { |
352 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); | 386 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); |
@@ -945,6 +979,105 @@ err1: | |||
945 | return ret; | 979 | return ret; |
946 | } | 980 | } |
947 | 981 | ||
982 | /* Support GuidInfoRecord */ | ||
983 | static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, | ||
984 | int status, | ||
985 | struct ib_sa_mad *mad) | ||
986 | { | ||
987 | struct ib_sa_guidinfo_query *query = | ||
988 | container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); | ||
989 | |||
990 | if (mad) { | ||
991 | struct ib_sa_guidinfo_rec rec; | ||
992 | |||
993 | ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), | ||
994 | mad->data, &rec); | ||
995 | query->callback(status, &rec, query->context); | ||
996 | } else | ||
997 | query->callback(status, NULL, query->context); | ||
998 | } | ||
999 | |||
1000 | static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) | ||
1001 | { | ||
1002 | kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); | ||
1003 | } | ||
1004 | |||
1005 | int ib_sa_guid_info_rec_query(struct ib_sa_client *client, | ||
1006 | struct ib_device *device, u8 port_num, | ||
1007 | struct ib_sa_guidinfo_rec *rec, | ||
1008 | ib_sa_comp_mask comp_mask, u8 method, | ||
1009 | int timeout_ms, gfp_t gfp_mask, | ||
1010 | void (*callback)(int status, | ||
1011 | struct ib_sa_guidinfo_rec *resp, | ||
1012 | void *context), | ||
1013 | void *context, | ||
1014 | struct ib_sa_query **sa_query) | ||
1015 | { | ||
1016 | struct ib_sa_guidinfo_query *query; | ||
1017 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | ||
1018 | struct ib_sa_port *port; | ||
1019 | struct ib_mad_agent *agent; | ||
1020 | struct ib_sa_mad *mad; | ||
1021 | int ret; | ||
1022 | |||
1023 | if (!sa_dev) | ||
1024 | return -ENODEV; | ||
1025 | |||
1026 | if (method != IB_MGMT_METHOD_GET && | ||
1027 | method != IB_MGMT_METHOD_SET && | ||
1028 | method != IB_SA_METHOD_DELETE) { | ||
1029 | return -EINVAL; | ||
1030 | } | ||
1031 | |||
1032 | port = &sa_dev->port[port_num - sa_dev->start_port]; | ||
1033 | agent = port->agent; | ||
1034 | |||
1035 | query = kmalloc(sizeof *query, gfp_mask); | ||
1036 | if (!query) | ||
1037 | return -ENOMEM; | ||
1038 | |||
1039 | query->sa_query.port = port; | ||
1040 | ret = alloc_mad(&query->sa_query, gfp_mask); | ||
1041 | if (ret) | ||
1042 | goto err1; | ||
1043 | |||
1044 | ib_sa_client_get(client); | ||
1045 | query->sa_query.client = client; | ||
1046 | query->callback = callback; | ||
1047 | query->context = context; | ||
1048 | |||
1049 | mad = query->sa_query.mad_buf->mad; | ||
1050 | init_mad(mad, agent); | ||
1051 | |||
1052 | query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL; | ||
1053 | query->sa_query.release = ib_sa_guidinfo_rec_release; | ||
1054 | |||
1055 | mad->mad_hdr.method = method; | ||
1056 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); | ||
1057 | mad->sa_hdr.comp_mask = comp_mask; | ||
1058 | |||
1059 | ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, | ||
1060 | mad->data); | ||
1061 | |||
1062 | *sa_query = &query->sa_query; | ||
1063 | |||
1064 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); | ||
1065 | if (ret < 0) | ||
1066 | goto err2; | ||
1067 | |||
1068 | return ret; | ||
1069 | |||
1070 | err2: | ||
1071 | *sa_query = NULL; | ||
1072 | ib_sa_client_put(query->sa_query.client); | ||
1073 | free_mad(&query->sa_query); | ||
1074 | |||
1075 | err1: | ||
1076 | kfree(query); | ||
1077 | return ret; | ||
1078 | } | ||
1079 | EXPORT_SYMBOL(ib_sa_guid_info_rec_query); | ||
1080 | |||
948 | static void send_handler(struct ib_mad_agent *agent, | 1081 | static void send_handler(struct ib_mad_agent *agent, |
949 | struct ib_mad_send_wc *mad_send_wc) | 1082 | struct ib_mad_send_wc *mad_send_wc) |
950 | { | 1083 | { |
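A hypothetical caller of the new ib_sa_guid_info_rec_query() routine, based only on the prototype added above; the sa client, device handle, completion mask, block number, and timeout are placeholders rather than values taken from this patch:

static void guidinfo_cb(int status, struct ib_sa_guidinfo_rec *resp, void *context)
{
        if (status)
                pr_warn("GuidInfoRecord query failed: %d\n", status);
        /* else: *resp holds the unpacked GUIDInfo record for the requested block */
}

        /* ... in the code issuing the query ... */
        struct ib_sa_guidinfo_rec rec = { .lid = lid, .block_num = block };
        struct ib_sa_query *sa_query;
        int ret;

        ret = ib_sa_guid_info_rec_query(&my_sa_client, ibdev, port_num, &rec,
                                        comp_mask, IB_MGMT_METHOD_GET,
                                        1000 /* ms */, GFP_KERNEL,
                                        guidinfo_cb, NULL, &sa_query);
        if (ret < 0)
                pr_warn("could not post GuidInfoRecord query: %d\n", ret);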
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 8002ae642cfe..893cb879462c 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -909,6 +909,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, | |||
909 | } | 909 | } |
910 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); | 910 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); |
911 | break; | 911 | break; |
912 | case RDMA_OPTION_ID_AFONLY: | ||
913 | if (optlen != sizeof(int)) { | ||
914 | ret = -EINVAL; | ||
915 | break; | ||
916 | } | ||
917 | ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); | ||
918 | break; | ||
912 | default: | 919 | default: |
913 | ret = -ENOSYS; | 920 | ret = -ENOSYS; |
914 | } | 921 | } |
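With the RDMA_OPTION_ID_AFONLY case wired into ucma_set_option_id() above, userspace reaches the feature through the normal rdma_set_option() path. A usage sketch, assuming a librdmacm build that exposes the new option name; the option must be set before rdma_bind_addr(), since the kernel only accepts it in the IDLE or ADDR_BOUND states:

#include <rdma/rdma_cma.h>

/* Restrict an AF_INET6 id to IPv6 traffic only (sketch). */
static int bind_v6only(struct rdma_cm_id *id, struct sockaddr_in6 *sin6)
{
        int only = 1;
        int ret;

        ret = rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_AFONLY,
                              &only, sizeof(only));
        if (ret)
                return ret;

        return rdma_bind_addr(id, (struct sockaddr *) sin6);
}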
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b18870c455ad..51f42061dae9 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -548,8 +548,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, | |||
548 | } | 548 | } |
549 | 549 | ||
550 | if (mpa_rev_to_use == 2) { | 550 | if (mpa_rev_to_use == 2) { |
551 | mpa->private_data_size += | 551 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
552 | htons(sizeof(struct mpa_v2_conn_params)); | 552 | sizeof (struct mpa_v2_conn_params)); |
553 | mpa_v2_params.ird = htons((u16)ep->ird); | 553 | mpa_v2_params.ird = htons((u16)ep->ird); |
554 | mpa_v2_params.ord = htons((u16)ep->ord); | 554 | mpa_v2_params.ord = htons((u16)ep->ord); |
555 | 555 | ||
@@ -635,8 +635,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
635 | 635 | ||
636 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | 636 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
637 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | 637 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; |
638 | mpa->private_data_size += | 638 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
639 | htons(sizeof(struct mpa_v2_conn_params)); | 639 | sizeof (struct mpa_v2_conn_params)); |
640 | mpa_v2_params.ird = htons(((u16)ep->ird) | | 640 | mpa_v2_params.ird = htons(((u16)ep->ird) | |
641 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : | 641 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : |
642 | 0)); | 642 | 0)); |
@@ -715,8 +715,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |||
715 | 715 | ||
716 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | 716 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
717 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | 717 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; |
718 | mpa->private_data_size += | 718 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
719 | htons(sizeof(struct mpa_v2_conn_params)); | 719 | sizeof (struct mpa_v2_conn_params)); |
720 | mpa_v2_params.ird = htons((u16)ep->ird); | 720 | mpa_v2_params.ird = htons((u16)ep->ird); |
721 | mpa_v2_params.ord = htons((u16)ep->ord); | 721 | mpa_v2_params.ord = htons((u16)ep->ord); |
722 | if (peer2peer && (ep->mpa_attr.p2p_type != | 722 | if (peer2peer && (ep->mpa_attr.p2p_type != |
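The three cxgb4 hunks fix the same defect: private_data_size is a big-endian (__be16) field, and "+= htons(...)" does the addition on byte-swapped values, so a carry can propagate across the wrong byte on little-endian machines. Written out step by step, the corrected pattern is (illustration only):

u16 plen = ntohs(mpa->private_data_size);       /* wire order -> CPU order  */

plen += sizeof(struct mpa_v2_conn_params);      /* arithmetic in CPU order  */
mpa->private_data_size = htons(plen);           /* CPU order -> wire order  */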
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 259b0670b51c..c27141fef1ab 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -147,47 +147,51 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Snoop SM MADs for port info and P_Key table sets, so we can | 150 | * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can |
151 | * synthesize LID change and P_Key change events. | 151 | * synthesize LID change, Client-Rereg, GID change, and P_Key change events. |
152 | */ | 152 | */ |
153 | static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, | 153 | static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, |
154 | u16 prev_lid) | 154 | u16 prev_lid) |
155 | { | 155 | { |
156 | struct ib_event event; | 156 | struct ib_port_info *pinfo; |
157 | u16 lid; | ||
157 | 158 | ||
159 | struct mlx4_ib_dev *dev = to_mdev(ibdev); | ||
158 | if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || | 160 | if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || |
159 | mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && | 161 | mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && |
160 | mad->mad_hdr.method == IB_MGMT_METHOD_SET) { | 162 | mad->mad_hdr.method == IB_MGMT_METHOD_SET) |
161 | if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { | 163 | switch (mad->mad_hdr.attr_id) { |
162 | struct ib_port_info *pinfo = | 164 | case IB_SMP_ATTR_PORT_INFO: |
163 | (struct ib_port_info *) ((struct ib_smp *) mad)->data; | 165 | pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; |
164 | u16 lid = be16_to_cpu(pinfo->lid); | 166 | lid = be16_to_cpu(pinfo->lid); |
165 | 167 | ||
166 | update_sm_ah(to_mdev(ibdev), port_num, | 168 | update_sm_ah(dev, port_num, |
167 | be16_to_cpu(pinfo->sm_lid), | 169 | be16_to_cpu(pinfo->sm_lid), |
168 | pinfo->neighbormtu_mastersmsl & 0xf); | 170 | pinfo->neighbormtu_mastersmsl & 0xf); |
169 | 171 | ||
170 | event.device = ibdev; | 172 | if (pinfo->clientrereg_resv_subnetto & 0x80) |
171 | event.element.port_num = port_num; | 173 | mlx4_ib_dispatch_event(dev, port_num, |
174 | IB_EVENT_CLIENT_REREGISTER); | ||
172 | 175 | ||
173 | if (pinfo->clientrereg_resv_subnetto & 0x80) { | 176 | if (prev_lid != lid) |
174 | event.event = IB_EVENT_CLIENT_REREGISTER; | 177 | mlx4_ib_dispatch_event(dev, port_num, |
175 | ib_dispatch_event(&event); | 178 | IB_EVENT_LID_CHANGE); |
176 | } | 179 | break; |
177 | 180 | ||
178 | if (prev_lid != lid) { | 181 | case IB_SMP_ATTR_PKEY_TABLE: |
179 | event.event = IB_EVENT_LID_CHANGE; | 182 | mlx4_ib_dispatch_event(dev, port_num, |
180 | ib_dispatch_event(&event); | 183 | IB_EVENT_PKEY_CHANGE); |
181 | } | 184 | break; |
182 | } | ||
183 | 185 | ||
184 | if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { | 186 | case IB_SMP_ATTR_GUID_INFO: |
185 | event.device = ibdev; | 187 | /* paravirtualized master's guid is guid 0 -- does not change */ |
186 | event.event = IB_EVENT_PKEY_CHANGE; | 188 | if (!mlx4_is_master(dev->dev)) |
187 | event.element.port_num = port_num; | 189 | mlx4_ib_dispatch_event(dev, port_num, |
188 | ib_dispatch_event(&event); | 190 | IB_EVENT_GID_CHANGE); |
191 | break; | ||
192 | default: | ||
193 | break; | ||
189 | } | 194 | } |
190 | } | ||
191 | } | 195 | } |
192 | 196 | ||
193 | static void node_desc_override(struct ib_device *dev, | 197 | static void node_desc_override(struct ib_device *dev, |
@@ -242,6 +246,25 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
242 | int err; | 246 | int err; |
243 | struct ib_port_attr pattr; | 247 | struct ib_port_attr pattr; |
244 | 248 | ||
249 | if (in_wc && in_wc->qp->qp_num) { | ||
250 | pr_debug("received MAD: slid:%d sqpn:%d " | ||
251 | "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n", | ||
252 | in_wc->slid, in_wc->src_qp, | ||
253 | in_wc->dlid_path_bits, | ||
254 | in_wc->qp->qp_num, | ||
255 | in_wc->wc_flags, | ||
256 | in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method, | ||
257 | be16_to_cpu(in_mad->mad_hdr.attr_id)); | ||
258 | if (in_wc->wc_flags & IB_WC_GRH) { | ||
259 | pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n", | ||
260 | be64_to_cpu(in_grh->sgid.global.subnet_prefix), | ||
261 | be64_to_cpu(in_grh->sgid.global.interface_id)); | ||
262 | pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n", | ||
263 | be64_to_cpu(in_grh->dgid.global.subnet_prefix), | ||
264 | be64_to_cpu(in_grh->dgid.global.interface_id)); | ||
265 | } | ||
266 | } | ||
267 | |||
245 | slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); | 268 | slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); |
246 | 269 | ||
247 | if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { | 270 | if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { |
@@ -286,7 +309,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
286 | return IB_MAD_RESULT_FAILURE; | 309 | return IB_MAD_RESULT_FAILURE; |
287 | 310 | ||
288 | if (!out_mad->mad_hdr.status) { | 311 | if (!out_mad->mad_hdr.status) { |
289 | smp_snoop(ibdev, port_num, in_mad, prev_lid); | 312 | if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)) |
313 | smp_snoop(ibdev, port_num, in_mad, prev_lid); | ||
290 | node_desc_override(ibdev, out_mad); | 314 | node_desc_override(ibdev, out_mad); |
291 | } | 315 | } |
292 | 316 | ||
@@ -427,3 +451,64 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) | |||
427 | ib_destroy_ah(dev->sm_ah[p]); | 451 | ib_destroy_ah(dev->sm_ah[p]); |
428 | } | 452 | } |
429 | } | 453 | } |
454 | |||
455 | void handle_port_mgmt_change_event(struct work_struct *work) | ||
456 | { | ||
457 | struct ib_event_work *ew = container_of(work, struct ib_event_work, work); | ||
458 | struct mlx4_ib_dev *dev = ew->ib_dev; | ||
459 | struct mlx4_eqe *eqe = &(ew->ib_eqe); | ||
460 | u8 port = eqe->event.port_mgmt_change.port; | ||
461 | u32 changed_attr; | ||
462 | |||
463 | switch (eqe->subtype) { | ||
464 | case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: | ||
465 | changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); | ||
466 | |||
467 | /* Update the SM ah - This should be done before handling | ||
468 | the other changed attributes so that MADs can be sent to the SM */ | ||
469 | if (changed_attr & MSTR_SM_CHANGE_MASK) { | ||
470 | u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); | ||
471 | u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; | ||
472 | update_sm_ah(dev, port, lid, sl); | ||
473 | } | ||
474 | |||
475 | /* Check if it is a lid change event */ | ||
476 | if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK) | ||
477 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE); | ||
478 | |||
479 | /* Generate GUID changed event */ | ||
480 | if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) | ||
481 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); | ||
482 | |||
483 | if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK) | ||
484 | mlx4_ib_dispatch_event(dev, port, | ||
485 | IB_EVENT_CLIENT_REREGISTER); | ||
486 | break; | ||
487 | |||
488 | case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE: | ||
489 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); | ||
490 | break; | ||
491 | case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: | ||
492 | /* paravirtualized master's guid is guid 0 -- does not change */ | ||
493 | if (!mlx4_is_master(dev->dev)) | ||
494 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); | ||
495 | break; | ||
496 | default: | ||
497 | pr_warn("Unsupported subtype 0x%x for " | ||
498 | "Port Management Change event\n", eqe->subtype); | ||
499 | } | ||
500 | |||
501 | kfree(ew); | ||
502 | } | ||
503 | |||
504 | void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, | ||
505 | enum ib_event_type type) | ||
506 | { | ||
507 | struct ib_event event; | ||
508 | |||
509 | event.device = &dev->ib_dev; | ||
510 | event.element.port_num = port_num; | ||
511 | event.event = type; | ||
512 | |||
513 | ib_dispatch_event(&event); | ||
514 | } | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index a07b774e7864..fe2088cfa6ee 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include "mlx4_ib.h" | 50 | #include "mlx4_ib.h" |
51 | #include "user.h" | 51 | #include "user.h" |
52 | 52 | ||
53 | #define DRV_NAME "mlx4_ib" | 53 | #define DRV_NAME MLX4_IB_DRV_NAME |
54 | #define DRV_VERSION "1.0" | 54 | #define DRV_VERSION "1.0" |
55 | #define DRV_RELDATE "April 4, 2008" | 55 | #define DRV_RELDATE "April 4, 2008" |
56 | 56 | ||
@@ -157,7 +157,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
157 | props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; | 157 | props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; |
158 | props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? | 158 | props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? |
159 | IB_ATOMIC_HCA : IB_ATOMIC_NONE; | 159 | IB_ATOMIC_HCA : IB_ATOMIC_NONE; |
160 | props->masked_atomic_cap = IB_ATOMIC_HCA; | 160 | props->masked_atomic_cap = props->atomic_cap; |
161 | props->max_pkeys = dev->dev->caps.pkey_table_len[1]; | 161 | props->max_pkeys = dev->dev->caps.pkey_table_len[1]; |
162 | props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; | 162 | props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; |
163 | props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; | 163 | props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; |
@@ -946,7 +946,6 @@ static void update_gids_task(struct work_struct *work) | |||
946 | union ib_gid *gids; | 946 | union ib_gid *gids; |
947 | int err; | 947 | int err; |
948 | struct mlx4_dev *dev = gw->dev->dev; | 948 | struct mlx4_dev *dev = gw->dev->dev; |
949 | struct ib_event event; | ||
950 | 949 | ||
951 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 950 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
952 | if (IS_ERR(mailbox)) { | 951 | if (IS_ERR(mailbox)) { |
@@ -964,10 +963,7 @@ static void update_gids_task(struct work_struct *work) | |||
964 | pr_warn("set port command failed\n"); | 963 | pr_warn("set port command failed\n"); |
965 | else { | 964 | else { |
966 | memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); | 965 | memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); |
967 | event.device = &gw->dev->ib_dev; | 966 | mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); |
968 | event.element.port_num = gw->port; | ||
969 | event.event = IB_EVENT_GID_CHANGE; | ||
970 | ib_dispatch_event(&event); | ||
971 | } | 967 | } |
972 | 968 | ||
973 | mlx4_free_cmd_mailbox(dev, mailbox); | 969 | mlx4_free_cmd_mailbox(dev, mailbox); |
@@ -1432,10 +1428,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
1432 | } | 1428 | } |
1433 | 1429 | ||
1434 | static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | 1430 | static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, |
1435 | enum mlx4_dev_event event, int port) | 1431 | enum mlx4_dev_event event, unsigned long param) |
1436 | { | 1432 | { |
1437 | struct ib_event ibev; | 1433 | struct ib_event ibev; |
1438 | struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); | 1434 | struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); |
1435 | struct mlx4_eqe *eqe = NULL; | ||
1436 | struct ib_event_work *ew; | ||
1437 | int port = 0; | ||
1438 | |||
1439 | if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) | ||
1440 | eqe = (struct mlx4_eqe *)param; | ||
1441 | else | ||
1442 | port = (u8)param; | ||
1439 | 1443 | ||
1440 | if (port > ibdev->num_ports) | 1444 | if (port > ibdev->num_ports) |
1441 | return; | 1445 | return; |
@@ -1454,6 +1458,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
1454 | ibev.event = IB_EVENT_DEVICE_FATAL; | 1458 | ibev.event = IB_EVENT_DEVICE_FATAL; |
1455 | break; | 1459 | break; |
1456 | 1460 | ||
1461 | case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: | ||
1462 | ew = kmalloc(sizeof *ew, GFP_ATOMIC); | ||
1463 | if (!ew) { | ||
1464 | pr_err("failed to allocate memory for events work\n"); | ||
1465 | break; | ||
1466 | } | ||
1467 | |||
1468 | INIT_WORK(&ew->work, handle_port_mgmt_change_event); | ||
1469 | memcpy(&ew->ib_eqe, eqe, sizeof *eqe); | ||
1470 | ew->ib_dev = ibdev; | ||
1471 | handle_port_mgmt_change_event(&ew->work); | ||
1472 | return; | ||
1473 | |||
1457 | default: | 1474 | default: |
1458 | return; | 1475 | return; |
1459 | } | 1476 | } |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 42df4f7a6a5b..c136bb618e29 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -44,6 +44,16 @@ | |||
44 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
45 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
46 | 46 | ||
47 | #define MLX4_IB_DRV_NAME "mlx4_ib" | ||
48 | |||
49 | #ifdef pr_fmt | ||
50 | #undef pr_fmt | ||
51 | #endif | ||
52 | #define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ | ||
53 | |||
54 | #define mlx4_ib_warn(ibdev, format, arg...) \ | ||
55 | dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) | ||
56 | |||
47 | enum { | 57 | enum { |
48 | MLX4_IB_SQ_MIN_WQE_SHIFT = 6, | 58 | MLX4_IB_SQ_MIN_WQE_SHIFT = 6, |
49 | MLX4_IB_MAX_HEADROOM = 2048 | 59 | MLX4_IB_MAX_HEADROOM = 2048 |
@@ -215,6 +225,12 @@ struct mlx4_ib_dev { | |||
215 | int eq_added; | 225 | int eq_added; |
216 | }; | 226 | }; |
217 | 227 | ||
228 | struct ib_event_work { | ||
229 | struct work_struct work; | ||
230 | struct mlx4_ib_dev *ib_dev; | ||
231 | struct mlx4_eqe ib_eqe; | ||
232 | }; | ||
233 | |||
218 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) | 234 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) |
219 | { | 235 | { |
220 | return container_of(ibdev, struct mlx4_ib_dev, ib_dev); | 236 | return container_of(ibdev, struct mlx4_ib_dev, ib_dev); |
@@ -372,4 +388,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) | |||
372 | int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | 388 | int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, |
373 | union ib_gid *gid); | 389 | union ib_gid *gid); |
374 | 390 | ||
391 | void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, | ||
392 | enum ib_event_type type); | ||
393 | |||
375 | #endif /* MLX4_IB_H */ | 394 | #endif /* MLX4_IB_H */ |
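The pr_fmt() override above prefixes every pr_*() message in files that include mlx4_ib.h with the driver name and the calling function. A small illustration of the effect; the message text and output are approximate, not taken from the driver:

pr_warn("QP 0x%x: modify failed\n", qpn);

/* Without the override, this prints:  QP 0x1234: modify failed
 * With the override, roughly:         <mlx4_ib> <calling function>: QP 0x1234: modify failed
 * because pr_warn(fmt, ...) expands to printk(KERN_WARNING pr_fmt(fmt), ...). */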
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 6af19f6c2b11..a6d8ea060ea8 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1336,11 +1336,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1336 | cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; | 1336 | cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; |
1337 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; | 1337 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; |
1338 | 1338 | ||
1339 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) | 1339 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { |
1340 | pr_debug("qpn 0x%x: invalid attribute mask specified " | ||
1341 | "for transition %d to %d. qp_type %d," | ||
1342 | " attr_mask 0x%x\n", | ||
1343 | ibqp->qp_num, cur_state, new_state, | ||
1344 | ibqp->qp_type, attr_mask); | ||
1340 | goto out; | 1345 | goto out; |
1346 | } | ||
1341 | 1347 | ||
1342 | if ((attr_mask & IB_QP_PORT) && | 1348 | if ((attr_mask & IB_QP_PORT) && |
1343 | (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { | 1349 | (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { |
1350 | pr_debug("qpn 0x%x: invalid port number (%d) specified " | ||
1351 | "for transition %d to %d. qp_type %d\n", | ||
1352 | ibqp->qp_num, attr->port_num, cur_state, | ||
1353 | new_state, ibqp->qp_type); | ||
1344 | goto out; | 1354 | goto out; |
1345 | } | 1355 | } |
1346 | 1356 | ||
@@ -1351,17 +1361,30 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1351 | 1361 | ||
1352 | if (attr_mask & IB_QP_PKEY_INDEX) { | 1362 | if (attr_mask & IB_QP_PKEY_INDEX) { |
1353 | int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; | 1363 | int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
1354 | if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) | 1364 | if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { |
1365 | pr_debug("qpn 0x%x: invalid pkey index (%d) specified " | ||
1366 | "for transition %d to %d. qp_type %d\n", | ||
1367 | ibqp->qp_num, attr->pkey_index, cur_state, | ||
1368 | new_state, ibqp->qp_type); | ||
1355 | goto out; | 1369 | goto out; |
1370 | } | ||
1356 | } | 1371 | } |
1357 | 1372 | ||
1358 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && | 1373 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
1359 | attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { | 1374 | attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { |
1375 | pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " | ||
1376 | "Transition %d to %d. qp_type %d\n", | ||
1377 | ibqp->qp_num, attr->max_rd_atomic, cur_state, | ||
1378 | new_state, ibqp->qp_type); | ||
1360 | goto out; | 1379 | goto out; |
1361 | } | 1380 | } |
1362 | 1381 | ||
1363 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | 1382 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && |
1364 | attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { | 1383 | attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { |
1384 | pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " | ||
1385 | "Transition %d to %d. qp_type %d\n", | ||
1386 | ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, | ||
1387 | new_state, ibqp->qp_type); | ||
1365 | goto out; | 1388 | goto out; |
1366 | } | 1389 | } |
1367 | 1390 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 9601049e14d0..26a684536109 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -247,7 +247,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |||
247 | spin_unlock(&dev->qp_table.lock); | 247 | spin_unlock(&dev->qp_table.lock); |
248 | 248 | ||
249 | if (!qp) { | 249 | if (!qp) { |
250 | mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); | 250 | mthca_warn(dev, "Async event %d for bogus QP %08x\n", |
251 | event_type, qpn); | ||
251 | return; | 252 | return; |
252 | } | 253 | } |
253 | 254 | ||
@@ -501,6 +502,7 @@ done: | |||
501 | qp_attr->cap.max_inline_data = qp->max_inline_data; | 502 | qp_attr->cap.max_inline_data = qp->max_inline_data; |
502 | 503 | ||
503 | qp_init_attr->cap = qp_attr->cap; | 504 | qp_init_attr->cap = qp_attr->cap; |
505 | qp_init_attr->sq_sig_type = qp->sq_policy; | ||
504 | 506 | ||
505 | out_mailbox: | 507 | out_mailbox: |
506 | mthca_free_mailbox(dev, mailbox); | 508 | mthca_free_mailbox(dev, mailbox); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index b050e629e9c3..5a044526e4f4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
@@ -202,8 +202,7 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | |||
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \ | 205 | #if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) |
206 | defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
207 | 206 | ||
208 | static int ocrdma_inet6addr_event(struct notifier_block *notifier, | 207 | static int ocrdma_inet6addr_event(struct notifier_block *notifier, |
209 | unsigned long event, void *ptr) | 208 | unsigned long event, void *ptr) |
@@ -549,7 +548,7 @@ static struct ocrdma_driver ocrdma_drv = { | |||
549 | 548 | ||
550 | static void ocrdma_unregister_inet6addr_notifier(void) | 549 | static void ocrdma_unregister_inet6addr_notifier(void) |
551 | { | 550 | { |
552 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 551 | #if IS_ENABLED(CONFIG_IPV6) |
553 | unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); | 552 | unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); |
554 | #endif | 553 | #endif |
555 | } | 554 | } |
@@ -558,7 +557,7 @@ static int __init ocrdma_init_module(void) | |||
558 | { | 557 | { |
559 | int status; | 558 | int status; |
560 | 559 | ||
561 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 560 | #if IS_ENABLED(CONFIG_IPV6) |
562 | status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); | 561 | status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); |
563 | if (status) | 562 | if (status) |
564 | return status; | 563 | return status; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 2e2e7aecc990..b2f9784beb4a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
97 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); | 97 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); |
98 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; | 98 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; |
99 | attr->max_srq = (dev->attr.max_qp - 1); | 99 | attr->max_srq = (dev->attr.max_qp - 1); |
100 | attr->max_srq_sge = attr->max_srq_sge; | 100 | attr->max_srq_sge = dev->attr.max_srq_sge; |
101 | attr->max_srq_wr = dev->attr.max_rqe; | 101 | attr->max_srq_wr = dev->attr.max_rqe; |
102 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | 102 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; |
103 | attr->max_fast_reg_page_list_len = 0; | 103 | attr->max_fast_reg_page_list_len = 0; |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 7e62f4137148..6e19ec844d99 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #ifndef _QIB_KERNEL_H | 1 | #ifndef _QIB_KERNEL_H |
2 | #define _QIB_KERNEL_H | 2 | #define _QIB_KERNEL_H |
3 | /* | 3 | /* |
4 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 4 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
5 | * All rights reserved. | 5 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
6 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 6 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
7 | * | 7 | * |
8 | * This software is available to you under a choice of one of two | 8 | * This software is available to you under a choice of one of two |
@@ -519,6 +519,7 @@ struct qib_pportdata { | |||
519 | struct qib_devdata *dd; | 519 | struct qib_devdata *dd; |
520 | struct qib_chippport_specific *cpspec; /* chip-specific per-port */ | 520 | struct qib_chippport_specific *cpspec; /* chip-specific per-port */ |
521 | struct kobject pport_kobj; | 521 | struct kobject pport_kobj; |
522 | struct kobject pport_cc_kobj; | ||
522 | struct kobject sl2vl_kobj; | 523 | struct kobject sl2vl_kobj; |
523 | struct kobject diagc_kobj; | 524 | struct kobject diagc_kobj; |
524 | 525 | ||
@@ -544,6 +545,7 @@ struct qib_pportdata { | |||
544 | 545 | ||
545 | /* read mostly */ | 546 | /* read mostly */ |
546 | struct qib_sdma_desc *sdma_descq; | 547 | struct qib_sdma_desc *sdma_descq; |
548 | struct workqueue_struct *qib_wq; | ||
547 | struct qib_sdma_state sdma_state; | 549 | struct qib_sdma_state sdma_state; |
548 | dma_addr_t sdma_descq_phys; | 550 | dma_addr_t sdma_descq_phys; |
549 | volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ | 551 | volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ |
@@ -637,6 +639,39 @@ struct qib_pportdata { | |||
637 | struct timer_list led_override_timer; | 639 | struct timer_list led_override_timer; |
638 | struct xmit_wait cong_stats; | 640 | struct xmit_wait cong_stats; |
639 | struct timer_list symerr_clear_timer; | 641 | struct timer_list symerr_clear_timer; |
642 | |||
643 | /* Synchronize access between driver writes and sysfs reads */ | ||
644 | spinlock_t cc_shadow_lock | ||
645 | ____cacheline_aligned_in_smp; | ||
646 | |||
647 | /* Shadow copy of the congestion control table */ | ||
648 | struct cc_table_shadow *ccti_entries_shadow; | ||
649 | |||
650 | /* Shadow copy of the congestion control entries */ | ||
651 | struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow; | ||
652 | |||
653 | /* List of congestion control table entries */ | ||
654 | struct ib_cc_table_entry_shadow *ccti_entries; | ||
655 | |||
656 | /* 16 congestion entries with each entry corresponding to a SL */ | ||
657 | struct ib_cc_congestion_entry_shadow *congestion_entries; | ||
658 | |||
659 | /* Total number of congestion control table entries */ | ||
660 | u16 total_cct_entry; | ||
661 | |||
662 | /* Bit map identifying service level */ | ||
663 | u16 cc_sl_control_map; | ||
664 | |||
665 | /* maximum congestion control table index */ | ||
666 | u16 ccti_limit; | ||
667 | |||
668 | /* CA's max number of 64 entry units in the congestion control table */ | ||
669 | u8 cc_max_table_entries; | ||
670 | |||
671 | /* Maximum number of congestion control entries that the agent expects | ||
672 | * the manager to send. | ||
673 | */ | ||
674 | u8 cc_supported_table_entries; | ||
640 | }; | 675 | }; |
641 | 676 | ||
642 | /* Observers. Not to be taken lightly, possibly not to ship. */ | 677 | /* Observers. Not to be taken lightly, possibly not to ship. */ |
@@ -1077,6 +1112,7 @@ extern u32 qib_cpulist_count; | |||
1077 | extern unsigned long *qib_cpulist; | 1112 | extern unsigned long *qib_cpulist; |
1078 | 1113 | ||
1079 | extern unsigned qib_wc_pat; | 1114 | extern unsigned qib_wc_pat; |
1115 | extern unsigned qib_cc_table_size; | ||
1080 | int qib_init(struct qib_devdata *, int); | 1116 | int qib_init(struct qib_devdata *, int); |
1081 | int init_chip_wc_pat(struct qib_devdata *dd, u32); | 1117 | int init_chip_wc_pat(struct qib_devdata *dd, u32); |
1082 | int qib_enable_wc(struct qib_devdata *dd); | 1118 | int qib_enable_wc(struct qib_devdata *dd); |
@@ -1267,6 +1303,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, | |||
1267 | /* ppd->sdma_lock should be locked before calling this. */ | 1303 | /* ppd->sdma_lock should be locked before calling this. */ |
1268 | int qib_sdma_make_progress(struct qib_pportdata *dd); | 1304 | int qib_sdma_make_progress(struct qib_pportdata *dd); |
1269 | 1305 | ||
1306 | static inline int qib_sdma_empty(const struct qib_pportdata *ppd) | ||
1307 | { | ||
1308 | return ppd->sdma_descq_added == ppd->sdma_descq_removed; | ||
1309 | } | ||
1310 | |||
1270 | /* must be called under qib_sdma_lock */ | 1311 | /* must be called under qib_sdma_lock */ |
1271 | static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) | 1312 | static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) |
1272 | { | 1313 | { |
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 9892456a4348..1686fd4bda87 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2010 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -53,6 +53,9 @@ | |||
53 | #include "qib.h" | 53 | #include "qib.h" |
54 | #include "qib_common.h" | 54 | #include "qib_common.h" |
55 | 55 | ||
56 | #undef pr_fmt | ||
57 | #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt | ||
58 | |||
56 | /* | 59 | /* |
57 | * Each client that opens the diag device must read then write | 60 | * Each client that opens the diag device must read then write |
58 | * offset 0, to prevent lossage from random cat or od. diag_state | 61 | * offset 0, to prevent lossage from random cat or od. diag_state |
@@ -598,8 +601,8 @@ static ssize_t qib_diagpkt_write(struct file *fp, | |||
598 | } | 601 | } |
599 | tmpbuf = vmalloc(plen); | 602 | tmpbuf = vmalloc(plen); |
600 | if (!tmpbuf) { | 603 | if (!tmpbuf) { |
601 | qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, " | 604 | qib_devinfo(dd->pcidev, |
602 | "failing\n"); | 605 | "Unable to allocate tmp buffer, failing\n"); |
603 | ret = -ENOMEM; | 606 | ret = -ENOMEM; |
604 | goto bail; | 607 | goto bail; |
605 | } | 608 | } |
@@ -693,7 +696,7 @@ int qib_register_observer(struct qib_devdata *dd, | |||
693 | ret = -ENOMEM; | 696 | ret = -ENOMEM; |
694 | olp = vmalloc(sizeof *olp); | 697 | olp = vmalloc(sizeof *olp); |
695 | if (!olp) { | 698 | if (!olp) { |
696 | printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n"); | 699 | pr_err("vmalloc for observer failed\n"); |
697 | goto bail; | 700 | goto bail; |
698 | } | 701 | } |
699 | if (olp) { | 702 | if (olp) { |
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 8895cfec5019..e41e7f7fc763 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -764,8 +764,9 @@ int qib_reset_device(int unit) | |||
764 | qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); | 764 | qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); |
765 | 765 | ||
766 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { | 766 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { |
767 | qib_devinfo(dd->pcidev, "Invalid unit number %u or " | 767 | qib_devinfo(dd->pcidev, |
768 | "not initialized or not present\n", unit); | 768 | "Invalid unit number %u or not initialized or not present\n", |
769 | unit); | ||
769 | ret = -ENXIO; | 770 | ret = -ENXIO; |
770 | goto bail; | 771 | goto bail; |
771 | } | 772 | } |
@@ -802,11 +803,13 @@ int qib_reset_device(int unit) | |||
802 | else | 803 | else |
803 | ret = -EAGAIN; | 804 | ret = -EAGAIN; |
804 | if (ret) | 805 | if (ret) |
805 | qib_dev_err(dd, "Reinitialize unit %u after " | 806 | qib_dev_err(dd, |
806 | "reset failed with %d\n", unit, ret); | 807 | "Reinitialize unit %u after reset failed with %d\n", |
808 | unit, ret); | ||
807 | else | 809 | else |
808 | qib_devinfo(dd->pcidev, "Reinitialized unit %u after " | 810 | qib_devinfo(dd->pcidev, |
809 | "resetting\n", unit); | 811 | "Reinitialized unit %u after resetting\n", |
812 | unit); | ||
810 | 813 | ||
811 | bail: | 814 | bail: |
812 | return ret; | 815 | return ret; |
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index 92d9cfe98a68..4d5d71aaa2b4 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -160,10 +161,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) | |||
160 | if (oguid > bguid[7]) { | 161 | if (oguid > bguid[7]) { |
161 | if (bguid[6] == 0xff) { | 162 | if (bguid[6] == 0xff) { |
162 | if (bguid[5] == 0xff) { | 163 | if (bguid[5] == 0xff) { |
163 | qib_dev_err(dd, "Can't set %s GUID" | 164 | qib_dev_err(dd, |
164 | " from base, wraps to" | 165 | "Can't set %s GUID from base, wraps to OUI!\n", |
165 | " OUI!\n", | 166 | qib_get_unit_name(t)); |
166 | qib_get_unit_name(t)); | ||
167 | dd->base_guid = 0; | 167 | dd->base_guid = 0; |
168 | goto bail; | 168 | goto bail; |
169 | } | 169 | } |
@@ -182,8 +182,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) | |||
182 | len = sizeof(struct qib_flash); | 182 | len = sizeof(struct qib_flash); |
183 | buf = vmalloc(len); | 183 | buf = vmalloc(len); |
184 | if (!buf) { | 184 | if (!buf) { |
185 | qib_dev_err(dd, "Couldn't allocate memory to read %u " | 185 | qib_dev_err(dd, |
186 | "bytes from eeprom for GUID\n", len); | 186 | "Couldn't allocate memory to read %u bytes from eeprom for GUID\n", |
187 | len); | ||
187 | goto bail; | 188 | goto bail; |
188 | } | 189 | } |
189 | 190 | ||
@@ -201,23 +202,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd) | |||
201 | 202 | ||
202 | csum = flash_csum(ifp, 0); | 203 | csum = flash_csum(ifp, 0); |
203 | if (csum != ifp->if_csum) { | 204 | if (csum != ifp->if_csum) { |
204 | qib_devinfo(dd->pcidev, "Bad I2C flash checksum: " | 205 | qib_devinfo(dd->pcidev, |
205 | "0x%x, not 0x%x\n", csum, ifp->if_csum); | 206 | "Bad I2C flash checksum: 0x%x, not 0x%x\n", |
207 | csum, ifp->if_csum); | ||
206 | goto done; | 208 | goto done; |
207 | } | 209 | } |
208 | if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || | 210 | if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || |
209 | *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { | 211 | *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { |
210 | qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n", | 212 | qib_dev_err(dd, |
211 | *(unsigned long long *) ifp->if_guid); | 213 | "Invalid GUID %llx from flash; ignoring\n", |
214 | *(unsigned long long *) ifp->if_guid); | ||
212 | /* don't allow GUID if all 0 or all 1's */ | 215 | /* don't allow GUID if all 0 or all 1's */ |
213 | goto done; | 216 | goto done; |
214 | } | 217 | } |
215 | 218 | ||
216 | /* complain, but allow it */ | 219 | /* complain, but allow it */ |
217 | if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) | 220 | if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) |
218 | qib_devinfo(dd->pcidev, "Warning, GUID %llx is " | 221 | qib_devinfo(dd->pcidev, |
219 | "default, probably not correct!\n", | 222 | "Warning, GUID %llx is default, probably not correct!\n", |
220 | *(unsigned long long *) ifp->if_guid); | 223 | *(unsigned long long *) ifp->if_guid); |
221 | 224 | ||
222 | bguid = ifp->if_guid; | 225 | bguid = ifp->if_guid; |
223 | if (!bguid[0] && !bguid[1] && !bguid[2]) { | 226 | if (!bguid[0] && !bguid[1] && !bguid[2]) { |
@@ -260,8 +263,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) | |||
260 | memcpy(dd->serial, ifp->if_serial, | 263 | memcpy(dd->serial, ifp->if_serial, |
261 | sizeof ifp->if_serial); | 264 | sizeof ifp->if_serial); |
262 | if (!strstr(ifp->if_comment, "Tested successfully")) | 265 | if (!strstr(ifp->if_comment, "Tested successfully")) |
263 | qib_dev_err(dd, "Board SN %s did not pass functional " | 266 | qib_dev_err(dd, |
264 | "test: %s\n", dd->serial, ifp->if_comment); | 267 | "Board SN %s did not pass functional test: %s\n", |
268 | dd->serial, ifp->if_comment); | ||
265 | 269 | ||
266 | memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); | 270 | memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); |
267 | /* | 271 | /* |
@@ -323,8 +327,9 @@ int qib_update_eeprom_log(struct qib_devdata *dd) | |||
323 | buf = vmalloc(len); | 327 | buf = vmalloc(len); |
324 | ret = 1; | 328 | ret = 1; |
325 | if (!buf) { | 329 | if (!buf) { |
326 | qib_dev_err(dd, "Couldn't allocate memory to read %u " | 330 | qib_dev_err(dd, |
327 | "bytes from eeprom for logging\n", len); | 331 | "Couldn't allocate memory to read %u bytes from eeprom for logging\n", |
332 | len); | ||
328 | goto bail; | 333 | goto bail; |
329 | } | 334 | } |
330 | 335 | ||
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index a7403248d83d..faa44cb08071 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -49,6 +49,9 @@ | |||
49 | #include "qib_common.h" | 49 | #include "qib_common.h" |
50 | #include "qib_user_sdma.h" | 50 | #include "qib_user_sdma.h" |
51 | 51 | ||
52 | #undef pr_fmt | ||
53 | #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt | ||
54 | |||
52 | static int qib_open(struct inode *, struct file *); | 55 | static int qib_open(struct inode *, struct file *); |
53 | static int qib_close(struct inode *, struct file *); | 56 | static int qib_close(struct inode *, struct file *); |
54 | static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *); | 57 | static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *); |
@@ -315,8 +318,9 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, | |||
315 | } | 318 | } |
316 | if (cnt > tidcnt) { | 319 | if (cnt > tidcnt) { |
317 | /* make sure it all fits in tid_pg_list */ | 320 | /* make sure it all fits in tid_pg_list */ |
318 | qib_devinfo(dd->pcidev, "Process tried to allocate %u " | 321 | qib_devinfo(dd->pcidev, |
319 | "TIDs, only trying max (%u)\n", cnt, tidcnt); | 322 | "Process tried to allocate %u TIDs, only trying max (%u)\n", |
323 | cnt, tidcnt); | ||
320 | cnt = tidcnt; | 324 | cnt = tidcnt; |
321 | } | 325 | } |
322 | pagep = (struct page **) rcd->tid_pg_list; | 326 | pagep = (struct page **) rcd->tid_pg_list; |
@@ -750,9 +754,9 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, | |||
750 | ret = remap_pfn_range(vma, vma->vm_start, pfn, | 754 | ret = remap_pfn_range(vma, vma->vm_start, pfn, |
751 | len, vma->vm_page_prot); | 755 | len, vma->vm_page_prot); |
752 | if (ret) | 756 | if (ret) |
753 | qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x " | 757 | qib_devinfo(dd->pcidev, |
754 | "bytes failed: %d\n", what, rcd->ctxt, | 758 | "%s ctxt%u mmap of %lx, %x bytes failed: %d\n", |
755 | pfn, len, ret); | 759 | what, rcd->ctxt, pfn, len, ret); |
756 | bail: | 760 | bail: |
757 | return ret; | 761 | return ret; |
758 | } | 762 | } |
@@ -771,8 +775,9 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, | |||
771 | */ | 775 | */ |
772 | sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; | 776 | sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; |
773 | if ((vma->vm_end - vma->vm_start) > sz) { | 777 | if ((vma->vm_end - vma->vm_start) > sz) { |
774 | qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen " | 778 | qib_devinfo(dd->pcidev, |
775 | "%lx > PAGE\n", vma->vm_end - vma->vm_start); | 779 | "FAIL mmap userreg: reqlen %lx > PAGE\n", |
780 | vma->vm_end - vma->vm_start); | ||
776 | ret = -EFAULT; | 781 | ret = -EFAULT; |
777 | } else { | 782 | } else { |
778 | phys = dd->physaddr + ureg; | 783 | phys = dd->physaddr + ureg; |
@@ -802,8 +807,8 @@ static int mmap_piobufs(struct vm_area_struct *vma, | |||
802 | * for it. | 807 | * for it. |
803 | */ | 808 | */ |
804 | if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { | 809 | if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { |
805 | qib_devinfo(dd->pcidev, "FAIL mmap piobufs: " | 810 | qib_devinfo(dd->pcidev, |
806 | "reqlen %lx > PAGE\n", | 811 | "FAIL mmap piobufs: reqlen %lx > PAGE\n", |
807 | vma->vm_end - vma->vm_start); | 812 | vma->vm_end - vma->vm_start); |
808 | ret = -EINVAL; | 813 | ret = -EINVAL; |
809 | goto bail; | 814 | goto bail; |
@@ -847,8 +852,8 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |||
847 | size = rcd->rcvegrbuf_size; | 852 | size = rcd->rcvegrbuf_size; |
848 | total_size = rcd->rcvegrbuf_chunks * size; | 853 | total_size = rcd->rcvegrbuf_chunks * size; |
849 | if ((vma->vm_end - vma->vm_start) > total_size) { | 854 | if ((vma->vm_end - vma->vm_start) > total_size) { |
850 | qib_devinfo(dd->pcidev, "FAIL on egr bufs: " | 855 | qib_devinfo(dd->pcidev, |
851 | "reqlen %lx > actual %lx\n", | 856 | "FAIL on egr bufs: reqlen %lx > actual %lx\n", |
852 | vma->vm_end - vma->vm_start, | 857 | vma->vm_end - vma->vm_start, |
853 | (unsigned long) total_size); | 858 | (unsigned long) total_size); |
854 | ret = -EINVAL; | 859 | ret = -EINVAL; |
@@ -856,8 +861,9 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, | |||
856 | } | 861 | } |
857 | 862 | ||
858 | if (vma->vm_flags & VM_WRITE) { | 863 | if (vma->vm_flags & VM_WRITE) { |
859 | qib_devinfo(dd->pcidev, "Can't map eager buffers as " | 864 | qib_devinfo(dd->pcidev, |
860 | "writable (flags=%lx)\n", vma->vm_flags); | 865 | "Can't map eager buffers as writable (flags=%lx)\n", |
866 | vma->vm_flags); | ||
861 | ret = -EPERM; | 867 | ret = -EPERM; |
862 | goto bail; | 868 | goto bail; |
863 | } | 869 | } |
@@ -1270,8 +1276,8 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | |||
1270 | GFP_KERNEL); | 1276 | GFP_KERNEL); |
1271 | 1277 | ||
1272 | if (!rcd || !ptmp) { | 1278 | if (!rcd || !ptmp) { |
1273 | qib_dev_err(dd, "Unable to allocate ctxtdata " | 1279 | qib_dev_err(dd, |
1274 | "memory, failing open\n"); | 1280 | "Unable to allocate ctxtdata memory, failing open\n"); |
1275 | ret = -ENOMEM; | 1281 | ret = -ENOMEM; |
1276 | goto bailerr; | 1282 | goto bailerr; |
1277 | } | 1283 | } |
@@ -1560,10 +1566,10 @@ done_chk_sdma: | |||
1560 | } else if (weight == 1 && | 1566 | } else if (weight == 1 && |
1561 | test_bit(cpumask_first(tsk_cpus_allowed(current)), | 1567 | test_bit(cpumask_first(tsk_cpus_allowed(current)), |
1562 | qib_cpulist)) | 1568 | qib_cpulist)) |
1563 | qib_devinfo(dd->pcidev, "%s PID %u affinity " | 1569 | qib_devinfo(dd->pcidev, |
1564 | "set to cpu %d; already allocated\n", | 1570 | "%s PID %u affinity set to cpu %d; already allocated\n", |
1565 | current->comm, current->pid, | 1571 | current->comm, current->pid, |
1566 | cpumask_first(tsk_cpus_allowed(current))); | 1572 | cpumask_first(tsk_cpus_allowed(current))); |
1567 | } | 1573 | } |
1568 | 1574 | ||
1569 | mutex_unlock(&qib_mutex); | 1575 | mutex_unlock(&qib_mutex); |
@@ -2185,8 +2191,7 @@ int qib_cdev_init(int minor, const char *name, | |||
2185 | 2191 | ||
2186 | cdev = cdev_alloc(); | 2192 | cdev = cdev_alloc(); |
2187 | if (!cdev) { | 2193 | if (!cdev) { |
2188 | printk(KERN_ERR QIB_DRV_NAME | 2194 | pr_err("Could not allocate cdev for minor %d, %s\n", |
2189 | ": Could not allocate cdev for minor %d, %s\n", | ||
2190 | minor, name); | 2195 | minor, name); |
2191 | ret = -ENOMEM; | 2196 | ret = -ENOMEM; |
2192 | goto done; | 2197 | goto done; |
@@ -2198,8 +2203,7 @@ int qib_cdev_init(int minor, const char *name, | |||
2198 | 2203 | ||
2199 | ret = cdev_add(cdev, dev, 1); | 2204 | ret = cdev_add(cdev, dev, 1); |
2200 | if (ret < 0) { | 2205 | if (ret < 0) { |
2201 | printk(KERN_ERR QIB_DRV_NAME | 2206 | pr_err("Could not add cdev for minor %d, %s (err %d)\n", |
2202 | ": Could not add cdev for minor %d, %s (err %d)\n", | ||
2203 | minor, name, -ret); | 2207 | minor, name, -ret); |
2204 | goto err_cdev; | 2208 | goto err_cdev; |
2205 | } | 2209 | } |
@@ -2209,8 +2213,7 @@ int qib_cdev_init(int minor, const char *name, | |||
2209 | goto done; | 2213 | goto done; |
2210 | ret = PTR_ERR(device); | 2214 | ret = PTR_ERR(device); |
2211 | device = NULL; | 2215 | device = NULL; |
2212 | printk(KERN_ERR QIB_DRV_NAME ": Could not create " | 2216 | pr_err("Could not create device for minor %d, %s (err %d)\n", |
2213 | "device for minor %d, %s (err %d)\n", | ||
2214 | minor, name, -ret); | 2217 | minor, name, -ret); |
2215 | err_cdev: | 2218 | err_cdev: |
2216 | cdev_del(cdev); | 2219 | cdev_del(cdev); |
@@ -2245,16 +2248,14 @@ int __init qib_dev_init(void) | |||
2245 | 2248 | ||
2246 | ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME); | 2249 | ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME); |
2247 | if (ret < 0) { | 2250 | if (ret < 0) { |
2248 | printk(KERN_ERR QIB_DRV_NAME ": Could not allocate " | 2251 | pr_err("Could not allocate chrdev region (err %d)\n", -ret); |
2249 | "chrdev region (err %d)\n", -ret); | ||
2250 | goto done; | 2252 | goto done; |
2251 | } | 2253 | } |
2252 | 2254 | ||
2253 | qib_class = class_create(THIS_MODULE, "ipath"); | 2255 | qib_class = class_create(THIS_MODULE, "ipath"); |
2254 | if (IS_ERR(qib_class)) { | 2256 | if (IS_ERR(qib_class)) { |
2255 | ret = PTR_ERR(qib_class); | 2257 | ret = PTR_ERR(qib_class); |
2256 | printk(KERN_ERR QIB_DRV_NAME ": Could not create " | 2258 | pr_err("Could not create device class (err %d)\n", -ret); |
2257 | "device class (err %d)\n", -ret); | ||
2258 | unregister_chrdev_region(qib_dev, QIB_NMINORS); | 2259 | unregister_chrdev_region(qib_dev, QIB_NMINORS); |
2259 | } | 2260 | } |
2260 | 2261 | ||
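[Editor's note, not part of the patch: the qib_file_ops.c hunks above redefine pr_fmt() and convert printk(KERN_ERR QIB_DRV_NAME ...) calls to pr_err(), which picks up the prefix automatically. A minimal sketch of that pattern, assuming the driver name string "ib_qib" as a stand-in for QIB_DRV_NAME and a hypothetical helper name:]

#include <linux/printk.h>

/* Mirrors the hunk above: redefine pr_fmt() so every pr_*() call in this
 * file is prefixed with the driver name automatically.  pr_err() expands
 * pr_fmt() at each call site, so the redefinition takes effect for all
 * later calls in the file.
 */
#undef pr_fmt
#define pr_fmt(fmt) "ib_qib" ": " fmt	/* stand-in for QIB_DRV_NAME */

static int example_alloc_cdev(int minor)	/* hypothetical helper */
{
	/* Emits: "ib_qib: Could not allocate cdev for minor 3" */
	pr_err("Could not allocate cdev for minor %d\n", minor);
	return -ENOMEM;
}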
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 05e0f17c5b44..cff8a6c32161 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -382,7 +383,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | |||
382 | ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, | 383 | ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, |
383 | &simple_dir_operations, dd); | 384 | &simple_dir_operations, dd); |
384 | if (ret) { | 385 | if (ret) { |
385 | printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); | 386 | pr_err("create_file(%s) failed: %d\n", unit, ret); |
386 | goto bail; | 387 | goto bail; |
387 | } | 388 | } |
388 | 389 | ||
@@ -390,21 +391,21 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | |||
390 | ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, | 391 | ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, |
391 | &cntr_ops[0], dd); | 392 | &cntr_ops[0], dd); |
392 | if (ret) { | 393 | if (ret) { |
393 | printk(KERN_ERR "create_file(%s/counters) failed: %d\n", | 394 | pr_err("create_file(%s/counters) failed: %d\n", |
394 | unit, ret); | 395 | unit, ret); |
395 | goto bail; | 396 | goto bail; |
396 | } | 397 | } |
397 | ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, | 398 | ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, |
398 | &cntr_ops[1], dd); | 399 | &cntr_ops[1], dd); |
399 | if (ret) { | 400 | if (ret) { |
400 | printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n", | 401 | pr_err("create_file(%s/counter_names) failed: %d\n", |
401 | unit, ret); | 402 | unit, ret); |
402 | goto bail; | 403 | goto bail; |
403 | } | 404 | } |
404 | ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, | 405 | ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, |
405 | &portcntr_ops[0], dd); | 406 | &portcntr_ops[0], dd); |
406 | if (ret) { | 407 | if (ret) { |
407 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | 408 | pr_err("create_file(%s/%s) failed: %d\n", |
408 | unit, "portcounter_names", ret); | 409 | unit, "portcounter_names", ret); |
409 | goto bail; | 410 | goto bail; |
410 | } | 411 | } |
@@ -416,7 +417,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | |||
416 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, | 417 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, |
417 | &portcntr_ops[i], dd); | 418 | &portcntr_ops[i], dd); |
418 | if (ret) { | 419 | if (ret) { |
419 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | 420 | pr_err("create_file(%s/%s) failed: %d\n", |
420 | unit, fname, ret); | 421 | unit, fname, ret); |
421 | goto bail; | 422 | goto bail; |
422 | } | 423 | } |
@@ -426,7 +427,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | |||
426 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, | 427 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, |
427 | &qsfp_ops[i - 1], dd); | 428 | &qsfp_ops[i - 1], dd); |
428 | if (ret) { | 429 | if (ret) { |
429 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | 430 | pr_err("create_file(%s/%s) failed: %d\n", |
430 | unit, fname, ret); | 431 | unit, fname, ret); |
431 | goto bail; | 432 | goto bail; |
432 | } | 433 | } |
@@ -435,7 +436,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | |||
435 | ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, | 436 | ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, |
436 | &flash_ops, dd); | 437 | &flash_ops, dd); |
437 | if (ret) | 438 | if (ret) |
438 | printk(KERN_ERR "create_file(%s/flash) failed: %d\n", | 439 | pr_err("create_file(%s/flash) failed: %d\n", |
439 | unit, ret); | 440 | unit, ret); |
440 | bail: | 441 | bail: |
441 | return ret; | 442 | return ret; |
@@ -486,7 +487,7 @@ static int remove_device_files(struct super_block *sb, | |||
486 | 487 | ||
487 | if (IS_ERR(dir)) { | 488 | if (IS_ERR(dir)) { |
488 | ret = PTR_ERR(dir); | 489 | ret = PTR_ERR(dir); |
489 | printk(KERN_ERR "Lookup of %s failed\n", unit); | 490 | pr_err("Lookup of %s failed\n", unit); |
490 | goto bail; | 491 | goto bail; |
491 | } | 492 | } |
492 | 493 | ||
@@ -532,7 +533,7 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent) | |||
532 | 533 | ||
533 | ret = simple_fill_super(sb, QIBFS_MAGIC, files); | 534 | ret = simple_fill_super(sb, QIBFS_MAGIC, files); |
534 | if (ret) { | 535 | if (ret) { |
535 | printk(KERN_ERR "simple_fill_super failed: %d\n", ret); | 536 | pr_err("simple_fill_super failed: %d\n", ret); |
536 | goto bail; | 537 | goto bail; |
537 | } | 538 | } |
538 | 539 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 4d352b90750a..a099ac171e22 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -753,8 +753,8 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, | |||
753 | if (!hwerrs) | 753 | if (!hwerrs) |
754 | return; | 754 | return; |
755 | if (hwerrs == ~0ULL) { | 755 | if (hwerrs == ~0ULL) { |
756 | qib_dev_err(dd, "Read of hardware error status failed " | 756 | qib_dev_err(dd, |
757 | "(all bits set); ignoring\n"); | 757 | "Read of hardware error status failed (all bits set); ignoring\n"); |
758 | return; | 758 | return; |
759 | } | 759 | } |
760 | qib_stats.sps_hwerrs++; | 760 | qib_stats.sps_hwerrs++; |
@@ -779,13 +779,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, | |||
779 | * or it's occurred within the last 5 seconds. | 779 | * or it's occurred within the last 5 seconds. |
780 | */ | 780 | */ |
781 | if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID)) | 781 | if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID)) |
782 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | 782 | qib_devinfo(dd->pcidev, |
783 | "(cleared)\n", (unsigned long long) hwerrs); | 783 | "Hardware error: hwerr=0x%llx (cleared)\n", |
784 | (unsigned long long) hwerrs); | ||
784 | 785 | ||
785 | if (hwerrs & ~IB_HWE_BITSEXTANT) | 786 | if (hwerrs & ~IB_HWE_BITSEXTANT) |
786 | qib_dev_err(dd, "hwerror interrupt with unknown errors " | 787 | qib_dev_err(dd, |
787 | "%llx set\n", (unsigned long long) | 788 | "hwerror interrupt with unknown errors %llx set\n", |
788 | (hwerrs & ~IB_HWE_BITSEXTANT)); | 789 | (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT)); |
789 | 790 | ||
790 | ctrl = qib_read_kreg32(dd, kr_control); | 791 | ctrl = qib_read_kreg32(dd, kr_control); |
791 | if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { | 792 | if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { |
@@ -815,8 +816,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, | |||
815 | 816 | ||
816 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | 817 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { |
817 | isfatal = 1; | 818 | isfatal = 1; |
818 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware" | 819 | strlcat(msg, |
819 | " unusable]", msgl); | 820 | "[Memory BIST test failed, InfiniPath hardware unusable]", |
821 | msgl); | ||
820 | /* ignore from now on, so disable until driver reloaded */ | 822 | /* ignore from now on, so disable until driver reloaded */ |
821 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | 823 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); |
822 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | 824 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); |
@@ -868,8 +870,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, | |||
868 | *msg = 0; /* recovered from all of them */ | 870 | *msg = 0; /* recovered from all of them */ |
869 | 871 | ||
870 | if (isfatal && !dd->diag_client) { | 872 | if (isfatal && !dd->diag_client) { |
871 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | 873 | qib_dev_err(dd, |
872 | " usable, SN %.16s\n", dd->serial); | 874 | "Fatal Hardware Error, no longer usable, SN %.16s\n", |
875 | dd->serial); | ||
873 | /* | 876 | /* |
874 | * for /sys status file and user programs to print; if no | 877 | * for /sys status file and user programs to print; if no |
875 | * trailing brace is copied, we'll know it was truncated. | 878 | * trailing brace is copied, we'll know it was truncated. |
@@ -1017,9 +1020,9 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) | |||
1017 | qib_inc_eeprom_err(dd, log_idx, 1); | 1020 | qib_inc_eeprom_err(dd, log_idx, 1); |
1018 | 1021 | ||
1019 | if (errs & ~IB_E_BITSEXTANT) | 1022 | if (errs & ~IB_E_BITSEXTANT) |
1020 | qib_dev_err(dd, "error interrupt with unknown errors " | 1023 | qib_dev_err(dd, |
1021 | "%llx set\n", | 1024 | "error interrupt with unknown errors %llx set\n", |
1022 | (unsigned long long) (errs & ~IB_E_BITSEXTANT)); | 1025 | (unsigned long long) (errs & ~IB_E_BITSEXTANT)); |
1023 | 1026 | ||
1024 | if (errs & E_SUM_ERRS) { | 1027 | if (errs & E_SUM_ERRS) { |
1025 | qib_disarm_6120_senderrbufs(ppd); | 1028 | qib_disarm_6120_senderrbufs(ppd); |
@@ -1089,8 +1092,8 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) | |||
1089 | } | 1092 | } |
1090 | 1093 | ||
1091 | if (errs & ERR_MASK(ResetNegated)) { | 1094 | if (errs & ERR_MASK(ResetNegated)) { |
1092 | qib_dev_err(dd, "Got reset, requires re-init " | 1095 | qib_dev_err(dd, |
1093 | "(unload and reload driver)\n"); | 1096 | "Got reset, requires re-init (unload and reload driver)\n"); |
1094 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | 1097 | dd->flags &= ~QIB_INITTED; /* needs re-init */ |
1095 | /* mark as having had error */ | 1098 | /* mark as having had error */ |
1096 | *dd->devstatusp |= QIB_STATUS_HWERROR; | 1099 | *dd->devstatusp |= QIB_STATUS_HWERROR; |
@@ -1541,8 +1544,9 @@ static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat) | |||
1541 | qib_stats.sps_errints++; | 1544 | qib_stats.sps_errints++; |
1542 | estat = qib_read_kreg64(dd, kr_errstatus); | 1545 | estat = qib_read_kreg64(dd, kr_errstatus); |
1543 | if (!estat) | 1546 | if (!estat) |
1544 | qib_devinfo(dd->pcidev, "error interrupt (%Lx), " | 1547 | qib_devinfo(dd->pcidev, |
1545 | "but no error bits set!\n", istat); | 1548 | "error interrupt (%Lx), but no error bits set!\n", |
1549 | istat); | ||
1546 | handle_6120_errors(dd, estat); | 1550 | handle_6120_errors(dd, estat); |
1547 | } | 1551 | } |
1548 | 1552 | ||
@@ -1715,16 +1719,16 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd) | |||
1715 | } | 1719 | } |
1716 | 1720 | ||
1717 | if (!dd->cspec->irq) | 1721 | if (!dd->cspec->irq) |
1718 | qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | 1722 | qib_dev_err(dd, |
1719 | "work\n"); | 1723 | "irq is 0, BIOS error? Interrupts won't work\n"); |
1720 | else { | 1724 | else { |
1721 | int ret; | 1725 | int ret; |
1722 | ret = request_irq(dd->cspec->irq, qib_6120intr, 0, | 1726 | ret = request_irq(dd->cspec->irq, qib_6120intr, 0, |
1723 | QIB_DRV_NAME, dd); | 1727 | QIB_DRV_NAME, dd); |
1724 | if (ret) | 1728 | if (ret) |
1725 | qib_dev_err(dd, "Couldn't setup interrupt " | 1729 | qib_dev_err(dd, |
1726 | "(irq=%d): %d\n", dd->cspec->irq, | 1730 | "Couldn't setup interrupt (irq=%d): %d\n", |
1727 | ret); | 1731 | dd->cspec->irq, ret); |
1728 | } | 1732 | } |
1729 | } | 1733 | } |
1730 | 1734 | ||
@@ -1759,8 +1763,9 @@ static void pe_boardname(struct qib_devdata *dd) | |||
1759 | snprintf(dd->boardname, namelen, "%s", n); | 1763 | snprintf(dd->boardname, namelen, "%s", n); |
1760 | 1764 | ||
1761 | if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2) | 1765 | if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2) |
1762 | qib_dev_err(dd, "Unsupported InfiniPath hardware revision " | 1766 | qib_dev_err(dd, |
1763 | "%u.%u!\n", dd->majrev, dd->minrev); | 1767 | "Unsupported InfiniPath hardware revision %u.%u!\n", |
1768 | dd->majrev, dd->minrev); | ||
1764 | 1769 | ||
1765 | snprintf(dd->boardversion, sizeof(dd->boardversion), | 1770 | snprintf(dd->boardversion, sizeof(dd->boardversion), |
1766 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | 1771 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", |
@@ -1833,8 +1838,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) | |||
1833 | bail: | 1838 | bail: |
1834 | if (ret) { | 1839 | if (ret) { |
1835 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) | 1840 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) |
1836 | qib_dev_err(dd, "Reset failed to setup PCIe or " | 1841 | qib_dev_err(dd, |
1837 | "interrupts; continuing anyway\n"); | 1842 | "Reset failed to setup PCIe or interrupts; continuing anyway\n"); |
1838 | /* clear the reset error, init error/hwerror mask */ | 1843 | /* clear the reset error, init error/hwerror mask */ |
1839 | qib_6120_init_hwerrors(dd); | 1844 | qib_6120_init_hwerrors(dd); |
1840 | /* for Rev2 error interrupts; nop for rev 1 */ | 1845 | /* for Rev2 error interrupts; nop for rev 1 */ |
@@ -1876,8 +1881,9 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | |||
1876 | } | 1881 | } |
1877 | pa >>= 11; | 1882 | pa >>= 11; |
1878 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { | 1883 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { |
1879 | qib_dev_err(dd, "Physical page address 0x%lx " | 1884 | qib_dev_err(dd, |
1880 | "larger than supported\n", pa); | 1885 | "Physical page address 0x%lx larger than supported\n", |
1886 | pa); | ||
1881 | return; | 1887 | return; |
1882 | } | 1888 | } |
1883 | 1889 | ||
@@ -1941,8 +1947,9 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr, | |||
1941 | } | 1947 | } |
1942 | pa >>= 11; | 1948 | pa >>= 11; |
1943 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { | 1949 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { |
1944 | qib_dev_err(dd, "Physical page address 0x%lx " | 1950 | qib_dev_err(dd, |
1945 | "larger than supported\n", pa); | 1951 | "Physical page address 0x%lx larger than supported\n", |
1952 | pa); | ||
1946 | return; | 1953 | return; |
1947 | } | 1954 | } |
1948 | 1955 | ||
@@ -2928,8 +2935,9 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) | |||
2928 | ppd->dd->unit, ppd->port); | 2935 | ppd->dd->unit, ppd->port); |
2929 | } else if (!strncmp(what, "off", 3)) { | 2936 | } else if (!strncmp(what, "off", 3)) { |
2930 | ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); | 2937 | ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); |
2931 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | 2938 | qib_devinfo(ppd->dd->pcidev, |
2932 | "(normal)\n", ppd->dd->unit, ppd->port); | 2939 | "Disabling IB%u:%u IBC loopback (normal)\n", |
2940 | ppd->dd->unit, ppd->port); | ||
2933 | } else | 2941 | } else |
2934 | ret = -EINVAL; | 2942 | ret = -EINVAL; |
2935 | if (!ret) { | 2943 | if (!ret) { |
@@ -3186,11 +3194,10 @@ static int qib_late_6120_initreg(struct qib_devdata *dd) | |||
3186 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | 3194 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); |
3187 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | 3195 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); |
3188 | if (val != dd->pioavailregs_phys) { | 3196 | if (val != dd->pioavailregs_phys) { |
3189 | qib_dev_err(dd, "Catastrophic software error, " | 3197 | qib_dev_err(dd, |
3190 | "SendPIOAvailAddr written as %lx, " | 3198 | "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", |
3191 | "read back as %llx\n", | 3199 | (unsigned long) dd->pioavailregs_phys, |
3192 | (unsigned long) dd->pioavailregs_phys, | 3200 | (unsigned long long) val); |
3193 | (unsigned long long) val); | ||
3194 | ret = -EINVAL; | 3201 | ret = -EINVAL; |
3195 | } | 3202 | } |
3196 | return ret; | 3203 | return ret; |
@@ -3218,8 +3225,8 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
3218 | dd->revision = readq(&dd->kregbase[kr_revision]); | 3225 | dd->revision = readq(&dd->kregbase[kr_revision]); |
3219 | 3226 | ||
3220 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | 3227 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { |
3221 | qib_dev_err(dd, "Revision register read failure, " | 3228 | qib_dev_err(dd, |
3222 | "giving up initialization\n"); | 3229 | "Revision register read failure, giving up initialization\n"); |
3223 | ret = -ENODEV; | 3230 | ret = -ENODEV; |
3224 | goto bail; | 3231 | goto bail; |
3225 | } | 3232 | } |
@@ -3551,8 +3558,8 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, | |||
3551 | goto bail; | 3558 | goto bail; |
3552 | 3559 | ||
3553 | if (qib_pcie_params(dd, 8, NULL, NULL)) | 3560 | if (qib_pcie_params(dd, 8, NULL, NULL)) |
3554 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | 3561 | qib_dev_err(dd, |
3555 | "continuing anyway\n"); | 3562 | "Failed to setup PCIe or interrupts; continuing anyway\n"); |
3556 | dd->cspec->irq = pdev->irq; /* save IRQ */ | 3563 | dd->cspec->irq = pdev->irq; /* save IRQ */ |
3557 | 3564 | ||
3558 | /* clear diagctrl register, in case diags were running and crashed */ | 3565 | /* clear diagctrl register, in case diags were running and crashed */ |
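[Editor's note, not part of the patch: every hunk in qib_iba6120.c above rejoins a format string that had been split across source lines into a single literal, so the user-visible message stays greppable. A minimal before/after sketch of that pattern, using the generic dev_err() from <linux/device.h> rather than the driver-private qib_dev_err() wrapper:]

#include <linux/device.h>

static void example_report(struct device *dev, unsigned long pa)	/* hypothetical */
{
	/* Avoid: splitting the literal hides the message from grep:
	 *   dev_err(dev, "Physical page address 0x%lx "
	 *		  "larger than supported\n", pa);
	 */

	/* Prefer: one unbroken string, even past 80 columns; wrap only the
	 * arguments onto continuation lines.
	 */
	dev_err(dev,
		"Physical page address 0x%lx larger than supported\n",
		pa);
}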
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 86a0ba7ca0c2..64d0ecb90cdc 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -1111,9 +1111,9 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) | |||
1111 | sdma_7220_errors(ppd, errs); | 1111 | sdma_7220_errors(ppd, errs); |
1112 | 1112 | ||
1113 | if (errs & ~IB_E_BITSEXTANT) | 1113 | if (errs & ~IB_E_BITSEXTANT) |
1114 | qib_dev_err(dd, "error interrupt with unknown errors " | 1114 | qib_dev_err(dd, |
1115 | "%llx set\n", (unsigned long long) | 1115 | "error interrupt with unknown errors %llx set\n", |
1116 | (errs & ~IB_E_BITSEXTANT)); | 1116 | (unsigned long long) (errs & ~IB_E_BITSEXTANT)); |
1117 | 1117 | ||
1118 | if (errs & E_SUM_ERRS) { | 1118 | if (errs & E_SUM_ERRS) { |
1119 | qib_disarm_7220_senderrbufs(ppd); | 1119 | qib_disarm_7220_senderrbufs(ppd); |
@@ -1192,8 +1192,8 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) | |||
1192 | } | 1192 | } |
1193 | 1193 | ||
1194 | if (errs & ERR_MASK(ResetNegated)) { | 1194 | if (errs & ERR_MASK(ResetNegated)) { |
1195 | qib_dev_err(dd, "Got reset, requires re-init " | 1195 | qib_dev_err(dd, |
1196 | "(unload and reload driver)\n"); | 1196 | "Got reset, requires re-init (unload and reload driver)\n"); |
1197 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | 1197 | dd->flags &= ~QIB_INITTED; /* needs re-init */ |
1198 | /* mark as having had error */ | 1198 | /* mark as having had error */ |
1199 | *dd->devstatusp |= QIB_STATUS_HWERROR; | 1199 | *dd->devstatusp |= QIB_STATUS_HWERROR; |
@@ -1305,8 +1305,8 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
1305 | if (!hwerrs) | 1305 | if (!hwerrs) |
1306 | goto bail; | 1306 | goto bail; |
1307 | if (hwerrs == ~0ULL) { | 1307 | if (hwerrs == ~0ULL) { |
1308 | qib_dev_err(dd, "Read of hardware error status failed " | 1308 | qib_dev_err(dd, |
1309 | "(all bits set); ignoring\n"); | 1309 | "Read of hardware error status failed (all bits set); ignoring\n"); |
1310 | goto bail; | 1310 | goto bail; |
1311 | } | 1311 | } |
1312 | qib_stats.sps_hwerrs++; | 1312 | qib_stats.sps_hwerrs++; |
@@ -1329,13 +1329,14 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
1329 | qib_inc_eeprom_err(dd, log_idx, 1); | 1329 | qib_inc_eeprom_err(dd, log_idx, 1); |
1330 | if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC | | 1330 | if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC | |
1331 | RXE_PARITY)) | 1331 | RXE_PARITY)) |
1332 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | 1332 | qib_devinfo(dd->pcidev, |
1333 | "(cleared)\n", (unsigned long long) hwerrs); | 1333 | "Hardware error: hwerr=0x%llx (cleared)\n", |
1334 | (unsigned long long) hwerrs); | ||
1334 | 1335 | ||
1335 | if (hwerrs & ~IB_HWE_BITSEXTANT) | 1336 | if (hwerrs & ~IB_HWE_BITSEXTANT) |
1336 | qib_dev_err(dd, "hwerror interrupt with unknown errors " | 1337 | qib_dev_err(dd, |
1337 | "%llx set\n", (unsigned long long) | 1338 | "hwerror interrupt with unknown errors %llx set\n", |
1338 | (hwerrs & ~IB_HWE_BITSEXTANT)); | 1339 | (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT)); |
1339 | 1340 | ||
1340 | if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) | 1341 | if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) |
1341 | qib_sd7220_clr_ibpar(dd); | 1342 | qib_sd7220_clr_ibpar(dd); |
@@ -1362,8 +1363,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
1362 | 1363 | ||
1363 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | 1364 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { |
1364 | isfatal = 1; | 1365 | isfatal = 1; |
1365 | strlcat(msg, "[Memory BIST test failed, " | 1366 | strlcat(msg, |
1366 | "InfiniPath hardware unusable]", msgl); | 1367 | "[Memory BIST test failed, InfiniPath hardware unusable]", |
1368 | msgl); | ||
1367 | /* ignore from now on, so disable until driver reloaded */ | 1369 | /* ignore from now on, so disable until driver reloaded */ |
1368 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | 1370 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); |
1369 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | 1371 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); |
@@ -1409,8 +1411,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
1409 | qib_dev_err(dd, "%s hardware error\n", msg); | 1411 | qib_dev_err(dd, "%s hardware error\n", msg); |
1410 | 1412 | ||
1411 | if (isfatal && !dd->diag_client) { | 1413 | if (isfatal && !dd->diag_client) { |
1412 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | 1414 | qib_dev_err(dd, |
1413 | " usable, SN %.16s\n", dd->serial); | 1415 | "Fatal Hardware Error, no longer usable, SN %.16s\n", |
1416 | dd->serial); | ||
1414 | /* | 1417 | /* |
1415 | * For /sys status file and user programs to print; if no | 1418 | * For /sys status file and user programs to print; if no |
1416 | * trailing brace is copied, we'll know it was truncated. | 1419 | * trailing brace is copied, we'll know it was truncated. |
@@ -1918,8 +1921,9 @@ static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) | |||
1918 | qib_stats.sps_errints++; | 1921 | qib_stats.sps_errints++; |
1919 | estat = qib_read_kreg64(dd, kr_errstatus); | 1922 | estat = qib_read_kreg64(dd, kr_errstatus); |
1920 | if (!estat) | 1923 | if (!estat) |
1921 | qib_devinfo(dd->pcidev, "error interrupt (%Lx), " | 1924 | qib_devinfo(dd->pcidev, |
1922 | "but no error bits set!\n", istat); | 1925 | "error interrupt (%Lx), but no error bits set!\n", |
1926 | istat); | ||
1923 | else | 1927 | else |
1924 | handle_7220_errors(dd, estat); | 1928 | handle_7220_errors(dd, estat); |
1925 | } | 1929 | } |
@@ -2023,17 +2027,18 @@ bail: | |||
2023 | static void qib_setup_7220_interrupt(struct qib_devdata *dd) | 2027 | static void qib_setup_7220_interrupt(struct qib_devdata *dd) |
2024 | { | 2028 | { |
2025 | if (!dd->cspec->irq) | 2029 | if (!dd->cspec->irq) |
2026 | qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | 2030 | qib_dev_err(dd, |
2027 | "work\n"); | 2031 | "irq is 0, BIOS error? Interrupts won't work\n"); |
2028 | else { | 2032 | else { |
2029 | int ret = request_irq(dd->cspec->irq, qib_7220intr, | 2033 | int ret = request_irq(dd->cspec->irq, qib_7220intr, |
2030 | dd->msi_lo ? 0 : IRQF_SHARED, | 2034 | dd->msi_lo ? 0 : IRQF_SHARED, |
2031 | QIB_DRV_NAME, dd); | 2035 | QIB_DRV_NAME, dd); |
2032 | 2036 | ||
2033 | if (ret) | 2037 | if (ret) |
2034 | qib_dev_err(dd, "Couldn't setup %s interrupt " | 2038 | qib_dev_err(dd, |
2035 | "(irq=%d): %d\n", dd->msi_lo ? | 2039 | "Couldn't setup %s interrupt (irq=%d): %d\n", |
2036 | "MSI" : "INTx", dd->cspec->irq, ret); | 2040 | dd->msi_lo ? "MSI" : "INTx", |
2041 | dd->cspec->irq, ret); | ||
2037 | } | 2042 | } |
2038 | } | 2043 | } |
2039 | 2044 | ||
@@ -2072,9 +2077,9 @@ static void qib_7220_boardname(struct qib_devdata *dd) | |||
2072 | snprintf(dd->boardname, namelen, "%s", n); | 2077 | snprintf(dd->boardname, namelen, "%s", n); |
2073 | 2078 | ||
2074 | if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) | 2079 | if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) |
2075 | qib_dev_err(dd, "Unsupported InfiniPath hardware " | 2080 | qib_dev_err(dd, |
2076 | "revision %u.%u!\n", | 2081 | "Unsupported InfiniPath hardware revision %u.%u!\n", |
2077 | dd->majrev, dd->minrev); | 2082 | dd->majrev, dd->minrev); |
2078 | 2083 | ||
2079 | snprintf(dd->boardversion, sizeof(dd->boardversion), | 2084 | snprintf(dd->boardversion, sizeof(dd->boardversion), |
2080 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | 2085 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", |
@@ -2146,8 +2151,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) | |||
2146 | bail: | 2151 | bail: |
2147 | if (ret) { | 2152 | if (ret) { |
2148 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) | 2153 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) |
2149 | qib_dev_err(dd, "Reset failed to setup PCIe or " | 2154 | qib_dev_err(dd, |
2150 | "interrupts; continuing anyway\n"); | 2155 | "Reset failed to setup PCIe or interrupts; continuing anyway\n"); |
2151 | 2156 | ||
2152 | /* hold IBC in reset, no sends, etc till later */ | 2157 | /* hold IBC in reset, no sends, etc till later */ |
2153 | qib_write_kreg(dd, kr_control, 0ULL); | 2158 | qib_write_kreg(dd, kr_control, 0ULL); |
@@ -2187,8 +2192,9 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | |||
2187 | return; | 2192 | return; |
2188 | } | 2193 | } |
2189 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { | 2194 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { |
2190 | qib_dev_err(dd, "Physical page address 0x%lx " | 2195 | qib_dev_err(dd, |
2191 | "larger than supported\n", pa); | 2196 | "Physical page address 0x%lx larger than supported\n", |
2197 | pa); | ||
2192 | return; | 2198 | return; |
2193 | } | 2199 | } |
2194 | 2200 | ||
@@ -2706,8 +2712,9 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) | |||
2706 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); | 2712 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); |
2707 | /* enable heart beat again */ | 2713 | /* enable heart beat again */ |
2708 | val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | 2714 | val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; |
2709 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | 2715 | qib_devinfo(ppd->dd->pcidev, |
2710 | "(normal)\n", ppd->dd->unit, ppd->port); | 2716 | "Disabling IB%u:%u IBC loopback (normal)\n", |
2717 | ppd->dd->unit, ppd->port); | ||
2711 | } else | 2718 | } else |
2712 | ret = -EINVAL; | 2719 | ret = -EINVAL; |
2713 | if (!ret) { | 2720 | if (!ret) { |
@@ -3307,8 +3314,8 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd) | |||
3307 | if (!dd->msi_lo) | 3314 | if (!dd->msi_lo) |
3308 | return 0; | 3315 | return 0; |
3309 | 3316 | ||
3310 | qib_devinfo(dd->pcidev, "MSI interrupt not detected," | 3317 | qib_devinfo(dd->pcidev, |
3311 | " trying INTx interrupts\n"); | 3318 | "MSI interrupt not detected, trying INTx interrupts\n"); |
3312 | qib_7220_free_irq(dd); | 3319 | qib_7220_free_irq(dd); |
3313 | qib_enable_intx(dd->pcidev); | 3320 | qib_enable_intx(dd->pcidev); |
3314 | /* | 3321 | /* |
@@ -3980,11 +3987,10 @@ static int qib_late_7220_initreg(struct qib_devdata *dd) | |||
3980 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | 3987 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); |
3981 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | 3988 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); |
3982 | if (val != dd->pioavailregs_phys) { | 3989 | if (val != dd->pioavailregs_phys) { |
3983 | qib_dev_err(dd, "Catastrophic software error, " | 3990 | qib_dev_err(dd, |
3984 | "SendPIOAvailAddr written as %lx, " | 3991 | "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", |
3985 | "read back as %llx\n", | 3992 | (unsigned long) dd->pioavailregs_phys, |
3986 | (unsigned long) dd->pioavailregs_phys, | 3993 | (unsigned long long) val); |
3987 | (unsigned long long) val); | ||
3988 | ret = -EINVAL; | 3994 | ret = -EINVAL; |
3989 | } | 3995 | } |
3990 | qib_register_observer(dd, &sendctrl_observer); | 3996 | qib_register_observer(dd, &sendctrl_observer); |
@@ -4014,8 +4020,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
4014 | dd->revision = readq(&dd->kregbase[kr_revision]); | 4020 | dd->revision = readq(&dd->kregbase[kr_revision]); |
4015 | 4021 | ||
4016 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | 4022 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { |
4017 | qib_dev_err(dd, "Revision register read failure, " | 4023 | qib_dev_err(dd, |
4018 | "giving up initialization\n"); | 4024 | "Revision register read failure, giving up initialization\n"); |
4019 | ret = -ENODEV; | 4025 | ret = -ENODEV; |
4020 | goto bail; | 4026 | goto bail; |
4021 | } | 4027 | } |
@@ -4613,8 +4619,8 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, | |||
4613 | break; | 4619 | break; |
4614 | } | 4620 | } |
4615 | if (qib_pcie_params(dd, minwidth, NULL, NULL)) | 4621 | if (qib_pcie_params(dd, minwidth, NULL, NULL)) |
4616 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | 4622 | qib_dev_err(dd, |
4617 | "continuing anyway\n"); | 4623 | "Failed to setup PCIe or interrupts; continuing anyway\n"); |
4618 | 4624 | ||
4619 | /* save IRQ for possible later use */ | 4625 | /* save IRQ for possible later use */ |
4620 | dd->cspec->irq = pdev->irq; | 4626 | dd->cspec->irq = pdev->irq; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index c881e744c091..0d7280af99bc 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -49,6 +50,10 @@ | |||
49 | #include "qib_qsfp.h" | 50 | #include "qib_qsfp.h" |
50 | 51 | ||
51 | #include "qib_mad.h" | 52 | #include "qib_mad.h" |
53 | #include "qib_verbs.h" | ||
54 | |||
55 | #undef pr_fmt | ||
56 | #define pr_fmt(fmt) QIB_DRV_NAME " " fmt | ||
52 | 57 | ||
53 | static void qib_setup_7322_setextled(struct qib_pportdata *, u32); | 58 | static void qib_setup_7322_setextled(struct qib_pportdata *, u32); |
54 | static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); | 59 | static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); |
@@ -1575,8 +1580,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) | |||
1575 | qib_stats.sps_errints++; | 1580 | qib_stats.sps_errints++; |
1576 | errs = qib_read_kreg64(dd, kr_errstatus); | 1581 | errs = qib_read_kreg64(dd, kr_errstatus); |
1577 | if (!errs) { | 1582 | if (!errs) { |
1578 | qib_devinfo(dd->pcidev, "device error interrupt, " | 1583 | qib_devinfo(dd->pcidev, |
1579 | "but no error bits set!\n"); | 1584 | "device error interrupt, but no error bits set!\n"); |
1580 | goto done; | 1585 | goto done; |
1581 | } | 1586 | } |
1582 | 1587 | ||
@@ -1622,8 +1627,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) | |||
1622 | if (errs & QIB_E_RESET) { | 1627 | if (errs & QIB_E_RESET) { |
1623 | int pidx; | 1628 | int pidx; |
1624 | 1629 | ||
1625 | qib_dev_err(dd, "Got reset, requires re-init " | 1630 | qib_dev_err(dd, |
1626 | "(unload and reload driver)\n"); | 1631 | "Got reset, requires re-init (unload and reload driver)\n"); |
1627 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | 1632 | dd->flags &= ~QIB_INITTED; /* needs re-init */ |
1628 | /* mark as having had error */ | 1633 | /* mark as having had error */ |
1629 | *dd->devstatusp |= QIB_STATUS_HWERROR; | 1634 | *dd->devstatusp |= QIB_STATUS_HWERROR; |
@@ -1760,9 +1765,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | |||
1760 | ppd->dd->cspec->r1 ? | 1765 | ppd->dd->cspec->r1 ? |
1761 | QDR_STATIC_ADAPT_DOWN_R1 : | 1766 | QDR_STATIC_ADAPT_DOWN_R1 : |
1762 | QDR_STATIC_ADAPT_DOWN); | 1767 | QDR_STATIC_ADAPT_DOWN); |
1763 | printk(KERN_INFO QIB_DRV_NAME | 1768 | pr_info( |
1764 | " IB%u:%u re-enabled QDR adaptation " | 1769 | "IB%u:%u re-enabled QDR adaptation ibclt %x\n", |
1765 | "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt); | 1770 | ppd->dd->unit, ppd->port, ibclt); |
1766 | } | 1771 | } |
1767 | } | 1772 | } |
1768 | } | 1773 | } |
@@ -1804,9 +1809,9 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | |||
1804 | if (!*msg) | 1809 | if (!*msg) |
1805 | snprintf(msg, sizeof ppd->cpspec->epmsgbuf, | 1810 | snprintf(msg, sizeof ppd->cpspec->epmsgbuf, |
1806 | "no others"); | 1811 | "no others"); |
1807 | qib_dev_porterr(dd, ppd->port, "error interrupt with unknown" | 1812 | qib_dev_porterr(dd, ppd->port, |
1808 | " errors 0x%016Lx set (and %s)\n", | 1813 | "error interrupt with unknown errors 0x%016Lx set (and %s)\n", |
1809 | (errs & ~QIB_E_P_BITSEXTANT), msg); | 1814 | (errs & ~QIB_E_P_BITSEXTANT), msg); |
1810 | *msg = '\0'; | 1815 | *msg = '\0'; |
1811 | } | 1816 | } |
1812 | 1817 | ||
@@ -2024,8 +2029,8 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
2024 | if (!hwerrs) | 2029 | if (!hwerrs) |
2025 | goto bail; | 2030 | goto bail; |
2026 | if (hwerrs == ~0ULL) { | 2031 | if (hwerrs == ~0ULL) { |
2027 | qib_dev_err(dd, "Read of hardware error status failed " | 2032 | qib_dev_err(dd, |
2028 | "(all bits set); ignoring\n"); | 2033 | "Read of hardware error status failed (all bits set); ignoring\n"); |
2029 | goto bail; | 2034 | goto bail; |
2030 | } | 2035 | } |
2031 | qib_stats.sps_hwerrs++; | 2036 | qib_stats.sps_hwerrs++; |
@@ -2039,8 +2044,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
2039 | /* no EEPROM logging, yet */ | 2044 | /* no EEPROM logging, yet */ |
2040 | 2045 | ||
2041 | if (hwerrs) | 2046 | if (hwerrs) |
2042 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | 2047 | qib_devinfo(dd->pcidev, |
2043 | "(cleared)\n", (unsigned long long) hwerrs); | 2048 | "Hardware error: hwerr=0x%llx (cleared)\n", |
2049 | (unsigned long long) hwerrs); | ||
2044 | 2050 | ||
2045 | ctrl = qib_read_kreg32(dd, kr_control); | 2051 | ctrl = qib_read_kreg32(dd, kr_control); |
2046 | if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { | 2052 | if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { |
@@ -2064,8 +2070,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
2064 | 2070 | ||
2065 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | 2071 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { |
2066 | isfatal = 1; | 2072 | isfatal = 1; |
2067 | strlcpy(msg, "[Memory BIST test failed, " | 2073 | strlcpy(msg, |
2068 | "InfiniPath hardware unusable]", msgl); | 2074 | "[Memory BIST test failed, InfiniPath hardware unusable]", |
2075 | msgl); | ||
2069 | /* ignore from now on, so disable until driver reloaded */ | 2076 | /* ignore from now on, so disable until driver reloaded */ |
2070 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | 2077 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); |
2071 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | 2078 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); |
@@ -2078,8 +2085,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | |||
2078 | qib_dev_err(dd, "%s hardware error\n", msg); | 2085 | qib_dev_err(dd, "%s hardware error\n", msg); |
2079 | 2086 | ||
2080 | if (isfatal && !dd->diag_client) { | 2087 | if (isfatal && !dd->diag_client) { |
2081 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | 2088 | qib_dev_err(dd, |
2082 | " usable, SN %.16s\n", dd->serial); | 2089 | "Fatal Hardware Error, no longer usable, SN %.16s\n", |
2090 | dd->serial); | ||
2083 | /* | 2091 | /* |
2084 | * for /sys status file and user programs to print; if no | 2092 | * for /sys status file and user programs to print; if no |
2085 | * trailing brace is copied, we'll know it was truncated. | 2093 | * trailing brace is copied, we'll know it was truncated. |
@@ -2667,8 +2675,9 @@ static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) | |||
2667 | char msg[128]; | 2675 | char msg[128]; |
2668 | 2676 | ||
2669 | kills = istat & ~QIB_I_BITSEXTANT; | 2677 | kills = istat & ~QIB_I_BITSEXTANT; |
2670 | qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:" | 2678 | qib_dev_err(dd, |
2671 | " %s\n", (unsigned long long) kills, msg); | 2679 | "Clearing reserved interrupt(s) 0x%016llx: %s\n", |
2680 | (unsigned long long) kills, msg); | ||
2672 | qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); | 2681 | qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); |
2673 | } | 2682 | } |
2674 | 2683 | ||
@@ -3101,16 +3110,16 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) | |||
3101 | /* Try to get INTx interrupt */ | 3110 | /* Try to get INTx interrupt */ |
3102 | try_intx: | 3111 | try_intx: |
3103 | if (!dd->pcidev->irq) { | 3112 | if (!dd->pcidev->irq) { |
3104 | qib_dev_err(dd, "irq is 0, BIOS error? " | 3113 | qib_dev_err(dd, |
3105 | "Interrupts won't work\n"); | 3114 | "irq is 0, BIOS error? Interrupts won't work\n"); |
3106 | goto bail; | 3115 | goto bail; |
3107 | } | 3116 | } |
3108 | ret = request_irq(dd->pcidev->irq, qib_7322intr, | 3117 | ret = request_irq(dd->pcidev->irq, qib_7322intr, |
3109 | IRQF_SHARED, QIB_DRV_NAME, dd); | 3118 | IRQF_SHARED, QIB_DRV_NAME, dd); |
3110 | if (ret) { | 3119 | if (ret) { |
3111 | qib_dev_err(dd, "Couldn't setup INTx " | 3120 | qib_dev_err(dd, |
3112 | "interrupt (irq=%d): %d\n", | 3121 | "Couldn't setup INTx interrupt (irq=%d): %d\n", |
3113 | dd->pcidev->irq, ret); | 3122 | dd->pcidev->irq, ret); |
3114 | goto bail; | 3123 | goto bail; |
3115 | } | 3124 | } |
3116 | dd->cspec->irq = dd->pcidev->irq; | 3125 | dd->cspec->irq = dd->pcidev->irq; |
@@ -3185,8 +3194,9 @@ try_intx: | |||
3185 | * Shouldn't happen since the enable said we could | 3194 | * Shouldn't happen since the enable said we could |
3186 | * have as many as we are trying to setup here. | 3195 | * have as many as we are trying to setup here. |
3187 | */ | 3196 | */ |
3188 | qib_dev_err(dd, "Couldn't setup MSIx " | 3197 | qib_dev_err(dd, |
3189 | "interrupt (vec=%d, irq=%d): %d\n", msixnum, | 3198 | "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n", |
3199 | msixnum, | ||
3190 | dd->cspec->msix_entries[msixnum].msix.vector, | 3200 | dd->cspec->msix_entries[msixnum].msix.vector, |
3191 | ret); | 3201 | ret); |
3192 | qib_7322_nomsix(dd); | 3202 | qib_7322_nomsix(dd); |
@@ -3305,8 +3315,9 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) | |||
3305 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | 3315 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); |
3306 | 3316 | ||
3307 | if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { | 3317 | if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { |
3308 | qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode" | 3318 | qib_devinfo(dd->pcidev, |
3309 | " by module parameter\n", dd->unit); | 3319 | "IB%u: Forced to single port mode by module parameter\n", |
3320 | dd->unit); | ||
3310 | features &= PORT_SPD_CAP; | 3321 | features &= PORT_SPD_CAP; |
3311 | } | 3322 | } |
3312 | 3323 | ||
@@ -3400,8 +3411,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd) | |||
3400 | if (val == dd->revision) | 3411 | if (val == dd->revision) |
3401 | break; | 3412 | break; |
3402 | if (i == 5) { | 3413 | if (i == 5) { |
3403 | qib_dev_err(dd, "Failed to initialize after reset, " | 3414 | qib_dev_err(dd, |
3404 | "unusable\n"); | 3415 | "Failed to initialize after reset, unusable\n"); |
3405 | ret = 0; | 3416 | ret = 0; |
3406 | goto bail; | 3417 | goto bail; |
3407 | } | 3418 | } |
@@ -3432,8 +3443,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd) | |||
3432 | if (qib_pcie_params(dd, dd->lbus_width, | 3443 | if (qib_pcie_params(dd, dd->lbus_width, |
3433 | &dd->cspec->num_msix_entries, | 3444 | &dd->cspec->num_msix_entries, |
3434 | dd->cspec->msix_entries)) | 3445 | dd->cspec->msix_entries)) |
3435 | qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; " | 3446 | qib_dev_err(dd, |
3436 | "continuing anyway\n"); | 3447 | "Reset failed to setup PCIe or interrupts; continuing anyway\n"); |
3437 | 3448 | ||
3438 | qib_setup_7322_interrupt(dd, 1); | 3449 | qib_setup_7322_interrupt(dd, 1); |
3439 | 3450 | ||
@@ -3474,8 +3485,9 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | |||
3474 | return; | 3485 | return; |
3475 | } | 3486 | } |
3476 | if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { | 3487 | if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { |
3477 | qib_dev_err(dd, "Physical page address 0x%lx " | 3488 | qib_dev_err(dd, |
3478 | "larger than supported\n", pa); | 3489 | "Physical page address 0x%lx larger than supported\n", |
3490 | pa); | ||
3479 | return; | 3491 | return; |
3480 | } | 3492 | } |
3481 | 3493 | ||
@@ -4029,8 +4041,9 @@ static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) | |||
4029 | Loopback); | 4041 | Loopback); |
4030 | /* enable heart beat again */ | 4042 | /* enable heart beat again */ |
4031 | val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; | 4043 | val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; |
4032 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | 4044 | qib_devinfo(ppd->dd->pcidev, |
4033 | "(normal)\n", ppd->dd->unit, ppd->port); | 4045 | "Disabling IB%u:%u IBC loopback (normal)\n", |
4046 | ppd->dd->unit, ppd->port); | ||
4034 | } else | 4047 | } else |
4035 | ret = -EINVAL; | 4048 | ret = -EINVAL; |
4036 | if (!ret) { | 4049 | if (!ret) { |
@@ -4714,8 +4727,8 @@ static void init_7322_cntrnames(struct qib_devdata *dd) | |||
4714 | dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs | 4727 | dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs |
4715 | * sizeof(u64), GFP_KERNEL); | 4728 | * sizeof(u64), GFP_KERNEL); |
4716 | if (!dd->pport[i].cpspec->portcntrs) | 4729 | if (!dd->pport[i].cpspec->portcntrs) |
4717 | qib_dev_err(dd, "Failed allocation for" | 4730 | qib_dev_err(dd, |
4718 | " portcounters\n"); | 4731 | "Failed allocation for portcounters\n"); |
4719 | } | 4732 | } |
4720 | } | 4733 | } |
4721 | 4734 | ||
@@ -4865,8 +4878,8 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd) | |||
4865 | if (!dd->cspec->num_msix_entries) | 4878 | if (!dd->cspec->num_msix_entries) |
4866 | return 0; /* already using INTx */ | 4879 | return 0; /* already using INTx */ |
4867 | 4880 | ||
4868 | qib_devinfo(dd->pcidev, "MSIx interrupt not detected," | 4881 | qib_devinfo(dd->pcidev, |
4869 | " trying INTx interrupts\n"); | 4882 | "MSIx interrupt not detected, trying INTx interrupts\n"); |
4870 | qib_7322_nomsix(dd); | 4883 | qib_7322_nomsix(dd); |
4871 | qib_enable_intx(dd->pcidev); | 4884 | qib_enable_intx(dd->pcidev); |
4872 | qib_setup_7322_interrupt(dd, 0); | 4885 | qib_setup_7322_interrupt(dd, 0); |
@@ -5151,15 +5164,11 @@ static void try_7322_ipg(struct qib_pportdata *ppd) | |||
5151 | goto retry; | 5164 | goto retry; |
5152 | 5165 | ||
5153 | if (!ibp->smi_ah) { | 5166 | if (!ibp->smi_ah) { |
5154 | struct ib_ah_attr attr; | ||
5155 | struct ib_ah *ah; | 5167 | struct ib_ah *ah; |
5156 | 5168 | ||
5157 | memset(&attr, 0, sizeof attr); | 5169 | ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE)); |
5158 | attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE); | ||
5159 | attr.port_num = ppd->port; | ||
5160 | ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); | ||
5161 | if (IS_ERR(ah)) | 5170 | if (IS_ERR(ah)) |
5162 | ret = -EINVAL; | 5171 | ret = PTR_ERR(ah); |
5163 | else { | 5172 | else { |
5164 | send_buf->ah = ah; | 5173 | send_buf->ah = ah; |
5165 | ibp->smi_ah = to_iah(ah); | 5174 | ibp->smi_ah = to_iah(ah); |
@@ -5844,22 +5853,21 @@ static int setup_txselect(const char *str, struct kernel_param *kp) | |||
5844 | { | 5853 | { |
5845 | struct qib_devdata *dd; | 5854 | struct qib_devdata *dd; |
5846 | unsigned long val; | 5855 | unsigned long val; |
5847 | char *n; | 5856 | int ret; |
5857 | |||
5848 | if (strlen(str) >= MAX_ATTEN_LEN) { | 5858 | if (strlen(str) >= MAX_ATTEN_LEN) { |
5849 | printk(KERN_INFO QIB_DRV_NAME " txselect_values string " | 5859 | pr_info("txselect_values string too long\n"); |
5850 | "too long\n"); | ||
5851 | return -ENOSPC; | 5860 | return -ENOSPC; |
5852 | } | 5861 | } |
5853 | val = simple_strtoul(str, &n, 0); | 5862 | ret = kstrtoul(str, 0, &val); |
5854 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | 5863 | if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + |
5855 | TXDDS_MFG_SZ)) { | 5864 | TXDDS_MFG_SZ)) { |
5856 | printk(KERN_INFO QIB_DRV_NAME | 5865 | pr_info("txselect_values must start with a number < %d\n", |
5857 | "txselect_values must start with a number < %d\n", | ||
5858 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); | 5866 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); |
5859 | return -EINVAL; | 5867 | return ret ? ret : -EINVAL; |
5860 | } | 5868 | } |
5861 | strcpy(txselect_list, str); | ||
5862 | 5869 | ||
5870 | strcpy(txselect_list, str); | ||
5863 | list_for_each_entry(dd, &qib_dev_list, list) | 5871 | list_for_each_entry(dd, &qib_dev_list, list) |
5864 | if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) | 5872 | if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) |
5865 | set_no_qsfp_atten(dd, 1); | 5873 | set_no_qsfp_atten(dd, 1); |
@@ -5882,11 +5890,10 @@ static int qib_late_7322_initreg(struct qib_devdata *dd) | |||
5882 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | 5890 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); |
5883 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | 5891 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); |
5884 | if (val != dd->pioavailregs_phys) { | 5892 | if (val != dd->pioavailregs_phys) { |
5885 | qib_dev_err(dd, "Catastrophic software error, " | 5893 | qib_dev_err(dd, |
5886 | "SendPIOAvailAddr written as %lx, " | 5894 | "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", |
5887 | "read back as %llx\n", | 5895 | (unsigned long) dd->pioavailregs_phys, |
5888 | (unsigned long) dd->pioavailregs_phys, | 5896 | (unsigned long long) val); |
5889 | (unsigned long long) val); | ||
5890 | ret = -EINVAL; | 5897 | ret = -EINVAL; |
5891 | } | 5898 | } |
5892 | 5899 | ||
@@ -6098,8 +6105,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6098 | dd->revision = readq(&dd->kregbase[kr_revision]); | 6105 | dd->revision = readq(&dd->kregbase[kr_revision]); |
6099 | 6106 | ||
6100 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | 6107 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { |
6101 | qib_dev_err(dd, "Revision register read failure, " | 6108 | qib_dev_err(dd, |
6102 | "giving up initialization\n"); | 6109 | "Revision register read failure, giving up initialization\n"); |
6103 | ret = -ENODEV; | 6110 | ret = -ENODEV; |
6104 | goto bail; | 6111 | goto bail; |
6105 | } | 6112 | } |
@@ -6265,9 +6272,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6265 | */ | 6272 | */ |
6266 | if (!(dd->flags & QIB_HAS_QSFP)) { | 6273 | if (!(dd->flags & QIB_HAS_QSFP)) { |
6267 | if (!IS_QMH(dd) && !IS_QME(dd)) | 6274 | if (!IS_QMH(dd) && !IS_QME(dd)) |
6268 | qib_devinfo(dd->pcidev, "IB%u:%u: " | 6275 | qib_devinfo(dd->pcidev, |
6269 | "Unknown mezzanine card type\n", | 6276 | "IB%u:%u: Unknown mezzanine card type\n", |
6270 | dd->unit, ppd->port); | 6277 | dd->unit, ppd->port); |
6271 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; | 6278 | cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; |
6272 | /* | 6279 | /* |
6273 | * Choose center value as default tx serdes setting | 6280 | * Choose center value as default tx serdes setting |
@@ -6922,8 +6929,8 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |||
6922 | dd->cspec->msix_entries[i].msix.entry = i; | 6929 | dd->cspec->msix_entries[i].msix.entry = i; |
6923 | 6930 | ||
6924 | if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) | 6931 | if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) |
6925 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | 6932 | qib_dev_err(dd, |
6926 | "continuing anyway\n"); | 6933 | "Failed to setup PCIe or interrupts; continuing anyway\n"); |
6927 | /* may be less than we wanted, if not enough available */ | 6934 | /* may be less than we wanted, if not enough available */ |
6928 | dd->cspec->num_msix_entries = tabsize; | 6935 | dd->cspec->num_msix_entries = tabsize; |
6929 | 6936 | ||
@@ -7276,8 +7283,7 @@ static void find_best_ent(struct qib_pportdata *ppd, | |||
7276 | ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | 7283 | ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + |
7277 | TXDDS_MFG_SZ)) { | 7284 | TXDDS_MFG_SZ)) { |
7278 | idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | 7285 | idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); |
7279 | printk(KERN_INFO QIB_DRV_NAME | 7286 | pr_info("IB%u:%u use idx %u into txdds_mfg\n", |
7280 | " IB%u:%u use idx %u into txdds_mfg\n", | ||
7281 | ppd->dd->unit, ppd->port, idx); | 7287 | ppd->dd->unit, ppd->port, idx); |
7282 | *sdr_dds = &txdds_extra_mfg[idx]; | 7288 | *sdr_dds = &txdds_extra_mfg[idx]; |
7283 | *ddr_dds = &txdds_extra_mfg[idx]; | 7289 | *ddr_dds = &txdds_extra_mfg[idx]; |
@@ -7432,11 +7438,11 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) | |||
7432 | u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); | 7438 | u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); |
7433 | 7439 | ||
7434 | if (enable && !state) { | 7440 | if (enable && !state) { |
7435 | printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n", | 7441 | pr_info("IB%u:%u Turning LOS on\n", |
7436 | ppd->dd->unit, ppd->port); | 7442 | ppd->dd->unit, ppd->port); |
7437 | data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); | 7443 | data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
7438 | } else if (!enable && state) { | 7444 | } else if (!enable && state) { |
7439 | printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n", | 7445 | pr_info("IB%u:%u Turning LOS off\n", |
7440 | ppd->dd->unit, ppd->port); | 7446 | ppd->dd->unit, ppd->port); |
7441 | data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); | 7447 | data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); |
7442 | } | 7448 | } |
@@ -7672,8 +7678,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7672 | } | 7678 | } |
7673 | } | 7679 | } |
7674 | if (chan_done) { | 7680 | if (chan_done) { |
7675 | printk(KERN_INFO QIB_DRV_NAME | 7681 | pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n", |
7676 | " Serdes %d calibration not done after .5 sec: 0x%x\n", | ||
7677 | IBSD(ppd->hw_pidx), chan_done); | 7682 | IBSD(ppd->hw_pidx), chan_done); |
7678 | } else { | 7683 | } else { |
7679 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | 7684 | for (chan = 0; chan < SERDES_CHANS; ++chan) { |
@@ -7681,9 +7686,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7681 | (chan + (chan >> 1)), | 7686 | (chan + (chan >> 1)), |
7682 | 25, 0, 0); | 7687 | 25, 0, 0); |
7683 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) | 7688 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) |
7684 | printk(KERN_INFO QIB_DRV_NAME | 7689 | pr_info("Serdes %d chan %d calibration failed\n", |
7685 | " Serdes %d chan %d calibration " | 7690 | IBSD(ppd->hw_pidx), chan); |
7686 | "failed\n", IBSD(ppd->hw_pidx), chan); | ||
7687 | } | 7691 | } |
7688 | } | 7692 | } |
7689 | 7693 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index dc14e100a7f1..4443adfcd9ee 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -38,9 +38,14 @@ | |||
38 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
39 | #include <linux/idr.h> | 39 | #include <linux/idr.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/printk.h> | ||
41 | 42 | ||
42 | #include "qib.h" | 43 | #include "qib.h" |
43 | #include "qib_common.h" | 44 | #include "qib_common.h" |
45 | #include "qib_mad.h" | ||
46 | |||
47 | #undef pr_fmt | ||
48 | #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt | ||
44 | 49 | ||
45 | /* | 50 | /* |
46 | * min buffers we want to have per context, after driver | 51 | * min buffers we want to have per context, after driver |
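The #undef/#define of pr_fmt above is what lets the rest of qib_init.c drop the explicit KERN_* and QIB_DRV_NAME boilerplate: every pr_err()/pr_info() call expands its format string through pr_fmt() first. A simplified illustration, assuming QIB_DRV_NAME expands to the driver name string:

	#undef pr_fmt
	#define pr_fmt(fmt) "ib_qib: " fmt	/* assumed expansion of QIB_DRV_NAME */

	pr_err("Unable to register driver: error %d\n", -ret);
	/* logs: "ib_qib: Unable to register driver: error <n>" */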
@@ -71,6 +76,9 @@ unsigned qib_n_krcv_queues; | |||
71 | module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); | 76 | module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); |
72 | MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); | 77 | MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); |
73 | 78 | ||
79 | unsigned qib_cc_table_size; | ||
80 | module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); | ||
81 | MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); | ||
74 | /* | 82 | /* |
75 | * qib_wc_pat parameter: | 83 | * qib_wc_pat parameter: |
76 | * 0 is WC via MTRR | 84 | * 0 is WC via MTRR |
@@ -120,8 +128,8 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
120 | */ | 128 | */ |
121 | dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); | 129 | dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); |
122 | if (!dd->rcd) { | 130 | if (!dd->rcd) { |
123 | qib_dev_err(dd, "Unable to allocate ctxtdata array, " | 131 | qib_dev_err(dd, |
124 | "failing\n"); | 132 | "Unable to allocate ctxtdata array, failing\n"); |
125 | ret = -ENOMEM; | 133 | ret = -ENOMEM; |
126 | goto done; | 134 | goto done; |
127 | } | 135 | } |
@@ -137,8 +145,8 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
137 | ppd = dd->pport + (i % dd->num_pports); | 145 | ppd = dd->pport + (i % dd->num_pports); |
138 | rcd = qib_create_ctxtdata(ppd, i); | 146 | rcd = qib_create_ctxtdata(ppd, i); |
139 | if (!rcd) { | 147 | if (!rcd) { |
140 | qib_dev_err(dd, "Unable to allocate ctxtdata" | 148 | qib_dev_err(dd, |
141 | " for Kernel ctxt, failing\n"); | 149 | "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); |
142 | ret = -ENOMEM; | 150 | ret = -ENOMEM; |
143 | goto done; | 151 | goto done; |
144 | } | 152 | } |
@@ -199,6 +207,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) | |||
199 | void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | 207 | void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, |
200 | u8 hw_pidx, u8 port) | 208 | u8 hw_pidx, u8 port) |
201 | { | 209 | { |
210 | int size; | ||
202 | ppd->dd = dd; | 211 | ppd->dd = dd; |
203 | ppd->hw_pidx = hw_pidx; | 212 | ppd->hw_pidx = hw_pidx; |
204 | ppd->port = port; /* IB port number, not index */ | 213 | ppd->port = port; /* IB port number, not index */ |
@@ -210,6 +219,83 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | |||
210 | init_timer(&ppd->symerr_clear_timer); | 219 | init_timer(&ppd->symerr_clear_timer); |
211 | ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; | 220 | ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; |
212 | ppd->symerr_clear_timer.data = (unsigned long)ppd; | 221 | ppd->symerr_clear_timer.data = (unsigned long)ppd; |
222 | |||
223 | ppd->qib_wq = NULL; | ||
224 | |||
225 | spin_lock_init(&ppd->cc_shadow_lock); | ||
226 | |||
227 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) | ||
228 | goto bail; | ||
229 | |||
230 | ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size, | ||
231 | IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT); | ||
232 | |||
233 | ppd->cc_max_table_entries = | ||
234 | ppd->cc_supported_table_entries/IB_CCT_ENTRIES; | ||
235 | |||
236 | size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry) | ||
237 | * IB_CCT_ENTRIES; | ||
238 | ppd->ccti_entries = kzalloc(size, GFP_KERNEL); | ||
239 | if (!ppd->ccti_entries) { | ||
240 | qib_dev_err(dd, | ||
241 | "failed to allocate congestion control table for port %d!\n", | ||
242 | port); | ||
243 | goto bail; | ||
244 | } | ||
245 | |||
246 | size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry); | ||
247 | ppd->congestion_entries = kzalloc(size, GFP_KERNEL); | ||
248 | if (!ppd->congestion_entries) { | ||
249 | qib_dev_err(dd, | ||
250 | "failed to allocate congestion setting list for port %d!\n", | ||
251 | port); | ||
252 | goto bail_1; | ||
253 | } | ||
254 | |||
255 | size = sizeof(struct cc_table_shadow); | ||
256 | ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL); | ||
257 | if (!ppd->ccti_entries_shadow) { | ||
258 | qib_dev_err(dd, | ||
259 | "failed to allocate shadow ccti list for port %d!\n", | ||
260 | port); | ||
261 | goto bail_2; | ||
262 | } | ||
263 | |||
264 | size = sizeof(struct ib_cc_congestion_setting_attr); | ||
265 | ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL); | ||
266 | if (!ppd->congestion_entries_shadow) { | ||
267 | qib_dev_err(dd, | ||
268 | "failed to allocate shadow congestion setting list for port %d!\n", | ||
269 | port); | ||
270 | goto bail_3; | ||
271 | } | ||
272 | |||
273 | return; | ||
274 | |||
275 | bail_3: | ||
276 | kfree(ppd->ccti_entries_shadow); | ||
277 | ppd->ccti_entries_shadow = NULL; | ||
278 | bail_2: | ||
279 | kfree(ppd->congestion_entries); | ||
280 | ppd->congestion_entries = NULL; | ||
281 | bail_1: | ||
282 | kfree(ppd->ccti_entries); | ||
283 | ppd->ccti_entries = NULL; | ||
284 | bail: | ||
285 | /* User is intentionally disabling the congestion control agent */ | ||
286 | if (!qib_cc_table_size) | ||
287 | return; | ||
288 | |||
289 | if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { | ||
290 | qib_cc_table_size = 0; | ||
291 | qib_dev_err(dd, | ||
292 | "Congestion Control table size %d less than minimum %d for port %d\n", | ||
293 | qib_cc_table_size, IB_CCT_MIN_ENTRIES, port); | ||
294 | } | ||
295 | |||
296 | qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", | ||
297 | port); | ||
298 | return; | ||
213 | } | 299 | } |
214 | 300 | ||
215 | static int init_pioavailregs(struct qib_devdata *dd) | 301 | static int init_pioavailregs(struct qib_devdata *dd) |
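The new qib_init_pportdata() code above clamps the cc_table_size module parameter and allocates four congestion-control buffers with staged bail_1/bail_2/bail_3 unwinding. The 128/1984 limits in the parameter description imply 64-entry CCT blocks with a 31-block cap; under that assumption the sizing reduces to the following sketch (constants inferred, not quoted from headers):

	/* Illustrative only; 128, 1984 and 64 are inferred from the limits above. */
	static unsigned cc_blocks_for(unsigned requested)
	{
		unsigned supported;

		if (requested < 128)			/* below the minimum: CCA stays disabled */
			return 0;
		supported = requested > 1984 ? 1984 : requested;
		return supported / 64;			/* e.g. requested = 500 -> 7 blocks */
	}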
@@ -221,8 +307,8 @@ static int init_pioavailregs(struct qib_devdata *dd) | |||
221 | &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, | 307 | &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, |
222 | GFP_KERNEL); | 308 | GFP_KERNEL); |
223 | if (!dd->pioavailregs_dma) { | 309 | if (!dd->pioavailregs_dma) { |
224 | qib_dev_err(dd, "failed to allocate PIOavail reg area " | 310 | qib_dev_err(dd, |
225 | "in memory\n"); | 311 | "failed to allocate PIOavail reg area in memory\n"); |
226 | ret = -ENOMEM; | 312 | ret = -ENOMEM; |
227 | goto done; | 313 | goto done; |
228 | } | 314 | } |
@@ -277,15 +363,15 @@ static void init_shadow_tids(struct qib_devdata *dd) | |||
277 | 363 | ||
278 | pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); | 364 | pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); |
279 | if (!pages) { | 365 | if (!pages) { |
280 | qib_dev_err(dd, "failed to allocate shadow page * " | 366 | qib_dev_err(dd, |
281 | "array, no expected sends!\n"); | 367 | "failed to allocate shadow page * array, no expected sends!\n"); |
282 | goto bail; | 368 | goto bail; |
283 | } | 369 | } |
284 | 370 | ||
285 | addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); | 371 | addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); |
286 | if (!addrs) { | 372 | if (!addrs) { |
287 | qib_dev_err(dd, "failed to allocate shadow dma handle " | 373 | qib_dev_err(dd, |
288 | "array, no expected sends!\n"); | 374 | "failed to allocate shadow dma handle array, no expected sends!\n"); |
289 | goto bail_free; | 375 | goto bail_free; |
290 | } | 376 | } |
291 | 377 | ||
@@ -309,13 +395,13 @@ static int loadtime_init(struct qib_devdata *dd) | |||
309 | 395 | ||
310 | if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & | 396 | if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & |
311 | QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { | 397 | QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { |
312 | qib_dev_err(dd, "Driver only handles version %d, " | 398 | qib_dev_err(dd, |
313 | "chip swversion is %d (%llx), failng\n", | 399 | "Driver only handles version %d, chip swversion is %d (%llx), failng\n", |
314 | QIB_CHIP_SWVERSION, | 400 | QIB_CHIP_SWVERSION, |
315 | (int)(dd->revision >> | 401 | (int)(dd->revision >> |
316 | QLOGIC_IB_R_SOFTWARE_SHIFT) & | 402 | QLOGIC_IB_R_SOFTWARE_SHIFT) & |
317 | QLOGIC_IB_R_SOFTWARE_MASK, | 403 | QLOGIC_IB_R_SOFTWARE_MASK, |
318 | (unsigned long long) dd->revision); | 404 | (unsigned long long) dd->revision); |
319 | ret = -ENOSYS; | 405 | ret = -ENOSYS; |
320 | goto done; | 406 | goto done; |
321 | } | 407 | } |
@@ -419,8 +505,8 @@ static void verify_interrupt(unsigned long opaque) | |||
419 | */ | 505 | */ |
420 | if (dd->int_counter == 0) { | 506 | if (dd->int_counter == 0) { |
421 | if (!dd->f_intr_fallback(dd)) | 507 | if (!dd->f_intr_fallback(dd)) |
422 | dev_err(&dd->pcidev->dev, "No interrupts detected, " | 508 | dev_err(&dd->pcidev->dev, |
423 | "not usable.\n"); | 509 | "No interrupts detected, not usable.\n"); |
424 | else /* re-arm the timer to see if fallback works */ | 510 | else /* re-arm the timer to see if fallback works */ |
425 | mod_timer(&dd->intrchk_timer, jiffies + HZ/2); | 511 | mod_timer(&dd->intrchk_timer, jiffies + HZ/2); |
426 | } | 512 | } |
@@ -483,6 +569,41 @@ static void init_piobuf_state(struct qib_devdata *dd) | |||
483 | } | 569 | } |
484 | 570 | ||
485 | /** | 571 | /** |
572 | * qib_create_workqueues - create per port workqueues | ||
573 | * @dd: the qlogic_ib device | ||
574 | */ | ||
575 | static int qib_create_workqueues(struct qib_devdata *dd) | ||
576 | { | ||
577 | int pidx; | ||
578 | struct qib_pportdata *ppd; | ||
579 | |||
580 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
581 | ppd = dd->pport + pidx; | ||
582 | if (!ppd->qib_wq) { | ||
583 | char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ | ||
584 | snprintf(wq_name, sizeof(wq_name), "qib%d_%d", | ||
585 | dd->unit, pidx); | ||
586 | ppd->qib_wq = | ||
587 | create_singlethread_workqueue(wq_name); | ||
588 | if (!ppd->qib_wq) | ||
589 | goto wq_error; | ||
590 | } | ||
591 | } | ||
592 | return 0; | ||
593 | wq_error: | ||
594 | pr_err("create_singlethread_workqueue failed for port %d\n", | ||
595 | pidx + 1); | ||
596 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
597 | ppd = dd->pport + pidx; | ||
598 | if (ppd->qib_wq) { | ||
599 | destroy_workqueue(ppd->qib_wq); | ||
600 | ppd->qib_wq = NULL; | ||
601 | } | ||
602 | } | ||
603 | return -ENOMEM; | ||
604 | } | ||
605 | |||
606 | /** | ||
486 | * qib_init - do the actual initialization sequence on the chip | 607 | * qib_init - do the actual initialization sequence on the chip |
487 | * @dd: the qlogic_ib device | 608 | * @dd: the qlogic_ib device |
488 | * @reinit: reinitializing, so don't allocate new memory | 609 | * @reinit: reinitializing, so don't allocate new memory |
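qib_create_workqueues() above gives each port a single-threaded workqueue named "qib<unit>_<port index>" (the 8-byte name buffer is sized 3 + 2 + 1 + 1 + 1 for "qib", a two-digit unit, '_', one digit, and the NUL) and destroys every queue already created if a later one fails; the shutdown hunk further down tears them back down. Per-port work would be queued roughly as below; the work item and handler names are hypothetical:

	static void qib_port_task(struct work_struct *work)
	{
		/* deferred per-port work runs on that port's dedicated kthread */
	}

	static DECLARE_WORK(port_work, qib_port_task);	/* hypothetical work item */

	queue_work(ppd->qib_wq, &port_work);	/* serialized with other work on this port */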
@@ -547,8 +668,8 @@ int qib_init(struct qib_devdata *dd, int reinit) | |||
547 | if (!lastfail) | 668 | if (!lastfail) |
548 | lastfail = qib_setup_eagerbufs(rcd); | 669 | lastfail = qib_setup_eagerbufs(rcd); |
549 | if (lastfail) { | 670 | if (lastfail) { |
550 | qib_dev_err(dd, "failed to allocate kernel ctxt's " | 671 | qib_dev_err(dd, |
551 | "rcvhdrq and/or egr bufs\n"); | 672 | "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); |
552 | continue; | 673 | continue; |
553 | } | 674 | } |
554 | } | 675 | } |
@@ -764,6 +885,11 @@ static void qib_shutdown_device(struct qib_devdata *dd) | |||
764 | * We can't count on interrupts since we are stopping. | 885 | * We can't count on interrupts since we are stopping. |
765 | */ | 886 | */ |
766 | dd->f_quiet_serdes(ppd); | 887 | dd->f_quiet_serdes(ppd); |
888 | |||
889 | if (ppd->qib_wq) { | ||
890 | destroy_workqueue(ppd->qib_wq); | ||
891 | ppd->qib_wq = NULL; | ||
892 | } | ||
767 | } | 893 | } |
768 | 894 | ||
769 | qib_update_eeprom_log(dd); | 895 | qib_update_eeprom_log(dd); |
@@ -893,8 +1019,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd) | |||
893 | /* 1 GiB/sec, slightly over IB SDR line rate */ | 1019 | /* 1 GiB/sec, slightly over IB SDR line rate */ |
894 | if (lcnt < (emsecs * 1024U)) | 1020 | if (lcnt < (emsecs * 1024U)) |
895 | qib_dev_err(dd, | 1021 | qib_dev_err(dd, |
896 | "Performance problem: bandwidth to PIO buffers is " | 1022 | "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n", |
897 | "only %u MiB/sec\n", | ||
898 | lcnt / (u32) emsecs); | 1023 | lcnt / (u32) emsecs); |
899 | 1024 | ||
900 | preempt_enable(); | 1025 | preempt_enable(); |
@@ -967,8 +1092,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
967 | if (qib_cpulist) | 1092 | if (qib_cpulist) |
968 | qib_cpulist_count = count; | 1093 | qib_cpulist_count = count; |
969 | else | 1094 | else |
970 | qib_early_err(&pdev->dev, "Could not alloc cpulist " | 1095 | qib_early_err(&pdev->dev, |
971 | "info, cpu affinity might be wrong\n"); | 1096 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); |
972 | } | 1097 | } |
973 | 1098 | ||
974 | bail: | 1099 | bail: |
@@ -1057,21 +1182,20 @@ static int __init qlogic_ib_init(void) | |||
1057 | */ | 1182 | */ |
1058 | idr_init(&qib_unit_table); | 1183 | idr_init(&qib_unit_table); |
1059 | if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { | 1184 | if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { |
1060 | printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n"); | 1185 | pr_err("idr_pre_get() failed\n"); |
1061 | ret = -ENOMEM; | 1186 | ret = -ENOMEM; |
1062 | goto bail_cq_wq; | 1187 | goto bail_cq_wq; |
1063 | } | 1188 | } |
1064 | 1189 | ||
1065 | ret = pci_register_driver(&qib_driver); | 1190 | ret = pci_register_driver(&qib_driver); |
1066 | if (ret < 0) { | 1191 | if (ret < 0) { |
1067 | printk(KERN_ERR QIB_DRV_NAME | 1192 | pr_err("Unable to register driver: error %d\n", -ret); |
1068 | ": Unable to register driver: error %d\n", -ret); | ||
1069 | goto bail_unit; | 1193 | goto bail_unit; |
1070 | } | 1194 | } |
1071 | 1195 | ||
1072 | /* not fatal if it doesn't work */ | 1196 | /* not fatal if it doesn't work */ |
1073 | if (qib_init_qibfs()) | 1197 | if (qib_init_qibfs()) |
1074 | printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n"); | 1198 | pr_err("Unable to register ipathfs\n"); |
1075 | goto bail; /* all OK */ | 1199 | goto bail; /* all OK */ |
1076 | 1200 | ||
1077 | bail_unit: | 1201 | bail_unit: |
@@ -1095,9 +1219,9 @@ static void __exit qlogic_ib_cleanup(void) | |||
1095 | 1219 | ||
1096 | ret = qib_exit_qibfs(); | 1220 | ret = qib_exit_qibfs(); |
1097 | if (ret) | 1221 | if (ret) |
1098 | printk(KERN_ERR QIB_DRV_NAME ": " | 1222 | pr_err( |
1099 | "Unable to cleanup counter filesystem: " | 1223 | "Unable to cleanup counter filesystem: error %d\n", |
1100 | "error %d\n", -ret); | 1224 | -ret); |
1101 | 1225 | ||
1102 | pci_unregister_driver(&qib_driver); | 1226 | pci_unregister_driver(&qib_driver); |
1103 | 1227 | ||
@@ -1121,10 +1245,24 @@ static void cleanup_device_data(struct qib_devdata *dd) | |||
1121 | unsigned long flags; | 1245 | unsigned long flags; |
1122 | 1246 | ||
1123 | /* users can't do anything more with chip */ | 1247 | /* users can't do anything more with chip */ |
1124 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | 1248 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { |
1125 | if (dd->pport[pidx].statusp) | 1249 | if (dd->pport[pidx].statusp) |
1126 | *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; | 1250 | *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; |
1127 | 1251 | ||
1252 | spin_lock(&dd->pport[pidx].cc_shadow_lock); | ||
1253 | |||
1254 | kfree(dd->pport[pidx].congestion_entries); | ||
1255 | dd->pport[pidx].congestion_entries = NULL; | ||
1256 | kfree(dd->pport[pidx].ccti_entries); | ||
1257 | dd->pport[pidx].ccti_entries = NULL; | ||
1258 | kfree(dd->pport[pidx].ccti_entries_shadow); | ||
1259 | dd->pport[pidx].ccti_entries_shadow = NULL; | ||
1260 | kfree(dd->pport[pidx].congestion_entries_shadow); | ||
1261 | dd->pport[pidx].congestion_entries_shadow = NULL; | ||
1262 | |||
1263 | spin_unlock(&dd->pport[pidx].cc_shadow_lock); | ||
1264 | } | ||
1265 | |||
1128 | if (!qib_wc_pat) | 1266 | if (!qib_wc_pat) |
1129 | qib_disable_wc(dd); | 1267 | qib_disable_wc(dd); |
1130 | 1268 | ||
@@ -1223,9 +1361,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1223 | #ifdef CONFIG_PCI_MSI | 1361 | #ifdef CONFIG_PCI_MSI |
1224 | dd = qib_init_iba6120_funcs(pdev, ent); | 1362 | dd = qib_init_iba6120_funcs(pdev, ent); |
1225 | #else | 1363 | #else |
1226 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | 1364 | qib_early_err(&pdev->dev, |
1227 | "work if CONFIG_PCI_MSI is not enabled\n", | 1365 | "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", |
1228 | ent->device); | 1366 | ent->device); |
1229 | dd = ERR_PTR(-ENODEV); | 1367 | dd = ERR_PTR(-ENODEV); |
1230 | #endif | 1368 | #endif |
1231 | break; | 1369 | break; |
@@ -1239,8 +1377,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1239 | break; | 1377 | break; |
1240 | 1378 | ||
1241 | default: | 1379 | default: |
1242 | qib_early_err(&pdev->dev, "Failing on unknown QLogic " | 1380 | qib_early_err(&pdev->dev, |
1243 | "deviceid 0x%x\n", ent->device); | 1381 | "Failing on unknown QLogic deviceid 0x%x\n", |
1382 | ent->device); | ||
1244 | ret = -ENODEV; | 1383 | ret = -ENODEV; |
1245 | } | 1384 | } |
1246 | 1385 | ||
@@ -1249,6 +1388,10 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1249 | if (ret) | 1388 | if (ret) |
1250 | goto bail; /* error already printed */ | 1389 | goto bail; /* error already printed */ |
1251 | 1390 | ||
1391 | ret = qib_create_workqueues(dd); | ||
1392 | if (ret) | ||
1393 | goto bail; | ||
1394 | |||
1252 | /* do the generic initialization */ | 1395 | /* do the generic initialization */ |
1253 | initfail = qib_init(dd, 0); | 1396 | initfail = qib_init(dd, 0); |
1254 | 1397 | ||
@@ -1293,9 +1436,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
1293 | if (!qib_wc_pat) { | 1436 | if (!qib_wc_pat) { |
1294 | ret = qib_enable_wc(dd); | 1437 | ret = qib_enable_wc(dd); |
1295 | if (ret) { | 1438 | if (ret) { |
1296 | qib_dev_err(dd, "Write combining not enabled " | 1439 | qib_dev_err(dd, |
1297 | "(err %d): performance may be poor\n", | 1440 | "Write combining not enabled (err %d): performance may be poor\n", |
1298 | -ret); | 1441 | -ret); |
1299 | ret = 0; | 1442 | ret = 0; |
1300 | } | 1443 | } |
1301 | } | 1444 | } |
@@ -1361,9 +1504,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | |||
1361 | gfp_flags | __GFP_COMP); | 1504 | gfp_flags | __GFP_COMP); |
1362 | 1505 | ||
1363 | if (!rcd->rcvhdrq) { | 1506 | if (!rcd->rcvhdrq) { |
1364 | qib_dev_err(dd, "attempt to allocate %d bytes " | 1507 | qib_dev_err(dd, |
1365 | "for ctxt %u rcvhdrq failed\n", | 1508 | "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", |
1366 | amt, rcd->ctxt); | 1509 | amt, rcd->ctxt); |
1367 | goto bail; | 1510 | goto bail; |
1368 | } | 1511 | } |
1369 | 1512 | ||
@@ -1392,8 +1535,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | |||
1392 | return 0; | 1535 | return 0; |
1393 | 1536 | ||
1394 | bail_free: | 1537 | bail_free: |
1395 | qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u " | 1538 | qib_dev_err(dd, |
1396 | "rcvhdrqtailaddr failed\n", rcd->ctxt); | 1539 | "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", |
1540 | rcd->ctxt); | ||
1397 | vfree(rcd->user_event_mask); | 1541 | vfree(rcd->user_event_mask); |
1398 | rcd->user_event_mask = NULL; | 1542 | rcd->user_event_mask = NULL; |
1399 | bail_free_hdrq: | 1543 | bail_free_hdrq: |
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 6ae57d23004a..f4918f2165ec 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c | |||
@@ -224,15 +224,15 @@ void qib_bad_intrstatus(struct qib_devdata *dd) | |||
224 | * We print the message and disable interrupts, in hope of | 224 | * We print the message and disable interrupts, in hope of |
225 | * having a better chance of debugging the problem. | 225 | * having a better chance of debugging the problem. |
226 | */ | 226 | */ |
227 | qib_dev_err(dd, "Read of chip interrupt status failed" | 227 | qib_dev_err(dd, |
228 | " disabling interrupts\n"); | 228 | "Read of chip interrupt status failed disabling interrupts\n"); |
229 | if (allbits++) { | 229 | if (allbits++) { |
230 | /* disable interrupt delivery, something is very wrong */ | 230 | /* disable interrupt delivery, something is very wrong */ |
231 | if (allbits == 2) | 231 | if (allbits == 2) |
232 | dd->f_set_intr_state(dd, 0); | 232 | dd->f_set_intr_state(dd, 0); |
233 | if (allbits == 3) { | 233 | if (allbits == 3) { |
234 | qib_dev_err(dd, "2nd bad interrupt status, " | 234 | qib_dev_err(dd, |
235 | "unregistering interrupts\n"); | 235 | "2nd bad interrupt status, unregistering interrupts\n"); |
236 | dd->flags |= QIB_BADINTR; | 236 | dd->flags |= QIB_BADINTR; |
237 | dd->flags &= ~QIB_INITTED; | 237 | dd->flags &= ~QIB_INITTED; |
238 | dd->f_free_irq(dd); | 238 | dd->f_free_irq(dd); |
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 8fd19a47df0c..e9486c74c226 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
@@ -35,21 +35,41 @@ | |||
35 | 35 | ||
36 | /** | 36 | /** |
37 | * qib_alloc_lkey - allocate an lkey | 37 | * qib_alloc_lkey - allocate an lkey |
38 | * @rkt: lkey table in which to allocate the lkey | ||
39 | * @mr: memory region that this lkey protects | 38 | * @mr: memory region that this lkey protects |
39 | * @dma_region: 0->normal key, 1->restricted DMA key | ||
40 | * | ||
41 | * Returns 0 if successful, otherwise returns -errno. | ||
42 | * | ||
43 | * Increments mr reference count as required. | ||
44 | * | ||
45 | * Sets the lkey field mr for non-dma regions. | ||
40 | * | 46 | * |
41 | * Returns 1 if successful, otherwise returns 0. | ||
42 | */ | 47 | */ |
43 | 48 | ||
44 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | 49 | int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) |
45 | { | 50 | { |
46 | unsigned long flags; | 51 | unsigned long flags; |
47 | u32 r; | 52 | u32 r; |
48 | u32 n; | 53 | u32 n; |
49 | int ret; | 54 | int ret = 0; |
55 | struct qib_ibdev *dev = to_idev(mr->pd->device); | ||
56 | struct qib_lkey_table *rkt = &dev->lk_table; | ||
50 | 57 | ||
51 | spin_lock_irqsave(&rkt->lock, flags); | 58 | spin_lock_irqsave(&rkt->lock, flags); |
52 | 59 | ||
60 | /* special case for dma_mr lkey == 0 */ | ||
61 | if (dma_region) { | ||
62 | struct qib_mregion *tmr; | ||
63 | |||
64 | tmr = rcu_dereference(dev->dma_mr); | ||
65 | if (!tmr) { | ||
66 | qib_get_mr(mr); | ||
67 | rcu_assign_pointer(dev->dma_mr, mr); | ||
68 | mr->lkey_published = 1; | ||
69 | } | ||
70 | goto success; | ||
71 | } | ||
72 | |||
53 | /* Find the next available LKEY */ | 73 | /* Find the next available LKEY */ |
54 | r = rkt->next; | 74 | r = rkt->next; |
55 | n = r; | 75 | n = r; |
@@ -57,11 +77,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | |||
57 | if (rkt->table[r] == NULL) | 77 | if (rkt->table[r] == NULL) |
58 | break; | 78 | break; |
59 | r = (r + 1) & (rkt->max - 1); | 79 | r = (r + 1) & (rkt->max - 1); |
60 | if (r == n) { | 80 | if (r == n) |
61 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
62 | ret = 0; | ||
63 | goto bail; | 81 | goto bail; |
64 | } | ||
65 | } | 82 | } |
66 | rkt->next = (r + 1) & (rkt->max - 1); | 83 | rkt->next = (r + 1) & (rkt->max - 1); |
67 | /* | 84 | /* |
@@ -76,57 +93,58 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | |||
76 | mr->lkey |= 1 << 8; | 93 | mr->lkey |= 1 << 8; |
77 | rkt->gen++; | 94 | rkt->gen++; |
78 | } | 95 | } |
79 | rkt->table[r] = mr; | 96 | qib_get_mr(mr); |
97 | rcu_assign_pointer(rkt->table[r], mr); | ||
98 | mr->lkey_published = 1; | ||
99 | success: | ||
80 | spin_unlock_irqrestore(&rkt->lock, flags); | 100 | spin_unlock_irqrestore(&rkt->lock, flags); |
81 | 101 | out: | |
82 | ret = 1; | ||
83 | |||
84 | bail: | ||
85 | return ret; | 102 | return ret; |
103 | bail: | ||
104 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
105 | ret = -ENOMEM; | ||
106 | goto out; | ||
86 | } | 107 | } |
87 | 108 | ||
88 | /** | 109 | /** |
89 | * qib_free_lkey - free an lkey | 110 | * qib_free_lkey - free an lkey |
90 | * @rkt: table from which to free the lkey | 111 | * @mr: mr to free from tables |
91 | * @lkey: lkey id to free | ||
92 | */ | 112 | */ |
93 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr) | 113 | void qib_free_lkey(struct qib_mregion *mr) |
94 | { | 114 | { |
95 | unsigned long flags; | 115 | unsigned long flags; |
96 | u32 lkey = mr->lkey; | 116 | u32 lkey = mr->lkey; |
97 | u32 r; | 117 | u32 r; |
98 | int ret; | 118 | struct qib_ibdev *dev = to_idev(mr->pd->device); |
119 | struct qib_lkey_table *rkt = &dev->lk_table; | ||
99 | 120 | ||
100 | spin_lock_irqsave(&dev->lk_table.lock, flags); | 121 | spin_lock_irqsave(&rkt->lock, flags); |
101 | if (lkey == 0) { | 122 | if (!mr->lkey_published) |
102 | if (dev->dma_mr && dev->dma_mr == mr) { | 123 | goto out; |
103 | ret = atomic_read(&dev->dma_mr->refcount); | 124 | if (lkey == 0) |
104 | if (!ret) | 125 | rcu_assign_pointer(dev->dma_mr, NULL); |
105 | dev->dma_mr = NULL; | 126 | else { |
106 | } else | ||
107 | ret = 0; | ||
108 | } else { | ||
109 | r = lkey >> (32 - ib_qib_lkey_table_size); | 127 | r = lkey >> (32 - ib_qib_lkey_table_size); |
110 | ret = atomic_read(&dev->lk_table.table[r]->refcount); | 128 | rcu_assign_pointer(rkt->table[r], NULL); |
111 | if (!ret) | ||
112 | dev->lk_table.table[r] = NULL; | ||
113 | } | 129 | } |
114 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | 130 | qib_put_mr(mr); |
115 | 131 | mr->lkey_published = 0; | |
116 | if (ret) | 132 | out: |
117 | ret = -EBUSY; | 133 | spin_unlock_irqrestore(&rkt->lock, flags); |
118 | return ret; | ||
119 | } | 134 | } |
120 | 135 | ||
121 | /** | 136 | /** |
122 | * qib_lkey_ok - check IB SGE for validity and initialize | 137 | * qib_lkey_ok - check IB SGE for validity and initialize |
123 | * @rkt: table containing lkey to check SGE against | 138 | * @rkt: table containing lkey to check SGE against |
139 | * @pd: protection domain | ||
124 | * @isge: outgoing internal SGE | 140 | * @isge: outgoing internal SGE |
125 | * @sge: SGE to check | 141 | * @sge: SGE to check |
126 | * @acc: access flags | 142 | * @acc: access flags |
127 | * | 143 | * |
128 | * Return 1 if valid and successful, otherwise returns 0. | 144 | * Return 1 if valid and successful, otherwise returns 0. |
129 | * | 145 | * |
146 | * increments the reference count upon success | ||
147 | * | ||
130 | * Check the IB SGE for validity and initialize our internal version | 148 | * Check the IB SGE for validity and initialize our internal version |
131 | * of it. | 149 | * of it. |
132 | */ | 150 | */ |
@@ -136,24 +154,25 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
136 | struct qib_mregion *mr; | 154 | struct qib_mregion *mr; |
137 | unsigned n, m; | 155 | unsigned n, m; |
138 | size_t off; | 156 | size_t off; |
139 | unsigned long flags; | ||
140 | 157 | ||
141 | /* | 158 | /* |
142 | * We use LKEY == zero for kernel virtual addresses | 159 | * We use LKEY == zero for kernel virtual addresses |
143 | * (see qib_get_dma_mr and qib_dma.c). | 160 | * (see qib_get_dma_mr and qib_dma.c). |
144 | */ | 161 | */ |
145 | spin_lock_irqsave(&rkt->lock, flags); | 162 | rcu_read_lock(); |
146 | if (sge->lkey == 0) { | 163 | if (sge->lkey == 0) { |
147 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); | 164 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); |
148 | 165 | ||
149 | if (pd->user) | 166 | if (pd->user) |
150 | goto bail; | 167 | goto bail; |
151 | if (!dev->dma_mr) | 168 | mr = rcu_dereference(dev->dma_mr); |
169 | if (!mr) | ||
152 | goto bail; | 170 | goto bail; |
153 | atomic_inc(&dev->dma_mr->refcount); | 171 | if (unlikely(!atomic_inc_not_zero(&mr->refcount))) |
154 | spin_unlock_irqrestore(&rkt->lock, flags); | 172 | goto bail; |
173 | rcu_read_unlock(); | ||
155 | 174 | ||
156 | isge->mr = dev->dma_mr; | 175 | isge->mr = mr; |
157 | isge->vaddr = (void *) sge->addr; | 176 | isge->vaddr = (void *) sge->addr; |
158 | isge->length = sge->length; | 177 | isge->length = sge->length; |
159 | isge->sge_length = sge->length; | 178 | isge->sge_length = sge->length; |
@@ -161,18 +180,18 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
161 | isge->n = 0; | 180 | isge->n = 0; |
162 | goto ok; | 181 | goto ok; |
163 | } | 182 | } |
164 | mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]; | 183 | mr = rcu_dereference( |
165 | if (unlikely(mr == NULL || mr->lkey != sge->lkey || | 184 | rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]); |
166 | mr->pd != &pd->ibpd)) | 185 | if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) |
167 | goto bail; | 186 | goto bail; |
168 | 187 | ||
169 | off = sge->addr - mr->user_base; | 188 | off = sge->addr - mr->user_base; |
170 | if (unlikely(sge->addr < mr->user_base || | 189 | if (unlikely(sge->addr < mr->iova || off + sge->length > mr->length || |
171 | off + sge->length > mr->length || | 190 | (mr->access_flags & acc) == 0)) |
172 | (mr->access_flags & acc) != acc)) | ||
173 | goto bail; | 191 | goto bail; |
174 | atomic_inc(&mr->refcount); | 192 | if (unlikely(!atomic_inc_not_zero(&mr->refcount))) |
175 | spin_unlock_irqrestore(&rkt->lock, flags); | 193 | goto bail; |
194 | rcu_read_unlock(); | ||
176 | 195 | ||
177 | off += mr->offset; | 196 | off += mr->offset; |
178 | if (mr->page_shift) { | 197 | if (mr->page_shift) { |
@@ -208,20 +227,22 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
208 | ok: | 227 | ok: |
209 | return 1; | 228 | return 1; |
210 | bail: | 229 | bail: |
211 | spin_unlock_irqrestore(&rkt->lock, flags); | 230 | rcu_read_unlock(); |
212 | return 0; | 231 | return 0; |
213 | } | 232 | } |
214 | 233 | ||
215 | /** | 234 | /** |
216 | * qib_rkey_ok - check the IB virtual address, length, and RKEY | 235 | * qib_rkey_ok - check the IB virtual address, length, and RKEY |
217 | * @dev: infiniband device | 236 | * @qp: qp for validation |
218 | * @ss: SGE state | 237 | * @sge: SGE state |
219 | * @len: length of data | 238 | * @len: length of data |
220 | * @vaddr: virtual address to place data | 239 | * @vaddr: virtual address to place data |
221 | * @rkey: rkey to check | 240 | * @rkey: rkey to check |
222 | * @acc: access flags | 241 | * @acc: access flags |
223 | * | 242 | * |
224 | * Return 1 if successful, otherwise 0. | 243 | * Return 1 if successful, otherwise 0. |
244 | * | ||
245 | * increments the reference count upon success | ||
225 | */ | 246 | */ |
226 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | 247 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, |
227 | u32 len, u64 vaddr, u32 rkey, int acc) | 248 | u32 len, u64 vaddr, u32 rkey, int acc) |
@@ -230,25 +251,26 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
230 | struct qib_mregion *mr; | 251 | struct qib_mregion *mr; |
231 | unsigned n, m; | 252 | unsigned n, m; |
232 | size_t off; | 253 | size_t off; |
233 | unsigned long flags; | ||
234 | 254 | ||
235 | /* | 255 | /* |
236 | * We use RKEY == zero for kernel virtual addresses | 256 | * We use RKEY == zero for kernel virtual addresses |
237 | * (see qib_get_dma_mr and qib_dma.c). | 257 | * (see qib_get_dma_mr and qib_dma.c). |
238 | */ | 258 | */ |
239 | spin_lock_irqsave(&rkt->lock, flags); | 259 | rcu_read_lock(); |
240 | if (rkey == 0) { | 260 | if (rkey == 0) { |
241 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | 261 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); |
242 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); | 262 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); |
243 | 263 | ||
244 | if (pd->user) | 264 | if (pd->user) |
245 | goto bail; | 265 | goto bail; |
246 | if (!dev->dma_mr) | 266 | mr = rcu_dereference(dev->dma_mr); |
267 | if (!mr) | ||
247 | goto bail; | 268 | goto bail; |
248 | atomic_inc(&dev->dma_mr->refcount); | 269 | if (unlikely(!atomic_inc_not_zero(&mr->refcount))) |
249 | spin_unlock_irqrestore(&rkt->lock, flags); | 270 | goto bail; |
271 | rcu_read_unlock(); | ||
250 | 272 | ||
251 | sge->mr = dev->dma_mr; | 273 | sge->mr = mr; |
252 | sge->vaddr = (void *) vaddr; | 274 | sge->vaddr = (void *) vaddr; |
253 | sge->length = len; | 275 | sge->length = len; |
254 | sge->sge_length = len; | 276 | sge->sge_length = len; |
@@ -257,16 +279,18 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
257 | goto ok; | 279 | goto ok; |
258 | } | 280 | } |
259 | 281 | ||
260 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | 282 | mr = rcu_dereference( |
261 | if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) | 283 | rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]); |
284 | if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) | ||
262 | goto bail; | 285 | goto bail; |
263 | 286 | ||
264 | off = vaddr - mr->iova; | 287 | off = vaddr - mr->iova; |
265 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | 288 | if (unlikely(vaddr < mr->iova || off + len > mr->length || |
266 | (mr->access_flags & acc) == 0)) | 289 | (mr->access_flags & acc) == 0)) |
267 | goto bail; | 290 | goto bail; |
268 | atomic_inc(&mr->refcount); | 291 | if (unlikely(!atomic_inc_not_zero(&mr->refcount))) |
269 | spin_unlock_irqrestore(&rkt->lock, flags); | 292 | goto bail; |
293 | rcu_read_unlock(); | ||
270 | 294 | ||
271 | off += mr->offset; | 295 | off += mr->offset; |
272 | if (mr->page_shift) { | 296 | if (mr->page_shift) { |
@@ -302,7 +326,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
302 | ok: | 326 | ok: |
303 | return 1; | 327 | return 1; |
304 | bail: | 328 | bail: |
305 | spin_unlock_irqrestore(&rkt->lock, flags); | 329 | rcu_read_unlock(); |
306 | return 0; | 330 | return 0; |
307 | } | 331 | } |
308 | 332 | ||
@@ -325,7 +349,9 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) | |||
325 | if (pd->user || rkey == 0) | 349 | if (pd->user || rkey == 0) |
326 | goto bail; | 350 | goto bail; |
327 | 351 | ||
328 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | 352 | mr = rcu_dereference_protected( |
353 | rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))], | ||
354 | lockdep_is_held(&rkt->lock)); | ||
329 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) | 355 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) |
330 | goto bail; | 356 | goto bail; |
331 | 357 | ||
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 43390217a026..19f1e6c45fb6 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -49,6 +49,18 @@ static int reply(struct ib_smp *smp) | |||
49 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; | 49 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; |
50 | } | 50 | } |
51 | 51 | ||
52 | static int reply_failure(struct ib_smp *smp) | ||
53 | { | ||
54 | /* | ||
55 | * The verbs framework will handle the directed/LID route | ||
56 | * packet changes. | ||
57 | */ | ||
58 | smp->method = IB_MGMT_METHOD_GET_RESP; | ||
59 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | ||
60 | smp->status |= IB_SMP_DIRECTION; | ||
61 | return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY; | ||
62 | } | ||
63 | |||
52 | static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | 64 | static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) |
53 | { | 65 | { |
54 | struct ib_mad_send_buf *send_buf; | 66 | struct ib_mad_send_buf *send_buf; |
@@ -90,14 +102,10 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | |||
90 | if (!ibp->sm_ah) { | 102 | if (!ibp->sm_ah) { |
91 | if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { | 103 | if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { |
92 | struct ib_ah *ah; | 104 | struct ib_ah *ah; |
93 | struct ib_ah_attr attr; | ||
94 | 105 | ||
95 | memset(&attr, 0, sizeof attr); | 106 | ah = qib_create_qp0_ah(ibp, ibp->sm_lid); |
96 | attr.dlid = ibp->sm_lid; | ||
97 | attr.port_num = ppd_from_ibp(ibp)->port; | ||
98 | ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); | ||
99 | if (IS_ERR(ah)) | 107 | if (IS_ERR(ah)) |
100 | ret = -EINVAL; | 108 | ret = PTR_ERR(ah); |
101 | else { | 109 | else { |
102 | send_buf->ah = ah; | 110 | send_buf->ah = ah; |
103 | ibp->sm_ah = to_iah(ah); | 111 | ibp->sm_ah = to_iah(ah); |
@@ -2051,6 +2059,298 @@ bail: | |||
2051 | return ret; | 2059 | return ret; |
2052 | } | 2060 | } |
2053 | 2061 | ||
2062 | static int cc_get_classportinfo(struct ib_cc_mad *ccp, | ||
2063 | struct ib_device *ibdev) | ||
2064 | { | ||
2065 | struct ib_cc_classportinfo_attr *p = | ||
2066 | (struct ib_cc_classportinfo_attr *)ccp->mgmt_data; | ||
2067 | |||
2068 | memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); | ||
2069 | |||
2070 | p->base_version = 1; | ||
2071 | p->class_version = 1; | ||
2072 | p->cap_mask = 0; | ||
2073 | |||
2074 | /* | ||
2075 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. | ||
2076 | */ | ||
2077 | p->resp_time_value = 18; | ||
2078 | |||
2079 | return reply((struct ib_smp *) ccp); | ||
2080 | } | ||
2081 | |||
2082 | static int cc_get_congestion_info(struct ib_cc_mad *ccp, | ||
2083 | struct ib_device *ibdev, u8 port) | ||
2084 | { | ||
2085 | struct ib_cc_info_attr *p = | ||
2086 | (struct ib_cc_info_attr *)ccp->mgmt_data; | ||
2087 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2088 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2089 | |||
2090 | memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); | ||
2091 | |||
2092 | p->congestion_info = 0; | ||
2093 | p->control_table_cap = ppd->cc_max_table_entries; | ||
2094 | |||
2095 | return reply((struct ib_smp *) ccp); | ||
2096 | } | ||
2097 | |||
2098 | static int cc_get_congestion_setting(struct ib_cc_mad *ccp, | ||
2099 | struct ib_device *ibdev, u8 port) | ||
2100 | { | ||
2101 | int i; | ||
2102 | struct ib_cc_congestion_setting_attr *p = | ||
2103 | (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; | ||
2104 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2105 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2106 | struct ib_cc_congestion_entry_shadow *entries; | ||
2107 | |||
2108 | memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); | ||
2109 | |||
2110 | spin_lock(&ppd->cc_shadow_lock); | ||
2111 | |||
2112 | entries = ppd->congestion_entries_shadow->entries; | ||
2113 | p->port_control = cpu_to_be16( | ||
2114 | ppd->congestion_entries_shadow->port_control); | ||
2115 | p->control_map = cpu_to_be16( | ||
2116 | ppd->congestion_entries_shadow->control_map); | ||
2117 | for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { | ||
2118 | p->entries[i].ccti_increase = entries[i].ccti_increase; | ||
2119 | p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer); | ||
2120 | p->entries[i].trigger_threshold = entries[i].trigger_threshold; | ||
2121 | p->entries[i].ccti_min = entries[i].ccti_min; | ||
2122 | } | ||
2123 | |||
2124 | spin_unlock(&ppd->cc_shadow_lock); | ||
2125 | |||
2126 | return reply((struct ib_smp *) ccp); | ||
2127 | } | ||
2128 | |||
2129 | static int cc_get_congestion_control_table(struct ib_cc_mad *ccp, | ||
2130 | struct ib_device *ibdev, u8 port) | ||
2131 | { | ||
2132 | struct ib_cc_table_attr *p = | ||
2133 | (struct ib_cc_table_attr *)ccp->mgmt_data; | ||
2134 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2135 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2136 | u32 cct_block_index = be32_to_cpu(ccp->attr_mod); | ||
2137 | u32 max_cct_block; | ||
2138 | u32 cct_entry; | ||
2139 | struct ib_cc_table_entry_shadow *entries; | ||
2140 | int i; | ||
2141 | |||
2142 | /* Is the table index more than what is supported? */ | ||
2143 | if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) | ||
2144 | goto bail; | ||
2145 | |||
2146 | memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); | ||
2147 | |||
2148 | spin_lock(&ppd->cc_shadow_lock); | ||
2149 | |||
2150 | max_cct_block = | ||
2151 | (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES; | ||
2152 | max_cct_block = max_cct_block ? max_cct_block - 1 : 0; | ||
2153 | |||
2154 | if (cct_block_index > max_cct_block) { | ||
2155 | spin_unlock(&ppd->cc_shadow_lock); | ||
2156 | goto bail; | ||
2157 | } | ||
2158 | |||
2159 | ccp->attr_mod = cpu_to_be32(cct_block_index); | ||
2160 | |||
2161 | cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1); | ||
2162 | |||
2163 | cct_entry--; | ||
2164 | |||
2165 | p->ccti_limit = cpu_to_be16(cct_entry); | ||
2166 | |||
2167 | entries = &ppd->ccti_entries_shadow-> | ||
2168 | entries[IB_CCT_ENTRIES * cct_block_index]; | ||
2169 | cct_entry %= IB_CCT_ENTRIES; | ||
2170 | |||
2171 | for (i = 0; i <= cct_entry; i++) | ||
2172 | p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry); | ||
2173 | |||
2174 | spin_unlock(&ppd->cc_shadow_lock); | ||
2175 | |||
2176 | return reply((struct ib_smp *) ccp); | ||
2177 | |||
2178 | bail: | ||
2179 | return reply_failure((struct ib_smp *) ccp); | ||
2180 | } | ||
2181 | |||
2182 | static int cc_set_congestion_setting(struct ib_cc_mad *ccp, | ||
2183 | struct ib_device *ibdev, u8 port) | ||
2184 | { | ||
2185 | struct ib_cc_congestion_setting_attr *p = | ||
2186 | (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; | ||
2187 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2188 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2189 | int i; | ||
2190 | |||
2191 | ppd->cc_sl_control_map = be16_to_cpu(p->control_map); | ||
2192 | |||
2193 | for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { | ||
2194 | ppd->congestion_entries[i].ccti_increase = | ||
2195 | p->entries[i].ccti_increase; | ||
2196 | |||
2197 | ppd->congestion_entries[i].ccti_timer = | ||
2198 | be16_to_cpu(p->entries[i].ccti_timer); | ||
2199 | |||
2200 | ppd->congestion_entries[i].trigger_threshold = | ||
2201 | p->entries[i].trigger_threshold; | ||
2202 | |||
2203 | ppd->congestion_entries[i].ccti_min = | ||
2204 | p->entries[i].ccti_min; | ||
2205 | } | ||
2206 | |||
2207 | return reply((struct ib_smp *) ccp); | ||
2208 | } | ||
2209 | |||
2210 | static int cc_set_congestion_control_table(struct ib_cc_mad *ccp, | ||
2211 | struct ib_device *ibdev, u8 port) | ||
2212 | { | ||
2213 | struct ib_cc_table_attr *p = | ||
2214 | (struct ib_cc_table_attr *)ccp->mgmt_data; | ||
2215 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2216 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2217 | u32 cct_block_index = be32_to_cpu(ccp->attr_mod); | ||
2218 | u32 cct_entry; | ||
2219 | struct ib_cc_table_entry_shadow *entries; | ||
2220 | int i; | ||
2221 | |||
2222 | /* Is the table index more than what is supported? */ | ||
2223 | if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) | ||
2224 | goto bail; | ||
2225 | |||
2226 | /* If this packet is the first in the sequence then | ||
2227 | * zero the total table entry count. | ||
2228 | */ | ||
2229 | if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES) | ||
2230 | ppd->total_cct_entry = 0; | ||
2231 | |||
2232 | cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES; | ||
2233 | |||
2234 | /* ccti_limit is 0 to 63 */ | ||
2235 | ppd->total_cct_entry += (cct_entry + 1); | ||
2236 | |||
2237 | if (ppd->total_cct_entry > ppd->cc_supported_table_entries) | ||
2238 | goto bail; | ||
2239 | |||
2240 | ppd->ccti_limit = be16_to_cpu(p->ccti_limit); | ||
2241 | |||
2242 | entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index); | ||
2243 | |||
2244 | for (i = 0; i <= cct_entry; i++) | ||
2245 | entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry); | ||
2246 | |||
2247 | spin_lock(&ppd->cc_shadow_lock); | ||
2248 | |||
2249 | ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1; | ||
2250 | memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries, | ||
2251 | (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry))); | ||
2252 | |||
2253 | ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED; | ||
2254 | ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map; | ||
2255 | memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries, | ||
2256 | IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry)); | ||
2257 | |||
2258 | spin_unlock(&ppd->cc_shadow_lock); | ||
2259 | |||
2260 | return reply((struct ib_smp *) ccp); | ||
2261 | |||
2262 | bail: | ||
2263 | return reply_failure((struct ib_smp *) ccp); | ||
2264 | } | ||
2265 | |||
2266 | static int check_cc_key(struct qib_ibport *ibp, | ||
2267 | struct ib_cc_mad *ccp, int mad_flags) | ||
2268 | { | ||
2269 | return 0; | ||
2270 | } | ||
2271 | |||
2272 | static int process_cc(struct ib_device *ibdev, int mad_flags, | ||
2273 | u8 port, struct ib_mad *in_mad, | ||
2274 | struct ib_mad *out_mad) | ||
2275 | { | ||
2276 | struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad; | ||
2277 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2278 | int ret; | ||
2279 | |||
2280 | *out_mad = *in_mad; | ||
2281 | |||
2282 | if (ccp->class_version != 2) { | ||
2283 | ccp->status |= IB_SMP_UNSUP_VERSION; | ||
2284 | ret = reply((struct ib_smp *)ccp); | ||
2285 | goto bail; | ||
2286 | } | ||
2287 | |||
2288 | ret = check_cc_key(ibp, ccp, mad_flags); | ||
2289 | if (ret) | ||
2290 | goto bail; | ||
2291 | |||
2292 | switch (ccp->method) { | ||
2293 | case IB_MGMT_METHOD_GET: | ||
2294 | switch (ccp->attr_id) { | ||
2295 | case IB_CC_ATTR_CLASSPORTINFO: | ||
2296 | ret = cc_get_classportinfo(ccp, ibdev); | ||
2297 | goto bail; | ||
2298 | |||
2299 | case IB_CC_ATTR_CONGESTION_INFO: | ||
2300 | ret = cc_get_congestion_info(ccp, ibdev, port); | ||
2301 | goto bail; | ||
2302 | |||
2303 | case IB_CC_ATTR_CA_CONGESTION_SETTING: | ||
2304 | ret = cc_get_congestion_setting(ccp, ibdev, port); | ||
2305 | goto bail; | ||
2306 | |||
2307 | case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: | ||
2308 | ret = cc_get_congestion_control_table(ccp, ibdev, port); | ||
2309 | goto bail; | ||
2310 | |||
2311 | /* FALLTHROUGH */ | ||
2312 | default: | ||
2313 | ccp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
2314 | ret = reply((struct ib_smp *) ccp); | ||
2315 | goto bail; | ||
2316 | } | ||
2317 | |||
2318 | case IB_MGMT_METHOD_SET: | ||
2319 | switch (ccp->attr_id) { | ||
2320 | case IB_CC_ATTR_CA_CONGESTION_SETTING: | ||
2321 | ret = cc_set_congestion_setting(ccp, ibdev, port); | ||
2322 | goto bail; | ||
2323 | |||
2324 | case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: | ||
2325 | ret = cc_set_congestion_control_table(ccp, ibdev, port); | ||
2326 | goto bail; | ||
2327 | |||
2328 | /* FALLTHROUGH */ | ||
2329 | default: | ||
2330 | ccp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
2331 | ret = reply((struct ib_smp *) ccp); | ||
2332 | goto bail; | ||
2333 | } | ||
2334 | |||
2335 | case IB_MGMT_METHOD_GET_RESP: | ||
2336 | /* | ||
2337 | * The ib_mad module will call us to process responses | ||
2338 | * before checking for other consumers. | ||
2339 | * Just tell the caller to process it normally. | ||
2340 | */ | ||
2341 | ret = IB_MAD_RESULT_SUCCESS; | ||
2342 | goto bail; | ||
2343 | |||
2344 | case IB_MGMT_METHOD_TRAP: | ||
2345 | default: | ||
2346 | ccp->status |= IB_SMP_UNSUP_METHOD; | ||
2347 | ret = reply((struct ib_smp *) ccp); | ||
2348 | } | ||
2349 | |||
2350 | bail: | ||
2351 | return ret; | ||
2352 | } | ||
2353 | |||
2054 | /** | 2354 | /** |
2055 | * qib_process_mad - process an incoming MAD packet | 2355 | * qib_process_mad - process an incoming MAD packet |
2056 | * @ibdev: the infiniband device this packet came in on | 2356 | * @ibdev: the infiniband device this packet came in on |
@@ -2075,6 +2375,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, | |||
2075 | struct ib_mad *in_mad, struct ib_mad *out_mad) | 2375 | struct ib_mad *in_mad, struct ib_mad *out_mad) |
2076 | { | 2376 | { |
2077 | int ret; | 2377 | int ret; |
2378 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
2379 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2078 | 2380 | ||
2079 | switch (in_mad->mad_hdr.mgmt_class) { | 2381 | switch (in_mad->mad_hdr.mgmt_class) { |
2080 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: | 2382 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: |
@@ -2086,6 +2388,15 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, | |||
2086 | ret = process_perf(ibdev, port, in_mad, out_mad); | 2388 | ret = process_perf(ibdev, port, in_mad, out_mad); |
2087 | goto bail; | 2389 | goto bail; |
2088 | 2390 | ||
2391 | case IB_MGMT_CLASS_CONG_MGMT: | ||
2392 | if (!ppd->congestion_entries_shadow || | ||
2393 | !qib_cc_table_size) { | ||
2394 | ret = IB_MAD_RESULT_SUCCESS; | ||
2395 | goto bail; | ||
2396 | } | ||
2397 | ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad); | ||
2398 | goto bail; | ||
2399 | |||
2089 | default: | 2400 | default: |
2090 | ret = IB_MAD_RESULT_SUCCESS; | 2401 | ret = IB_MAD_RESULT_SUCCESS; |
2091 | } | 2402 | } |
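Note on the qib_mad.c hunks above: qib_process_mad() now hands IB_MGMT_CLASS_CONG_MGMT packets to process_cc(), and only when a congestion shadow table has been allocated and qib_cc_table_size is non-zero; process_cc() then insists on class_version 2 and one of the congestion attribute IDs before building a reply. A minimal, illustrative sketch of the kind of GET request that path answers (not part of the patch; the helper name make_cc_info_get() is hypothetical, and struct ib_cc_mad is the layout added to qib_mad.h below):

static void make_cc_info_get(struct ib_cc_mad *ccp, u64 tid)
{
	memset(ccp, 0, sizeof(*ccp));
	ccp->base_version  = IB_MGMT_BASE_VERSION;	/* 1 */
	ccp->mgmt_class    = IB_MGMT_CLASS_CONG_MGMT;
	ccp->class_version = 2;		/* anything else gets IB_SMP_UNSUP_VERSION */
	ccp->method        = IB_MGMT_METHOD_GET;
	ccp->attr_id       = IB_CC_ATTR_CONGESTION_INFO;
	ccp->tid           = cpu_to_be64(tid);
	/* cckey left at zero: check_cc_key() above accepts any key for now */
}

Any other method lands in the IB_SMP_UNSUP_METHOD reply, exactly as the switch in process_cc() shows.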
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h index ecc416cdbaaa..57bd3fa016bc 100644 --- a/drivers/infiniband/hw/qib/qib_mad.h +++ b/drivers/infiniband/hw/qib/qib_mad.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -31,6 +31,8 @@ | |||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
32 | * SOFTWARE. | 32 | * SOFTWARE. |
33 | */ | 33 | */ |
34 | #ifndef _QIB_MAD_H | ||
35 | #define _QIB_MAD_H | ||
34 | 36 | ||
35 | #include <rdma/ib_pma.h> | 37 | #include <rdma/ib_pma.h> |
36 | 38 | ||
@@ -223,6 +225,198 @@ struct ib_pma_portcounters_cong { | |||
223 | #define IB_PMA_SEL_CONG_ROUTING 0x08 | 225 | #define IB_PMA_SEL_CONG_ROUTING 0x08 |
224 | 226 | ||
225 | /* | 227 | /* | 
228 | * Congestion control class attributes | ||
229 | */ | ||
230 | #define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001) | ||
231 | #define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002) | ||
232 | #define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011) | ||
233 | #define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012) | ||
234 | #define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013) | ||
235 | #define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014) | ||
236 | #define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015) | ||
237 | #define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016) | ||
238 | #define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017) | ||
239 | #define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018) | ||
240 | |||
241 | /* generalizations for threshold values */ | ||
242 | #define IB_CC_THRESHOLD_NONE 0x0 | ||
243 | #define IB_CC_THRESHOLD_MIN 0x1 | ||
244 | #define IB_CC_THRESHOLD_MAX 0xf | ||
245 | |||
246 | /* CCA MAD header constants */ | ||
247 | #define IB_CC_MAD_LOGDATA_LEN 32 | ||
248 | #define IB_CC_MAD_MGMTDATA_LEN 192 | ||
249 | |||
250 | struct ib_cc_mad { | ||
251 | u8 base_version; | ||
252 | u8 mgmt_class; | ||
253 | u8 class_version; | ||
254 | u8 method; | ||
255 | __be16 status; | ||
256 | __be16 class_specific; | ||
257 | __be64 tid; | ||
258 | __be16 attr_id; | ||
259 | __be16 resv; | ||
260 | __be32 attr_mod; | ||
261 | __be64 cckey; | ||
262 | |||
263 | /* For CongestionLog attribute only */ | ||
264 | u8 log_data[IB_CC_MAD_LOGDATA_LEN]; | ||
265 | |||
266 | u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN]; | ||
267 | } __packed; | ||
268 | |||
269 | /* | ||
270 | * Congestion Control class portinfo capability mask bits | ||
271 | */ | ||
272 | #define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0) | ||
273 | #define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1) | ||
274 | #define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2) | ||
275 | #define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8) | ||
276 | |||
277 | struct ib_cc_classportinfo_attr { | ||
278 | u8 base_version; | ||
279 | u8 class_version; | ||
280 | __be16 cap_mask; | ||
281 | u8 reserved[3]; | ||
282 | u8 resp_time_value; /* only lower 5 bits */ | ||
283 | union ib_gid redirect_gid; | ||
284 | __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */ | ||
285 | __be16 redirect_lid; | ||
286 | __be16 redirect_pkey; | ||
287 | __be32 redirect_qp; /* only lower 24 bits */ | ||
288 | __be32 redirect_qkey; | ||
289 | union ib_gid trap_gid; | ||
290 | __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */ | ||
291 | __be16 trap_lid; | ||
292 | __be16 trap_pkey; | ||
293 | __be32 trap_hl_qp; /* 8, 24 bits respectively */ | ||
294 | __be32 trap_qkey; | ||
295 | } __packed; | ||
296 | |||
297 | /* Congestion control traps */ | ||
298 | #define IB_CC_TRAP_KEY_VIOLATION 0x0000 | ||
299 | |||
300 | struct ib_cc_trap_key_violation_attr { | ||
301 | __be16 source_lid; | ||
302 | u8 method; | ||
303 | u8 reserved1; | ||
304 | __be16 attrib_id; | ||
305 | __be32 attrib_mod; | ||
306 | __be32 qp; | ||
307 | __be64 cckey; | ||
308 | u8 sgid[16]; | ||
309 | u8 padding[24]; | ||
310 | } __packed; | ||
311 | |||
312 | /* Congestion info flags */ | ||
313 | #define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1 | ||
314 | #define IB_CC_TABLE_CAP_DEFAULT 31 | ||
315 | |||
316 | struct ib_cc_info_attr { | ||
317 | __be16 congestion_info; | ||
318 | u8 control_table_cap; /* Multiple of 64 entry unit CCTs */ | ||
319 | } __packed; | ||
320 | |||
321 | struct ib_cc_key_info_attr { | ||
322 | __be64 cckey; | ||
323 | u8 protect; | ||
324 | __be16 lease_period; | ||
325 | __be16 violations; | ||
326 | } __packed; | ||
327 | |||
328 | #define IB_CC_CL_CA_LOGEVENTS_LEN 208 | ||
329 | |||
330 | struct ib_cc_log_attr { | ||
331 | u8 log_type; | ||
332 | u8 congestion_flags; | ||
333 | __be16 threshold_event_counter; | ||
334 | __be16 threshold_congestion_event_map; | ||
335 | __be16 current_time_stamp; | ||
336 | u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN]; | ||
337 | } __packed; | ||
338 | |||
339 | #define IB_CC_CLEC_SERVICETYPE_RC 0x0 | ||
340 | #define IB_CC_CLEC_SERVICETYPE_UC 0x1 | ||
341 | #define IB_CC_CLEC_SERVICETYPE_RD 0x2 | ||
342 | #define IB_CC_CLEC_SERVICETYPE_UD 0x3 | ||
343 | |||
344 | struct ib_cc_log_event { | ||
345 | u8 local_qp_cn_entry; | ||
346 | u8 remote_qp_number_cn_entry[3]; | ||
347 | u8 sl_cn_entry:4; | ||
348 | u8 service_type_cn_entry:4; | ||
349 | __be32 remote_lid_cn_entry; | ||
350 | __be32 timestamp_cn_entry; | ||
351 | } __packed; | ||
352 | |||
353 | /* Sixteen congestion entries */ | ||
354 | #define IB_CC_CCS_ENTRIES 16 | ||
355 | |||
356 | /* Port control flags */ | ||
357 | #define IB_CC_CCS_PC_SL_BASED 0x01 | ||
358 | |||
359 | struct ib_cc_congestion_entry { | ||
360 | u8 ccti_increase; | ||
361 | __be16 ccti_timer; | ||
362 | u8 trigger_threshold; | ||
363 | u8 ccti_min; /* min CCTI for cc table */ | ||
364 | } __packed; | ||
365 | |||
366 | struct ib_cc_congestion_entry_shadow { | ||
367 | u8 ccti_increase; | ||
368 | u16 ccti_timer; | ||
369 | u8 trigger_threshold; | ||
370 | u8 ccti_min; /* min CCTI for cc table */ | ||
371 | } __packed; | ||
372 | |||
373 | struct ib_cc_congestion_setting_attr { | ||
374 | __be16 port_control; | ||
375 | __be16 control_map; | ||
376 | struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES]; | ||
377 | } __packed; | ||
378 | |||
379 | struct ib_cc_congestion_setting_attr_shadow { | ||
380 | u16 port_control; | ||
381 | u16 control_map; | ||
382 | struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES]; | ||
383 | } __packed; | ||
384 | |||
385 | #define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1 | ||
386 | #define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1 | ||
387 | |||
388 | /* 64 Congestion Control table entries in a single MAD */ | ||
389 | #define IB_CCT_ENTRIES 64 | ||
390 | #define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2) | ||
391 | |||
392 | struct ib_cc_table_entry { | ||
393 | __be16 entry; /* shift:2, multiplier:14 */ | ||
394 | }; | ||
395 | |||
396 | struct ib_cc_table_entry_shadow { | ||
397 | u16 entry; /* shift:2, multiplier:14 */ | ||
398 | }; | ||
399 | |||
400 | struct ib_cc_table_attr { | ||
401 | __be16 ccti_limit; /* max CCTI for cc table */ | ||
402 | struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES]; | ||
403 | } __packed; | ||
404 | |||
405 | struct ib_cc_table_attr_shadow { | ||
406 | u16 ccti_limit; /* max CCTI for cc table */ | ||
407 | struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES]; | ||
408 | } __packed; | ||
409 | |||
410 | #define CC_TABLE_SHADOW_MAX \ | ||
411 | (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES) | ||
412 | |||
413 | struct cc_table_shadow { | ||
414 | u16 ccti_last_entry; | ||
415 | struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX]; | ||
416 | } __packed; | ||
417 | |||
418 | #endif /* _QIB_MAD_H */ | ||
419 | /* | ||
226 | * The PortSamplesControl.CounterMasks field is an array of 3 bit fields | 420 | * The PortSamplesControl.CounterMasks field is an array of 3 bit fields |
227 | * which specify the N'th counter's capabilities. See ch. 16.1.3.2. | 421 | * which specify the N'th counter's capabilities. See ch. 16.1.3.2. |
228 | * We support 5 counters which only count the mandatory quantities. | 422 | * We support 5 counters which only count the mandatory quantities. |
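A note on the table entry layout added above: each congestion control table entry is a single __be16 whose "shift:2, multiplier:14" comment describes two packed fields. An illustrative pair of accessors, not part of the patch; the bit positions assume the shift sits in the top two bits and the multiplier in the remaining fourteen, and how the HCA combines them into an injection delay is left to the hardware/spec:

static inline u8 cct_entry_shift(const struct ib_cc_table_entry *e)
{
	/* top 2 bits of the 16-bit entry */
	return (be16_to_cpu(e->entry) >> 14) & 0x3;
}

static inline u16 cct_entry_multiplier(const struct ib_cc_table_entry *e)
{
	/* low 14 bits of the 16-bit entry */
	return be16_to_cpu(e->entry) & 0x3fff;
}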
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 08944e2ee334..e6687ded8210 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c | |||
@@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) | |||
47 | return container_of(ibfmr, struct qib_fmr, ibfmr); | 47 | return container_of(ibfmr, struct qib_fmr, ibfmr); |
48 | } | 48 | } |
49 | 49 | ||
50 | static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, | ||
51 | int count) | ||
52 | { | ||
53 | int m, i = 0; | ||
54 | int rval = 0; | ||
55 | |||
56 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; | ||
57 | for (; i < m; i++) { | ||
58 | mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); | ||
59 | if (!mr->map[i]) | ||
60 | goto bail; | ||
61 | } | ||
62 | mr->mapsz = m; | ||
63 | init_completion(&mr->comp); | ||
64 | /* count returning the ptr to user */ | ||
65 | atomic_set(&mr->refcount, 1); | ||
66 | mr->pd = pd; | ||
67 | mr->max_segs = count; | ||
68 | out: | ||
69 | return rval; | ||
70 | bail: | ||
71 | while (i) | ||
72 | kfree(mr->map[--i]); | ||
73 | rval = -ENOMEM; | ||
74 | goto out; | ||
75 | } | ||
76 | |||
77 | static void deinit_qib_mregion(struct qib_mregion *mr) | ||
78 | { | ||
79 | int i = mr->mapsz; | ||
80 | |||
81 | mr->mapsz = 0; | ||
82 | while (i) | ||
83 | kfree(mr->map[--i]); | ||
84 | } | ||
85 | |||
86 | |||
50 | /** | 87 | /** |
51 | * qib_get_dma_mr - get a DMA memory region | 88 | * qib_get_dma_mr - get a DMA memory region |
52 | * @pd: protection domain for this memory region | 89 | * @pd: protection domain for this memory region |
@@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) | |||
58 | */ | 95 | */ |
59 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) | 96 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) |
60 | { | 97 | { |
61 | struct qib_ibdev *dev = to_idev(pd->device); | 98 | struct qib_mr *mr = NULL; |
62 | struct qib_mr *mr; | ||
63 | struct ib_mr *ret; | 99 | struct ib_mr *ret; |
64 | unsigned long flags; | 100 | int rval; |
65 | 101 | ||
66 | if (to_ipd(pd)->user) { | 102 | if (to_ipd(pd)->user) { |
67 | ret = ERR_PTR(-EPERM); | 103 | ret = ERR_PTR(-EPERM); |
@@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) | |||
74 | goto bail; | 110 | goto bail; |
75 | } | 111 | } |
76 | 112 | ||
77 | mr->mr.access_flags = acc; | 113 | rval = init_qib_mregion(&mr->mr, pd, 0); |
78 | atomic_set(&mr->mr.refcount, 0); | 114 | if (rval) { |
115 | ret = ERR_PTR(rval); | ||
116 | goto bail; | ||
117 | } | ||
118 | |||
79 | 119 | ||
80 | spin_lock_irqsave(&dev->lk_table.lock, flags); | 120 | rval = qib_alloc_lkey(&mr->mr, 1); |
81 | if (!dev->dma_mr) | 121 | if (rval) { |
82 | dev->dma_mr = &mr->mr; | 122 | ret = ERR_PTR(rval); |
83 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | 123 | goto bail_mregion; |
124 | } | ||
84 | 125 | ||
126 | mr->mr.access_flags = acc; | ||
85 | ret = &mr->ibmr; | 127 | ret = &mr->ibmr; |
128 | done: | ||
129 | return ret; | ||
86 | 130 | ||
131 | bail_mregion: | ||
132 | deinit_qib_mregion(&mr->mr); | ||
87 | bail: | 133 | bail: |
88 | return ret; | 134 | kfree(mr); |
135 | goto done; | ||
89 | } | 136 | } |
90 | 137 | ||
91 | static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) | 138 | static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) |
92 | { | 139 | { |
93 | struct qib_mr *mr; | 140 | struct qib_mr *mr; |
94 | int m, i = 0; | 141 | int rval = -ENOMEM; |
142 | int m; | ||
95 | 143 | ||
96 | /* Allocate struct plus pointers to first level page tables. */ | 144 | /* Allocate struct plus pointers to first level page tables. */ |
97 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; | 145 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; |
98 | mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); | 146 | mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); |
99 | if (!mr) | 147 | if (!mr) |
100 | goto done; | 148 | goto bail; |
101 | |||
102 | /* Allocate first level page tables. */ | ||
103 | for (; i < m; i++) { | ||
104 | mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); | ||
105 | if (!mr->mr.map[i]) | ||
106 | goto bail; | ||
107 | } | ||
108 | mr->mr.mapsz = m; | ||
109 | mr->mr.page_shift = 0; | ||
110 | mr->mr.max_segs = count; | ||
111 | 149 | ||
150 | rval = init_qib_mregion(&mr->mr, pd, count); | ||
151 | if (rval) | ||
152 | goto bail; | ||
112 | /* | 153 | /* |
113 | * ib_reg_phys_mr() will initialize mr->ibmr except for | 154 | * ib_reg_phys_mr() will initialize mr->ibmr except for |
114 | * lkey and rkey. | 155 | * lkey and rkey. |
115 | */ | 156 | */ |
116 | if (!qib_alloc_lkey(lk_table, &mr->mr)) | 157 | rval = qib_alloc_lkey(&mr->mr, 0); |
117 | goto bail; | 158 | if (rval) |
159 | goto bail_mregion; | ||
118 | mr->ibmr.lkey = mr->mr.lkey; | 160 | mr->ibmr.lkey = mr->mr.lkey; |
119 | mr->ibmr.rkey = mr->mr.lkey; | 161 | mr->ibmr.rkey = mr->mr.lkey; |
162 | done: | ||
163 | return mr; | ||
120 | 164 | ||
121 | atomic_set(&mr->mr.refcount, 0); | 165 | bail_mregion: |
122 | goto done; | 166 | deinit_qib_mregion(&mr->mr); |
123 | |||
124 | bail: | 167 | bail: |
125 | while (i) | ||
126 | kfree(mr->mr.map[--i]); | ||
127 | kfree(mr); | 168 | kfree(mr); |
128 | mr = NULL; | 169 | mr = ERR_PTR(rval); |
129 | 170 | goto done; | |
130 | done: | ||
131 | return mr; | ||
132 | } | 171 | } |
133 | 172 | ||
134 | /** | 173 | /** |
@@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, | |||
148 | int n, m, i; | 187 | int n, m, i; |
149 | struct ib_mr *ret; | 188 | struct ib_mr *ret; |
150 | 189 | ||
151 | mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); | 190 | mr = alloc_mr(num_phys_buf, pd); |
152 | if (mr == NULL) { | 191 | if (IS_ERR(mr)) { |
153 | ret = ERR_PTR(-ENOMEM); | 192 | ret = (struct ib_mr *)mr; |
154 | goto bail; | 193 | goto bail; |
155 | } | 194 | } |
156 | 195 | ||
157 | mr->mr.pd = pd; | ||
158 | mr->mr.user_base = *iova_start; | 196 | mr->mr.user_base = *iova_start; |
159 | mr->mr.iova = *iova_start; | 197 | mr->mr.iova = *iova_start; |
160 | mr->mr.length = 0; | ||
161 | mr->mr.offset = 0; | ||
162 | mr->mr.access_flags = acc; | 198 | mr->mr.access_flags = acc; |
163 | mr->umem = NULL; | ||
164 | 199 | ||
165 | m = 0; | 200 | m = 0; |
166 | n = 0; | 201 | n = 0; |
@@ -186,7 +221,6 @@ bail: | |||
186 | * @pd: protection domain for this memory region | 221 | * @pd: protection domain for this memory region |
187 | * @start: starting userspace address | 222 | * @start: starting userspace address |
188 | * @length: length of region to register | 223 | * @length: length of region to register |
189 | * @virt_addr: virtual address to use (from HCA's point of view) | ||
190 | * @mr_access_flags: access flags for this memory region | 224 | * @mr_access_flags: access flags for this memory region |
191 | * @udata: unused by the QLogic_IB driver | 225 | * @udata: unused by the QLogic_IB driver |
192 | * | 226 | * |
@@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
216 | list_for_each_entry(chunk, &umem->chunk_list, list) | 250 | list_for_each_entry(chunk, &umem->chunk_list, list) |
217 | n += chunk->nents; | 251 | n += chunk->nents; |
218 | 252 | ||
219 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | 253 | mr = alloc_mr(n, pd); |
220 | if (!mr) { | 254 | if (IS_ERR(mr)) { |
221 | ret = ERR_PTR(-ENOMEM); | 255 | ret = (struct ib_mr *)mr; |
222 | ib_umem_release(umem); | 256 | ib_umem_release(umem); |
223 | goto bail; | 257 | goto bail; |
224 | } | 258 | } |
225 | 259 | ||
226 | mr->mr.pd = pd; | ||
227 | mr->mr.user_base = start; | 260 | mr->mr.user_base = start; |
228 | mr->mr.iova = virt_addr; | 261 | mr->mr.iova = virt_addr; |
229 | mr->mr.length = length; | 262 | mr->mr.length = length; |
@@ -271,21 +304,25 @@ bail: | |||
271 | int qib_dereg_mr(struct ib_mr *ibmr) | 304 | int qib_dereg_mr(struct ib_mr *ibmr) |
272 | { | 305 | { |
273 | struct qib_mr *mr = to_imr(ibmr); | 306 | struct qib_mr *mr = to_imr(ibmr); |
274 | struct qib_ibdev *dev = to_idev(ibmr->device); | 307 | int ret = 0; |
275 | int ret; | 308 | unsigned long timeout; |
276 | int i; | 309 | |
277 | 310 | qib_free_lkey(&mr->mr); | |
278 | ret = qib_free_lkey(dev, &mr->mr); | 311 | |
279 | if (ret) | 312 | qib_put_mr(&mr->mr); /* will set completion if last */ |
280 | return ret; | 313 | timeout = wait_for_completion_timeout(&mr->mr.comp, |
281 | 314 | 5 * HZ); | |
282 | i = mr->mr.mapsz; | 315 | if (!timeout) { |
283 | while (i) | 316 | qib_get_mr(&mr->mr); |
284 | kfree(mr->mr.map[--i]); | 317 | ret = -EBUSY; |
318 | goto out; | ||
319 | } | ||
320 | deinit_qib_mregion(&mr->mr); | ||
285 | if (mr->umem) | 321 | if (mr->umem) |
286 | ib_umem_release(mr->umem); | 322 | ib_umem_release(mr->umem); |
287 | kfree(mr); | 323 | kfree(mr); |
288 | return 0; | 324 | out: |
325 | return ret; | ||
289 | } | 326 | } |
290 | 327 | ||
291 | /* | 328 | /* |
@@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) | |||
298 | { | 335 | { |
299 | struct qib_mr *mr; | 336 | struct qib_mr *mr; |
300 | 337 | ||
301 | mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); | 338 | mr = alloc_mr(max_page_list_len, pd); |
302 | if (mr == NULL) | 339 | if (IS_ERR(mr)) |
303 | return ERR_PTR(-ENOMEM); | 340 | return (struct ib_mr *)mr; |
304 | |||
305 | mr->mr.pd = pd; | ||
306 | mr->mr.user_base = 0; | ||
307 | mr->mr.iova = 0; | ||
308 | mr->mr.length = 0; | ||
309 | mr->mr.offset = 0; | ||
310 | mr->mr.access_flags = 0; | ||
311 | mr->umem = NULL; | ||
312 | 341 | ||
313 | return &mr->ibmr; | 342 | return &mr->ibmr; |
314 | } | 343 | } |
@@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) | |||
322 | if (size > PAGE_SIZE) | 351 | if (size > PAGE_SIZE) |
323 | return ERR_PTR(-EINVAL); | 352 | return ERR_PTR(-EINVAL); |
324 | 353 | ||
325 | pl = kmalloc(sizeof *pl, GFP_KERNEL); | 354 | pl = kzalloc(sizeof *pl, GFP_KERNEL); |
326 | if (!pl) | 355 | if (!pl) |
327 | return ERR_PTR(-ENOMEM); | 356 | return ERR_PTR(-ENOMEM); |
328 | 357 | ||
329 | pl->page_list = kmalloc(size, GFP_KERNEL); | 358 | pl->page_list = kzalloc(size, GFP_KERNEL); |
330 | if (!pl->page_list) | 359 | if (!pl->page_list) |
331 | goto err_free; | 360 | goto err_free; |
332 | 361 | ||
@@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
355 | struct ib_fmr_attr *fmr_attr) | 384 | struct ib_fmr_attr *fmr_attr) |
356 | { | 385 | { |
357 | struct qib_fmr *fmr; | 386 | struct qib_fmr *fmr; |
358 | int m, i = 0; | 387 | int m; |
359 | struct ib_fmr *ret; | 388 | struct ib_fmr *ret; |
389 | int rval = -ENOMEM; | ||
360 | 390 | ||
361 | /* Allocate struct plus pointers to first level page tables. */ | 391 | /* Allocate struct plus pointers to first level page tables. */ |
362 | m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; | 392 | m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; |
363 | fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); | 393 | fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); |
364 | if (!fmr) | 394 | if (!fmr) |
365 | goto bail; | 395 | goto bail; |
366 | 396 | ||
367 | /* Allocate first level page tables. */ | 397 | rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); |
368 | for (; i < m; i++) { | 398 | if (rval) |
369 | fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], | 399 | goto bail; |
370 | GFP_KERNEL); | ||
371 | if (!fmr->mr.map[i]) | ||
372 | goto bail; | ||
373 | } | ||
374 | fmr->mr.mapsz = m; | ||
375 | 400 | ||
376 | /* | 401 | /* |
377 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & | 402 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & |
378 | * rkey. | 403 | * rkey. |
379 | */ | 404 | */ |
380 | if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) | 405 | rval = qib_alloc_lkey(&fmr->mr, 0); |
381 | goto bail; | 406 | if (rval) |
407 | goto bail_mregion; | ||
382 | fmr->ibfmr.rkey = fmr->mr.lkey; | 408 | fmr->ibfmr.rkey = fmr->mr.lkey; |
383 | fmr->ibfmr.lkey = fmr->mr.lkey; | 409 | fmr->ibfmr.lkey = fmr->mr.lkey; |
384 | /* | 410 | /* |
385 | * Resources are allocated but no valid mapping (RKEY can't be | 411 | * Resources are allocated but no valid mapping (RKEY can't be |
386 | * used). | 412 | * used). |
387 | */ | 413 | */ |
388 | fmr->mr.pd = pd; | ||
389 | fmr->mr.user_base = 0; | ||
390 | fmr->mr.iova = 0; | ||
391 | fmr->mr.length = 0; | ||
392 | fmr->mr.offset = 0; | ||
393 | fmr->mr.access_flags = mr_access_flags; | 414 | fmr->mr.access_flags = mr_access_flags; |
394 | fmr->mr.max_segs = fmr_attr->max_pages; | 415 | fmr->mr.max_segs = fmr_attr->max_pages; |
395 | fmr->mr.page_shift = fmr_attr->page_shift; | 416 | fmr->mr.page_shift = fmr_attr->page_shift; |
396 | 417 | ||
397 | atomic_set(&fmr->mr.refcount, 0); | ||
398 | ret = &fmr->ibfmr; | 418 | ret = &fmr->ibfmr; |
399 | goto done; | 419 | done: |
420 | return ret; | ||
400 | 421 | ||
422 | bail_mregion: | ||
423 | deinit_qib_mregion(&fmr->mr); | ||
401 | bail: | 424 | bail: |
402 | while (i) | ||
403 | kfree(fmr->mr.map[--i]); | ||
404 | kfree(fmr); | 425 | kfree(fmr); |
405 | ret = ERR_PTR(-ENOMEM); | 426 | ret = ERR_PTR(rval); |
406 | 427 | goto done; | |
407 | done: | ||
408 | return ret; | ||
409 | } | 428 | } |
410 | 429 | ||
411 | /** | 430 | /** |
@@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |||
428 | u32 ps; | 447 | u32 ps; |
429 | int ret; | 448 | int ret; |
430 | 449 | ||
431 | if (atomic_read(&fmr->mr.refcount)) | 450 | i = atomic_read(&fmr->mr.refcount); |
451 | if (i > 2) | ||
432 | return -EBUSY; | 452 | return -EBUSY; |
433 | 453 | ||
434 | if (list_len > fmr->mr.max_segs) { | 454 | if (list_len > fmr->mr.max_segs) { |
@@ -490,16 +510,27 @@ int qib_unmap_fmr(struct list_head *fmr_list) | |||
490 | int qib_dealloc_fmr(struct ib_fmr *ibfmr) | 510 | int qib_dealloc_fmr(struct ib_fmr *ibfmr) |
491 | { | 511 | { |
492 | struct qib_fmr *fmr = to_ifmr(ibfmr); | 512 | struct qib_fmr *fmr = to_ifmr(ibfmr); |
493 | int ret; | 513 | int ret = 0; |
494 | int i; | 514 | unsigned long timeout; |
515 | |||
516 | qib_free_lkey(&fmr->mr); | ||
517 | qib_put_mr(&fmr->mr); /* will set completion if last */ | ||
518 | timeout = wait_for_completion_timeout(&fmr->mr.comp, | ||
519 | 5 * HZ); | ||
520 | if (!timeout) { | ||
521 | qib_get_mr(&fmr->mr); | ||
522 | ret = -EBUSY; | ||
523 | goto out; | ||
524 | } | ||
525 | deinit_qib_mregion(&fmr->mr); | ||
526 | kfree(fmr); | ||
527 | out: | ||
528 | return ret; | ||
529 | } | ||
495 | 530 | ||
496 | ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); | 531 | void mr_rcu_callback(struct rcu_head *list) |
497 | if (ret) | 532 | { |
498 | return ret; | 533 | struct qib_mregion *mr = container_of(list, struct qib_mregion, list); |
499 | 534 | ||
500 | i = fmr->mr.mapsz; | 535 | complete(&mr->comp); |
501 | while (i) | ||
502 | kfree(fmr->mr.map[--i]); | ||
503 | kfree(fmr); | ||
504 | return 0; | ||
505 | } | 536 | } |
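The dereg/dealloc paths above now wait on mr->comp, which mr_rcu_callback() completes once the last reference is gone and an RCU grace period has passed. The qib_get_mr()/qib_put_mr() helpers themselves are not shown in this hunk; a plausible shape, for illustration only and consistent with how mr_rcu_callback() is used here:

static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

static inline void qib_put_mr(struct qib_mregion *mr)
{
	/* on the last put, defer the completion until RCU readers are done */
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}

With that shape, qib_dereg_mr() drops the reference taken at init time (init_qib_mregion() starts refcount at 1) and gives any outstanding users five seconds to finish before bailing out with -EBUSY.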
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 790646ef5106..062c301ebf53 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -224,8 +224,9 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, | |||
224 | } | 224 | } |
225 | do_intx: | 225 | do_intx: |
226 | if (ret) { | 226 | if (ret) { |
227 | qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " | 227 | qib_dev_err(dd, |
228 | "falling back to INTx\n", tabsize, ret); | 228 | "pci_enable_msix %d vectors failed: %d, falling back to INTx\n", |
229 | tabsize, ret); | ||
229 | tabsize = 0; | 230 | tabsize = 0; |
230 | } | 231 | } |
231 | for (i = 0; i < tabsize; i++) | 232 | for (i = 0; i < tabsize; i++) |
@@ -251,8 +252,9 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos) | |||
251 | 252 | ||
252 | ret = pci_enable_msi(pdev); | 253 | ret = pci_enable_msi(pdev); |
253 | if (ret) | 254 | if (ret) |
254 | qib_dev_err(dd, "pci_enable_msi failed: %d, " | 255 | qib_dev_err(dd, |
255 | "interrupts may not work\n", ret); | 256 | "pci_enable_msi failed: %d, interrupts may not work\n", |
257 | ret); | ||
256 | /* continue even if it fails, we may still be OK... */ | 258 | /* continue even if it fails, we may still be OK... */ |
257 | 259 | ||
258 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, | 260 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, |
@@ -358,8 +360,8 @@ int qib_reinit_intr(struct qib_devdata *dd) | |||
358 | 360 | ||
359 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); | 361 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); |
360 | if (!pos) { | 362 | if (!pos) { |
361 | qib_dev_err(dd, "Can't find MSI capability, " | 363 | qib_dev_err(dd, |
362 | "can't restore MSI settings\n"); | 364 | "Can't find MSI capability, can't restore MSI settings\n"); |
363 | ret = 0; | 365 | ret = 0; |
364 | /* nothing special for MSIx, just MSI */ | 366 | /* nothing special for MSIx, just MSI */ |
365 | goto bail; | 367 | goto bail; |
@@ -471,8 +473,8 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) | |||
471 | pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); | 473 | pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); |
472 | r = pci_enable_device(dd->pcidev); | 474 | r = pci_enable_device(dd->pcidev); |
473 | if (r) | 475 | if (r) |
474 | qib_dev_err(dd, "pci_enable_device failed after " | 476 | qib_dev_err(dd, |
475 | "reset: %d\n", r); | 477 | "pci_enable_device failed after reset: %d\n", r); |
476 | } | 478 | } |
477 | 479 | ||
478 | /* code to adjust PCIe capabilities. */ | 480 | /* code to adjust PCIe capabilities. */ |
@@ -717,15 +719,16 @@ qib_pci_mmio_enabled(struct pci_dev *pdev) | |||
717 | if (words == ~0ULL) | 719 | if (words == ~0ULL) |
718 | ret = PCI_ERS_RESULT_NEED_RESET; | 720 | ret = PCI_ERS_RESULT_NEED_RESET; |
719 | } | 721 | } |
720 | qib_devinfo(pdev, "QIB mmio_enabled function called, " | 722 | qib_devinfo(pdev, |
721 | "read wordscntr %Lx, returning %d\n", words, ret); | 723 | "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n", |
724 | words, ret); | ||
722 | return ret; | 725 | return ret; |
723 | } | 726 | } |
724 | 727 | ||
725 | static pci_ers_result_t | 728 | static pci_ers_result_t |
726 | qib_pci_slot_reset(struct pci_dev *pdev) | 729 | qib_pci_slot_reset(struct pci_dev *pdev) |
727 | { | 730 | { |
728 | qib_devinfo(pdev, "QIB link_reset function called, ignored\n"); | 731 | qib_devinfo(pdev, "QIB slot_reset function called, ignored\n"); |
729 | return PCI_ERS_RESULT_CAN_RECOVER; | 732 | return PCI_ERS_RESULT_CAN_RECOVER; |
730 | } | 733 | } |
731 | 734 | ||
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 1ce56b51ab1a..4850d03870c2 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -250,23 +250,33 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
250 | 250 | ||
251 | spin_lock_irqsave(&dev->qpt_lock, flags); | 251 | spin_lock_irqsave(&dev->qpt_lock, flags); |
252 | 252 | ||
253 | if (ibp->qp0 == qp) { | 253 | if (rcu_dereference_protected(ibp->qp0, |
254 | lockdep_is_held(&dev->qpt_lock)) == qp) { | ||
254 | atomic_dec(&qp->refcount); | 255 | atomic_dec(&qp->refcount); |
255 | rcu_assign_pointer(ibp->qp0, NULL); | 256 | rcu_assign_pointer(ibp->qp0, NULL); |
256 | } else if (ibp->qp1 == qp) { | 257 | } else if (rcu_dereference_protected(ibp->qp1, |
258 | lockdep_is_held(&dev->qpt_lock)) == qp) { | ||
257 | atomic_dec(&qp->refcount); | 259 | atomic_dec(&qp->refcount); |
258 | rcu_assign_pointer(ibp->qp1, NULL); | 260 | rcu_assign_pointer(ibp->qp1, NULL); |
259 | } else { | 261 | } else { |
260 | struct qib_qp *q, **qpp; | 262 | struct qib_qp *q; |
263 | struct qib_qp __rcu **qpp; | ||
261 | 264 | ||
262 | qpp = &dev->qp_table[n]; | 265 | qpp = &dev->qp_table[n]; |
263 | for (; (q = *qpp) != NULL; qpp = &q->next) | 266 | q = rcu_dereference_protected(*qpp, |
267 | lockdep_is_held(&dev->qpt_lock)); | ||
268 | for (; q; qpp = &q->next) { | ||
264 | if (q == qp) { | 269 | if (q == qp) { |
265 | atomic_dec(&qp->refcount); | 270 | atomic_dec(&qp->refcount); |
266 | rcu_assign_pointer(*qpp, qp->next); | 271 | *qpp = qp->next; |
267 | qp->next = NULL; | 272 | rcu_assign_pointer(qp->next, NULL); |
273 | q = rcu_dereference_protected(*qpp, | ||
274 | lockdep_is_held(&dev->qpt_lock)); | ||
268 | break; | 275 | break; |
269 | } | 276 | } |
277 | q = rcu_dereference_protected(*qpp, | ||
278 | lockdep_is_held(&dev->qpt_lock)); | ||
279 | } | ||
270 | } | 280 | } |
271 | 281 | ||
272 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 282 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
@@ -302,10 +312,12 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) | |||
302 | 312 | ||
303 | spin_lock_irqsave(&dev->qpt_lock, flags); | 313 | spin_lock_irqsave(&dev->qpt_lock, flags); |
304 | for (n = 0; n < dev->qp_table_size; n++) { | 314 | for (n = 0; n < dev->qp_table_size; n++) { |
305 | qp = dev->qp_table[n]; | 315 | qp = rcu_dereference_protected(dev->qp_table[n], |
316 | lockdep_is_held(&dev->qpt_lock)); | ||
306 | rcu_assign_pointer(dev->qp_table[n], NULL); | 317 | rcu_assign_pointer(dev->qp_table[n], NULL); |
307 | 318 | ||
308 | for (; qp; qp = qp->next) | 319 | for (; qp; qp = rcu_dereference_protected(qp->next, |
320 | lockdep_is_held(&dev->qpt_lock))) | ||
309 | qp_inuse++; | 321 | qp_inuse++; |
310 | } | 322 | } |
311 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | 323 | spin_unlock_irqrestore(&dev->qpt_lock, flags); |
@@ -337,7 +349,8 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) | |||
337 | unsigned n = qpn_hash(dev, qpn); | 349 | unsigned n = qpn_hash(dev, qpn); |
338 | 350 | ||
339 | rcu_read_lock(); | 351 | rcu_read_lock(); |
340 | for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next) | 352 | for (qp = rcu_dereference(dev->qp_table[n]); qp; |
353 | qp = rcu_dereference(qp->next)) | ||
341 | if (qp->ibqp.qp_num == qpn) | 354 | if (qp->ibqp.qp_num == qpn) |
342 | break; | 355 | break; |
343 | } | 356 | } |
@@ -406,18 +419,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
406 | unsigned n; | 419 | unsigned n; |
407 | 420 | ||
408 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | 421 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) |
409 | while (qp->s_rdma_read_sge.num_sge) { | 422 | qib_put_ss(&qp->s_rdma_read_sge); |
410 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | ||
411 | if (--qp->s_rdma_read_sge.num_sge) | ||
412 | qp->s_rdma_read_sge.sge = | ||
413 | *qp->s_rdma_read_sge.sg_list++; | ||
414 | } | ||
415 | 423 | ||
416 | while (qp->r_sge.num_sge) { | 424 | qib_put_ss(&qp->r_sge); |
417 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
418 | if (--qp->r_sge.num_sge) | ||
419 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
420 | } | ||
421 | 425 | ||
422 | if (clr_sends) { | 426 | if (clr_sends) { |
423 | while (qp->s_last != qp->s_head) { | 427 | while (qp->s_last != qp->s_head) { |
@@ -427,7 +431,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
427 | for (i = 0; i < wqe->wr.num_sge; i++) { | 431 | for (i = 0; i < wqe->wr.num_sge; i++) { |
428 | struct qib_sge *sge = &wqe->sg_list[i]; | 432 | struct qib_sge *sge = &wqe->sg_list[i]; |
429 | 433 | ||
430 | atomic_dec(&sge->mr->refcount); | 434 | qib_put_mr(sge->mr); |
431 | } | 435 | } |
432 | if (qp->ibqp.qp_type == IB_QPT_UD || | 436 | if (qp->ibqp.qp_type == IB_QPT_UD || |
433 | qp->ibqp.qp_type == IB_QPT_SMI || | 437 | qp->ibqp.qp_type == IB_QPT_SMI || |
@@ -437,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
437 | qp->s_last = 0; | 441 | qp->s_last = 0; |
438 | } | 442 | } |
439 | if (qp->s_rdma_mr) { | 443 | if (qp->s_rdma_mr) { |
440 | atomic_dec(&qp->s_rdma_mr->refcount); | 444 | qib_put_mr(qp->s_rdma_mr); |
441 | qp->s_rdma_mr = NULL; | 445 | qp->s_rdma_mr = NULL; |
442 | } | 446 | } |
443 | } | 447 | } |
@@ -450,7 +454,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
450 | 454 | ||
451 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && | 455 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && |
452 | e->rdma_sge.mr) { | 456 | e->rdma_sge.mr) { |
453 | atomic_dec(&e->rdma_sge.mr->refcount); | 457 | qib_put_mr(e->rdma_sge.mr); |
454 | e->rdma_sge.mr = NULL; | 458 | e->rdma_sge.mr = NULL; |
455 | } | 459 | } |
456 | } | 460 | } |
@@ -495,7 +499,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | |||
495 | if (!(qp->s_flags & QIB_S_BUSY)) { | 499 | if (!(qp->s_flags & QIB_S_BUSY)) { |
496 | qp->s_hdrwords = 0; | 500 | qp->s_hdrwords = 0; |
497 | if (qp->s_rdma_mr) { | 501 | if (qp->s_rdma_mr) { |
498 | atomic_dec(&qp->s_rdma_mr->refcount); | 502 | qib_put_mr(qp->s_rdma_mr); |
499 | qp->s_rdma_mr = NULL; | 503 | qp->s_rdma_mr = NULL; |
500 | } | 504 | } |
501 | if (qp->s_tx) { | 505 | if (qp->s_tx) { |
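Two patterns recur in the qib_qp.c hunks above: QP table pointers are now annotated __rcu and always read through rcu_dereference()/rcu_dereference_protected(), and the open-coded "drop every SGE reference" loops have been folded into qib_put_ss(). That helper is not shown in these hunks; its body presumably mirrors the loop it replaces, with qib_put_mr() doing the reference drop (illustrative sketch only):

static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}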
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index b641416148eb..3ab341320ead 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
95 | case OP(RDMA_READ_RESPONSE_ONLY): | 95 | case OP(RDMA_READ_RESPONSE_ONLY): |
96 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | 96 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; |
97 | if (e->rdma_sge.mr) { | 97 | if (e->rdma_sge.mr) { |
98 | atomic_dec(&e->rdma_sge.mr->refcount); | 98 | qib_put_mr(e->rdma_sge.mr); |
99 | e->rdma_sge.mr = NULL; | 99 | e->rdma_sge.mr = NULL; |
100 | } | 100 | } |
101 | /* FALLTHROUGH */ | 101 | /* FALLTHROUGH */ |
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
133 | /* Copy SGE state in case we need to resend */ | 133 | /* Copy SGE state in case we need to resend */ |
134 | qp->s_rdma_mr = e->rdma_sge.mr; | 134 | qp->s_rdma_mr = e->rdma_sge.mr; |
135 | if (qp->s_rdma_mr) | 135 | if (qp->s_rdma_mr) |
136 | atomic_inc(&qp->s_rdma_mr->refcount); | 136 | qib_get_mr(qp->s_rdma_mr); |
137 | qp->s_ack_rdma_sge.sge = e->rdma_sge; | 137 | qp->s_ack_rdma_sge.sge = e->rdma_sge; |
138 | qp->s_ack_rdma_sge.num_sge = 1; | 138 | qp->s_ack_rdma_sge.num_sge = 1; |
139 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | 139 | qp->s_cur_sge = &qp->s_ack_rdma_sge; |
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
172 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | 172 | qp->s_cur_sge = &qp->s_ack_rdma_sge; |
173 | qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; | 173 | qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; |
174 | if (qp->s_rdma_mr) | 174 | if (qp->s_rdma_mr) |
175 | atomic_inc(&qp->s_rdma_mr->refcount); | 175 | qib_get_mr(qp->s_rdma_mr); |
176 | len = qp->s_ack_rdma_sge.sge.sge_length; | 176 | len = qp->s_ack_rdma_sge.sge.sge_length; |
177 | if (len > pmtu) | 177 | if (len > pmtu) |
178 | len = pmtu; | 178 | len = pmtu; |
@@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1012 | for (i = 0; i < wqe->wr.num_sge; i++) { | 1012 | for (i = 0; i < wqe->wr.num_sge; i++) { |
1013 | struct qib_sge *sge = &wqe->sg_list[i]; | 1013 | struct qib_sge *sge = &wqe->sg_list[i]; |
1014 | 1014 | ||
1015 | atomic_dec(&sge->mr->refcount); | 1015 | qib_put_mr(sge->mr); |
1016 | } | 1016 | } |
1017 | /* Post a send completion queue entry if requested. */ | 1017 | /* Post a send completion queue entry if requested. */ |
1018 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | 1018 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || |
@@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, | |||
1068 | for (i = 0; i < wqe->wr.num_sge; i++) { | 1068 | for (i = 0; i < wqe->wr.num_sge; i++) { |
1069 | struct qib_sge *sge = &wqe->sg_list[i]; | 1069 | struct qib_sge *sge = &wqe->sg_list[i]; |
1070 | 1070 | ||
1071 | atomic_dec(&sge->mr->refcount); | 1071 | qib_put_mr(sge->mr); |
1072 | } | 1072 | } |
1073 | /* Post a send completion queue entry if requested. */ | 1073 | /* Post a send completion queue entry if requested. */ |
1074 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | 1074 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || |
@@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | |||
1730 | if (unlikely(offset + len != e->rdma_sge.sge_length)) | 1730 | if (unlikely(offset + len != e->rdma_sge.sge_length)) |
1731 | goto unlock_done; | 1731 | goto unlock_done; |
1732 | if (e->rdma_sge.mr) { | 1732 | if (e->rdma_sge.mr) { |
1733 | atomic_dec(&e->rdma_sge.mr->refcount); | 1733 | qib_put_mr(e->rdma_sge.mr); |
1734 | e->rdma_sge.mr = NULL; | 1734 | e->rdma_sge.mr = NULL; |
1735 | } | 1735 | } |
1736 | if (len != 0) { | 1736 | if (len != 0) { |
@@ -2024,11 +2024,7 @@ send_last: | |||
2024 | if (unlikely(wc.byte_len > qp->r_len)) | 2024 | if (unlikely(wc.byte_len > qp->r_len)) |
2025 | goto nack_inv; | 2025 | goto nack_inv; |
2026 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | 2026 | qib_copy_sge(&qp->r_sge, data, tlen, 1); |
2027 | while (qp->r_sge.num_sge) { | 2027 | qib_put_ss(&qp->r_sge); |
2028 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
2029 | if (--qp->r_sge.num_sge) | ||
2030 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
2031 | } | ||
2032 | qp->r_msn++; | 2028 | qp->r_msn++; |
2033 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 2029 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
2034 | break; | 2030 | break; |
@@ -2116,7 +2112,7 @@ send_last: | |||
2116 | } | 2112 | } |
2117 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 2113 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
2118 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 2114 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { |
2119 | atomic_dec(&e->rdma_sge.mr->refcount); | 2115 | qib_put_mr(e->rdma_sge.mr); |
2120 | e->rdma_sge.mr = NULL; | 2116 | e->rdma_sge.mr = NULL; |
2121 | } | 2117 | } |
2122 | reth = &ohdr->u.rc.reth; | 2118 | reth = &ohdr->u.rc.reth; |
@@ -2188,7 +2184,7 @@ send_last: | |||
2188 | } | 2184 | } |
2189 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 2185 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
2190 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 2186 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { |
2191 | atomic_dec(&e->rdma_sge.mr->refcount); | 2187 | qib_put_mr(e->rdma_sge.mr); |
2192 | e->rdma_sge.mr = NULL; | 2188 | e->rdma_sge.mr = NULL; |
2193 | } | 2189 | } |
2194 | ateth = &ohdr->u.atomic_eth; | 2190 | ateth = &ohdr->u.atomic_eth; |
@@ -2210,7 +2206,7 @@ send_last: | |||
2210 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 2206 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
2211 | be64_to_cpu(ateth->compare_data), | 2207 | be64_to_cpu(ateth->compare_data), |
2212 | sdata); | 2208 | sdata); |
2213 | atomic_dec(&qp->r_sge.sge.mr->refcount); | 2209 | qib_put_mr(qp->r_sge.sge.mr); |
2214 | qp->r_sge.num_sge = 0; | 2210 | qp->r_sge.num_sge = 0; |
2215 | e->opcode = opcode; | 2211 | e->opcode = opcode; |
2216 | e->sent = 0; | 2212 | e->sent = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index c0ee7e095d81..357b6cfcd46c 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -110,7 +110,7 @@ bad_lkey: | |||
110 | while (j) { | 110 | while (j) { |
111 | struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; | 111 | struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; |
112 | 112 | ||
113 | atomic_dec(&sge->mr->refcount); | 113 | qib_put_mr(sge->mr); |
114 | } | 114 | } |
115 | ss->num_sge = 0; | 115 | ss->num_sge = 0; |
116 | memset(&wc, 0, sizeof(wc)); | 116 | memset(&wc, 0, sizeof(wc)); |
@@ -501,7 +501,7 @@ again: | |||
501 | (u64) atomic64_add_return(sdata, maddr) - sdata : | 501 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
502 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 502 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
503 | sdata, wqe->wr.wr.atomic.swap); | 503 | sdata, wqe->wr.wr.atomic.swap); |
504 | atomic_dec(&qp->r_sge.sge.mr->refcount); | 504 | qib_put_mr(qp->r_sge.sge.mr); |
505 | qp->r_sge.num_sge = 0; | 505 | qp->r_sge.num_sge = 0; |
506 | goto send_comp; | 506 | goto send_comp; |
507 | 507 | ||
@@ -525,7 +525,7 @@ again: | |||
525 | sge->sge_length -= len; | 525 | sge->sge_length -= len; |
526 | if (sge->sge_length == 0) { | 526 | if (sge->sge_length == 0) { |
527 | if (!release) | 527 | if (!release) |
528 | atomic_dec(&sge->mr->refcount); | 528 | qib_put_mr(sge->mr); |
529 | if (--sqp->s_sge.num_sge) | 529 | if (--sqp->s_sge.num_sge) |
530 | *sge = *sqp->s_sge.sg_list++; | 530 | *sge = *sqp->s_sge.sg_list++; |
531 | } else if (sge->length == 0 && sge->mr->lkey) { | 531 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -542,11 +542,7 @@ again: | |||
542 | sqp->s_len -= len; | 542 | sqp->s_len -= len; |
543 | } | 543 | } |
544 | if (release) | 544 | if (release) |
545 | while (qp->r_sge.num_sge) { | 545 | qib_put_ss(&qp->r_sge); |
546 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
547 | if (--qp->r_sge.num_sge) | ||
548 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
549 | } | ||
550 | 546 | ||
551 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 547 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
552 | goto send_comp; | 548 | goto send_comp; |
@@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, | |||
782 | for (i = 0; i < wqe->wr.num_sge; i++) { | 778 | for (i = 0; i < wqe->wr.num_sge; i++) { |
783 | struct qib_sge *sge = &wqe->sg_list[i]; | 779 | struct qib_sge *sge = &wqe->sg_list[i]; |
784 | 780 | ||
785 | atomic_dec(&sge->mr->refcount); | 781 | qib_put_mr(sge->mr); |
786 | } | 782 | } |
787 | if (qp->ibqp.qp_type == IB_QPT_UD || | 783 | if (qp->ibqp.qp_type == IB_QPT_UD || |
788 | qp->ibqp.qp_type == IB_QPT_SMI || | 784 | qp->ibqp.qp_type == IB_QPT_SMI || |
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index ac065dd6b693..a322d5171a2c 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -342,15 +342,17 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, | |||
342 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | 342 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
343 | IB_CTRL2(chn), 0, 0); | 343 | IB_CTRL2(chn), 0, 0); |
344 | if (ret < 0) | 344 | if (ret < 0) |
345 | qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" | 345 | qib_dev_err(dd, |
346 | " (%s)\n", chn, where); | 346 | "Failed checking TRIMDONE, chn %d (%s)\n", |
347 | chn, where); | ||
347 | 348 | ||
348 | if (!(ret & 0x10)) { | 349 | if (!(ret & 0x10)) { |
349 | int probe; | 350 | int probe; |
350 | 351 | ||
351 | baduns |= (1 << chn); | 352 | baduns |= (1 << chn); |
352 | qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." | 353 | qib_dev_err(dd, |
353 | " (%s)\n", chn, ret, where); | 354 | "TRIMDONE cleared on chn %d (%02X). (%s)\n", |
355 | chn, ret, where); | ||
354 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | 356 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
355 | IB_PGUDP(0), 0, 0); | 357 | IB_PGUDP(0), 0, 0); |
356 | qib_dev_err(dd, "probe is %d (%02X)\n", | 358 | qib_dev_err(dd, "probe is %d (%02X)\n", |
@@ -375,8 +377,8 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, | |||
375 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | 377 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
376 | IB_CTRL2(chn), 0x10, 0x10); | 378 | IB_CTRL2(chn), 0x10, 0x10); |
377 | if (ret < 0) | 379 | if (ret < 0) |
378 | qib_dev_err(dd, "Failed re-setting " | 380 | qib_dev_err(dd, |
379 | "TRIMDONE, chn %d (%s)\n", | 381 | "Failed re-setting TRIMDONE, chn %d (%s)\n", |
380 | chn, where); | 382 | chn, where); |
381 | } | 383 | } |
382 | } | 384 | } |
@@ -1144,10 +1146,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | |||
1144 | if (ret < 0) { | 1146 | if (ret < 0) { |
1145 | int sloc = loc >> EPB_ADDR_SHF; | 1147 | int sloc = loc >> EPB_ADDR_SHF; |
1146 | 1148 | ||
1147 | qib_dev_err(dd, "pre-read failed: elt %d," | 1149 | qib_dev_err(dd, |
1148 | " addr 0x%X, chnl %d\n", | 1150 | "pre-read failed: elt %d, addr 0x%X, chnl %d\n", |
1149 | (sloc & 0xF), | 1151 | (sloc & 0xF), |
1150 | (sloc >> 9) & 0x3f, chnl); | 1152 | (sloc >> 9) & 0x3f, chnl); |
1151 | return ret; | 1153 | return ret; |
1152 | } | 1154 | } |
1153 | val = (ret & ~mask) | (val & mask); | 1155 | val = (ret & ~mask) | (val & mask); |
@@ -1157,9 +1159,9 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | |||
1157 | if (ret < 0) { | 1159 | if (ret < 0) { |
1158 | int sloc = loc >> EPB_ADDR_SHF; | 1160 | int sloc = loc >> EPB_ADDR_SHF; |
1159 | 1161 | ||
1160 | qib_dev_err(dd, "Global WR failed: elt %d," | 1162 | qib_dev_err(dd, |
1161 | " addr 0x%X, val %02X\n", | 1163 | "Global WR failed: elt %d, addr 0x%X, val %02X\n", |
1162 | (sloc & 0xF), (sloc >> 9) & 0x3f, val); | 1164 | (sloc & 0xF), (sloc >> 9) & 0x3f, val); |
1163 | } | 1165 | } |
1164 | return ret; | 1166 | return ret; |
1165 | } | 1167 | } |
@@ -1173,11 +1175,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | |||
1173 | if (ret < 0) { | 1175 | if (ret < 0) { |
1174 | int sloc = loc >> EPB_ADDR_SHF; | 1176 | int sloc = loc >> EPB_ADDR_SHF; |
1175 | 1177 | ||
1176 | qib_dev_err(dd, "Write failed: elt %d," | 1178 | qib_dev_err(dd, |
1177 | " addr 0x%X, chnl %d, val 0x%02X," | 1179 | "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n", |
1178 | " mask 0x%02X\n", | 1180 | (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, |
1179 | (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, | 1181 | val & 0xFF, mask & 0xFF); |
1180 | val & 0xFF, mask & 0xFF); | ||
1181 | break; | 1182 | break; |
1182 | } | 1183 | } |
1183 | } | 1184 | } |
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 12a9604310d7..3fc514431212 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -276,8 +277,8 @@ static int alloc_sdma(struct qib_pportdata *ppd) | |||
276 | GFP_KERNEL); | 277 | GFP_KERNEL); |
277 | 278 | ||
278 | if (!ppd->sdma_descq) { | 279 | if (!ppd->sdma_descq) { |
279 | qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " | 280 | qib_dev_err(ppd->dd, |
280 | "FIFO memory\n"); | 281 | "failed to allocate SendDMA descriptor FIFO memory\n"); |
281 | goto bail; | 282 | goto bail; |
282 | } | 283 | } |
283 | 284 | ||
@@ -285,8 +286,8 @@ static int alloc_sdma(struct qib_pportdata *ppd) | |||
285 | ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, | 286 | ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, |
286 | PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); | 287 | PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); |
287 | if (!ppd->sdma_head_dma) { | 288 | if (!ppd->sdma_head_dma) { |
288 | qib_dev_err(ppd->dd, "failed to allocate SendDMA " | 289 | qib_dev_err(ppd->dd, |
289 | "head memory\n"); | 290 | "failed to allocate SendDMA head memory\n"); |
290 | goto cleanup_descq; | 291 | goto cleanup_descq; |
291 | } | 292 | } |
292 | ppd->sdma_head_dma[0] = 0; | 293 | ppd->sdma_head_dma[0] = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index dd9cd49d0979..034cc821de5c 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -33,41 +34,7 @@ | |||
33 | #include <linux/ctype.h> | 34 | #include <linux/ctype.h> |
34 | 35 | ||
35 | #include "qib.h" | 36 | #include "qib.h" |
36 | 37 | #include "qib_mad.h" | |
37 | /** | ||
38 | * qib_parse_ushort - parse an unsigned short value in an arbitrary base | ||
39 | * @str: the string containing the number | ||
40 | * @valp: where to put the result | ||
41 | * | ||
42 | * Returns the number of bytes consumed, or negative value on error. | ||
43 | */ | ||
44 | static int qib_parse_ushort(const char *str, unsigned short *valp) | ||
45 | { | ||
46 | unsigned long val; | ||
47 | char *end; | ||
48 | int ret; | ||
49 | |||
50 | if (!isdigit(str[0])) { | ||
51 | ret = -EINVAL; | ||
52 | goto bail; | ||
53 | } | ||
54 | |||
55 | val = simple_strtoul(str, &end, 0); | ||
56 | |||
57 | if (val > 0xffff) { | ||
58 | ret = -EINVAL; | ||
59 | goto bail; | ||
60 | } | ||
61 | |||
62 | *valp = val; | ||
63 | |||
64 | ret = end + 1 - str; | ||
65 | if (ret == 0) | ||
66 | ret = -EINVAL; | ||
67 | |||
68 | bail: | ||
69 | return ret; | ||
70 | } | ||
71 | 38 | ||
72 | /* start of per-port functions */ | 39 | /* start of per-port functions */ |
73 | /* | 40 | /* |
@@ -90,7 +57,11 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, | |||
90 | int ret; | 57 | int ret; |
91 | u16 val; | 58 | u16 val; |
92 | 59 | ||
93 | ret = qib_parse_ushort(buf, &val); | 60 | ret = kstrtou16(buf, 0, &val); |
61 | if (ret) { | ||
62 | qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); | ||
63 | return ret; | ||
64 | } | ||
94 | 65 | ||
95 | /* | 66 | /* |
96 | * Set the "intentional" heartbeat enable per either of | 67 | * Set the "intentional" heartbeat enable per either of |
@@ -99,10 +70,7 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, | |||
99 | * because entering loopback mode overrides it and automatically | 70 | * because entering loopback mode overrides it and automatically |
100 | * disables heartbeat. | 71 | * disables heartbeat. |
101 | */ | 72 | */ |
102 | if (ret >= 0) | 73 | ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); |
103 | ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); | ||
104 | if (ret < 0) | ||
105 | qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); | ||
106 | return ret < 0 ? ret : count; | 74 | return ret < 0 ? ret : count; |
107 | } | 75 | } |
108 | 76 | ||
@@ -126,12 +94,14 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, | |||
126 | int ret; | 94 | int ret; |
127 | u16 val; | 95 | u16 val; |
128 | 96 | ||
129 | ret = qib_parse_ushort(buf, &val); | 97 | ret = kstrtou16(buf, 0, &val); |
130 | if (ret > 0) | 98 | if (ret) { |
131 | qib_set_led_override(ppd, val); | ||
132 | else | ||
133 | qib_dev_err(dd, "attempt to set invalid LED override\n"); | 99 | qib_dev_err(dd, "attempt to set invalid LED override\n"); |
134 | return ret < 0 ? ret : count; | 100 | return ret; |
101 | } | ||
102 | |||
103 | qib_set_led_override(ppd, val); | ||
104 | return count; | ||
135 | } | 105 | } |
136 | 106 | ||
137 | static ssize_t show_status(struct qib_pportdata *ppd, char *buf) | 107 | static ssize_t show_status(struct qib_pportdata *ppd, char *buf) |
@@ -231,6 +201,98 @@ static struct attribute *port_default_attributes[] = { | |||
231 | NULL | 201 | NULL |
232 | }; | 202 | }; |
233 | 203 | ||
204 | /* | ||
205 | * Start of per-port congestion control structures and support code | ||
206 | */ | ||
207 | |||
208 | /* | ||
209 | * Congestion control table size followed by table entries | ||
210 | */ | ||
211 | static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj, | ||
212 | struct bin_attribute *bin_attr, | ||
213 | char *buf, loff_t pos, size_t count) | ||
214 | { | ||
215 | int ret; | ||
216 | struct qib_pportdata *ppd = | ||
217 | container_of(kobj, struct qib_pportdata, pport_cc_kobj); | ||
218 | |||
219 | if (!qib_cc_table_size || !ppd->ccti_entries_shadow) | ||
220 | return -EINVAL; | ||
221 | |||
222 | ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow) | ||
223 | + sizeof(__be16); | ||
224 | |||
225 | if (pos > ret) | ||
226 | return -EINVAL; | ||
227 | |||
228 | if (count > ret - pos) | ||
229 | count = ret - pos; | ||
230 | |||
231 | if (!count) | ||
232 | return count; | ||
233 | |||
234 | spin_lock(&ppd->cc_shadow_lock); | ||
235 | memcpy(buf, ppd->ccti_entries_shadow, count); | ||
236 | spin_unlock(&ppd->cc_shadow_lock); | ||
237 | |||
238 | return count; | ||
239 | } | ||
240 | |||
241 | static void qib_port_release(struct kobject *kobj) | ||
242 | { | ||
243 | /* nothing to do since memory is freed by qib_free_devdata() */ | ||
244 | } | ||
245 | |||
246 | static struct kobj_type qib_port_cc_ktype = { | ||
247 | .release = qib_port_release, | ||
248 | }; | ||
249 | |||
250 | static struct bin_attribute cc_table_bin_attr = { | ||
251 | .attr = {.name = "cc_table_bin", .mode = 0444}, | ||
252 | .read = read_cc_table_bin, | ||
253 | .size = PAGE_SIZE, | ||
254 | }; | ||
255 | |||
256 | /* | ||
257 | * Congestion settings: port control, control map and an array of 16 | ||
258 | * entries for the congestion entries - increase, timer, event log | ||
259 | * trigger threshold and the minimum injection rate delay. | ||
260 | */ | ||
261 | static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj, | ||
262 | struct bin_attribute *bin_attr, | ||
263 | char *buf, loff_t pos, size_t count) | ||
264 | { | ||
265 | int ret; | ||
266 | struct qib_pportdata *ppd = | ||
267 | container_of(kobj, struct qib_pportdata, pport_cc_kobj); | ||
268 | |||
269 | if (!qib_cc_table_size || !ppd->congestion_entries_shadow) | ||
270 | return -EINVAL; | ||
271 | |||
272 | ret = sizeof(struct ib_cc_congestion_setting_attr_shadow); | ||
273 | |||
274 | if (pos > ret) | ||
275 | return -EINVAL; | ||
276 | if (count > ret - pos) | ||
277 | count = ret - pos; | ||
278 | |||
279 | if (!count) | ||
280 | return count; | ||
281 | |||
282 | spin_lock(&ppd->cc_shadow_lock); | ||
283 | memcpy(buf, ppd->congestion_entries_shadow, count); | ||
284 | spin_unlock(&ppd->cc_shadow_lock); | ||
285 | |||
286 | return count; | ||
287 | } | ||
288 | |||
289 | static struct bin_attribute cc_setting_bin_attr = { | ||
290 | .attr = {.name = "cc_settings_bin", .mode = 0444}, | ||
291 | .read = read_cc_setting_bin, | ||
292 | .size = PAGE_SIZE, | ||
293 | }; | ||
294 | |||
295 | |||
234 | static ssize_t qib_portattr_show(struct kobject *kobj, | 296 | static ssize_t qib_portattr_show(struct kobject *kobj, |
235 | struct attribute *attr, char *buf) | 297 | struct attribute *attr, char *buf) |
236 | { | 298 | { |
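Note: both read_cc_table_bin() and read_cc_setting_bin() above follow the usual bin_attribute read contract: work out how many bytes the object holds, reject a start offset past the end, clamp the requested count to what remains, and return 0 to signal EOF, doing the copy under cc_shadow_lock so a concurrent congestion-control update is never seen half-written. A generic sketch of the bounds handling such a handler typically does; avail and src are illustrative names, not driver fields:

static ssize_t bounded_bin_read(char *buf, loff_t pos, size_t count,
				size_t avail, const char *src)
{
	if (pos > avail)		/* read starts past the data */
		return -EINVAL;
	if (count > avail - pos)	/* clamp to what is left */
		count = avail - pos;
	if (!count)
		return 0;		/* EOF for the reading process */

	memcpy(buf, src + pos, count);
	return count;
}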
@@ -253,10 +315,6 @@ static ssize_t qib_portattr_store(struct kobject *kobj, | |||
253 | return pattr->store(ppd, buf, len); | 315 | return pattr->store(ppd, buf, len); |
254 | } | 316 | } |
255 | 317 | ||
256 | static void qib_port_release(struct kobject *kobj) | ||
257 | { | ||
258 | /* nothing to do since memory is freed by qib_free_devdata() */ | ||
259 | } | ||
260 | 318 | ||
261 | static const struct sysfs_ops qib_port_ops = { | 319 | static const struct sysfs_ops qib_port_ops = { |
262 | .show = qib_portattr_show, | 320 | .show = qib_portattr_show, |
@@ -411,12 +469,12 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, | |||
411 | struct qib_pportdata *ppd = | 469 | struct qib_pportdata *ppd = |
412 | container_of(kobj, struct qib_pportdata, diagc_kobj); | 470 | container_of(kobj, struct qib_pportdata, diagc_kobj); |
413 | struct qib_ibport *qibp = &ppd->ibport_data; | 471 | struct qib_ibport *qibp = &ppd->ibport_data; |
414 | char *endp; | 472 | u32 val; |
415 | long val = simple_strtol(buf, &endp, 0); | 473 | int ret; |
416 | |||
417 | if (val < 0 || endp == buf) | ||
418 | return -EINVAL; | ||
419 | 474 | ||
475 | ret = kstrtou32(buf, 0, &val); | ||
476 | if (ret) | ||
477 | return ret; | ||
420 | *(u32 *)((char *) qibp + dattr->counter) = val; | 478 | *(u32 *)((char *) qibp + dattr->counter) = val; |
421 | return size; | 479 | return size; |
422 | } | 480 | } |
@@ -649,8 +707,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, | |||
649 | int ret; | 707 | int ret; |
650 | 708 | ||
651 | if (!port_num || port_num > dd->num_pports) { | 709 | if (!port_num || port_num > dd->num_pports) { |
652 | qib_dev_err(dd, "Skipping infiniband class with " | 710 | qib_dev_err(dd, |
653 | "invalid port %u\n", port_num); | 711 | "Skipping infiniband class with invalid port %u\n", |
712 | port_num); | ||
654 | ret = -ENODEV; | 713 | ret = -ENODEV; |
655 | goto bail; | 714 | goto bail; |
656 | } | 715 | } |
@@ -659,8 +718,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, | |||
659 | ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, | 718 | ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, |
660 | "linkcontrol"); | 719 | "linkcontrol"); |
661 | if (ret) { | 720 | if (ret) { |
662 | qib_dev_err(dd, "Skipping linkcontrol sysfs info, " | 721 | qib_dev_err(dd, |
663 | "(err %d) port %u\n", ret, port_num); | 722 | "Skipping linkcontrol sysfs info, (err %d) port %u\n", |
723 | ret, port_num); | ||
664 | goto bail; | 724 | goto bail; |
665 | } | 725 | } |
666 | kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); | 726 | kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); |
@@ -668,26 +728,70 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, | |||
668 | ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, | 728 | ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, |
669 | "sl2vl"); | 729 | "sl2vl"); |
670 | if (ret) { | 730 | if (ret) { |
671 | qib_dev_err(dd, "Skipping sl2vl sysfs info, " | 731 | qib_dev_err(dd, |
672 | "(err %d) port %u\n", ret, port_num); | 732 | "Skipping sl2vl sysfs info, (err %d) port %u\n", |
673 | goto bail_sl; | 733 | ret, port_num); |
734 | goto bail_link; | ||
674 | } | 735 | } |
675 | kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); | 736 | kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); |
676 | 737 | ||
677 | ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, | 738 | ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, |
678 | "diag_counters"); | 739 | "diag_counters"); |
679 | if (ret) { | 740 | if (ret) { |
680 | qib_dev_err(dd, "Skipping diag_counters sysfs info, " | 741 | qib_dev_err(dd, |
681 | "(err %d) port %u\n", ret, port_num); | 742 | "Skipping diag_counters sysfs info, (err %d) port %u\n", |
682 | goto bail_diagc; | 743 | ret, port_num); |
744 | goto bail_sl; | ||
683 | } | 745 | } |
684 | kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); | 746 | kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); |
685 | 747 | ||
748 | if (!qib_cc_table_size || !ppd->congestion_entries_shadow) | ||
749 | return 0; | ||
750 | |||
751 | ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype, | ||
752 | kobj, "CCMgtA"); | ||
753 | if (ret) { | ||
754 | qib_dev_err(dd, | ||
755 | "Skipping Congestion Control sysfs info, (err %d) port %u\n", | ||
756 | ret, port_num); | ||
757 | goto bail_diagc; | ||
758 | } | ||
759 | |||
760 | kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); | ||
761 | |||
762 | ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, | ||
763 | &cc_setting_bin_attr); | ||
764 | if (ret) { | ||
765 | qib_dev_err(dd, | ||
766 | "Skipping Congestion Control setting sysfs info, (err %d) port %u\n", | ||
767 | ret, port_num); | ||
768 | goto bail_cc; | ||
769 | } | ||
770 | |||
771 | ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, | ||
772 | &cc_table_bin_attr); | ||
773 | if (ret) { | ||
774 | qib_dev_err(dd, | ||
775 | "Skipping Congestion Control table sysfs info, (err %d) port %u\n", | ||
776 | ret, port_num); | ||
777 | goto bail_cc_entry_bin; | ||
778 | } | ||
779 | |||
780 | qib_devinfo(dd->pcidev, | ||
781 | "IB%u: Congestion Control Agent enabled for port %d\n", | ||
782 | dd->unit, port_num); | ||
783 | |||
686 | return 0; | 784 | return 0; |
687 | 785 | ||
786 | bail_cc_entry_bin: | ||
787 | sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr); | ||
788 | bail_cc: | ||
789 | kobject_put(&ppd->pport_cc_kobj); | ||
688 | bail_diagc: | 790 | bail_diagc: |
689 | kobject_put(&ppd->sl2vl_kobj); | 791 | kobject_put(&ppd->diagc_kobj); |
690 | bail_sl: | 792 | bail_sl: |
793 | kobject_put(&ppd->sl2vl_kobj); | ||
794 | bail_link: | ||
691 | kobject_put(&ppd->pport_kobj); | 795 | kobject_put(&ppd->pport_kobj); |
692 | bail: | 796 | bail: |
693 | return ret; | 797 | return ret; |
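Note: the relabelled error path (bail_link, bail_sl, bail_diagc, bail_cc, bail_cc_entry_bin) restores a strict last-in-first-out unwind: each failure jumps to a label that undoes only the steps that had already succeeded, and the congestion-control kobject plus its two binary files get their own rungs. The general shape of such a ladder, with placeholder step names rather than driver symbols:

static int setup_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto bail;
	ret = setup_b();
	if (ret)
		goto bail_a;
	ret = setup_c();
	if (ret)
		goto bail_b;
	return 0;

bail_b:
	teardown_b();		/* undo only what already succeeded, */
bail_a:
	teardown_a();		/* in reverse order of construction  */
bail:
	return ret;
}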
@@ -720,7 +824,15 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) | |||
720 | 824 | ||
721 | for (i = 0; i < dd->num_pports; i++) { | 825 | for (i = 0; i < dd->num_pports; i++) { |
722 | ppd = &dd->pport[i]; | 826 | ppd = &dd->pport[i]; |
723 | kobject_put(&ppd->pport_kobj); | 827 | if (qib_cc_table_size && |
828 | ppd->congestion_entries_shadow) { | ||
829 | sysfs_remove_bin_file(&ppd->pport_cc_kobj, | ||
830 | &cc_setting_bin_attr); | ||
831 | sysfs_remove_bin_file(&ppd->pport_cc_kobj, | ||
832 | &cc_table_bin_attr); | ||
833 | kobject_put(&ppd->pport_cc_kobj); | ||
834 | } | ||
724 | kobject_put(&ppd->sl2vl_kobj); | 835 | kobject_put(&ppd->sl2vl_kobj); |
836 | kobject_put(&ppd->pport_kobj); | ||
725 | } | 837 | } |
726 | } | 838 | } |
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c index ddde72e11edb..647f7beb1b0a 100644 --- a/drivers/infiniband/hw/qib/qib_twsi.c +++ b/drivers/infiniband/hw/qib/qib_twsi.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -449,8 +450,9 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, | |||
449 | goto failed_write; | 450 | goto failed_write; |
450 | ret = qib_twsi_wr(dd, addr, 0); | 451 | ret = qib_twsi_wr(dd, addr, 0); |
451 | if (ret) { | 452 | if (ret) { |
452 | qib_dev_err(dd, "Failed to write interface" | 453 | qib_dev_err(dd, |
453 | " write addr %02X\n", addr); | 454 | "Failed to write interface write addr %02X\n", |
455 | addr); | ||
454 | goto failed_write; | 456 | goto failed_write; |
455 | } | 457 | } |
456 | } | 458 | } |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index ce7387ff5d91..aa3a8035bb68 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -281,11 +281,7 @@ inv: | |||
281 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); | 281 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); |
282 | qp->r_sge.num_sge = 0; | 282 | qp->r_sge.num_sge = 0; |
283 | } else | 283 | } else |
284 | while (qp->r_sge.num_sge) { | 284 | qib_put_ss(&qp->r_sge); |
285 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
286 | if (--qp->r_sge.num_sge) | ||
287 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
288 | } | ||
289 | qp->r_state = OP(SEND_LAST); | 285 | qp->r_state = OP(SEND_LAST); |
290 | switch (opcode) { | 286 | switch (opcode) { |
291 | case OP(SEND_FIRST): | 287 | case OP(SEND_FIRST): |
@@ -403,14 +399,9 @@ send_last: | |||
403 | if (unlikely(wc.byte_len > qp->r_len)) | 399 | if (unlikely(wc.byte_len > qp->r_len)) |
404 | goto rewind; | 400 | goto rewind; |
405 | wc.opcode = IB_WC_RECV; | 401 | wc.opcode = IB_WC_RECV; |
406 | last_imm: | ||
407 | qib_copy_sge(&qp->r_sge, data, tlen, 0); | 402 | qib_copy_sge(&qp->r_sge, data, tlen, 0); |
408 | while (qp->s_rdma_read_sge.num_sge) { | 403 | qib_put_ss(&qp->s_rdma_read_sge); |
409 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | 404 | last_imm: |
410 | if (--qp->s_rdma_read_sge.num_sge) | ||
411 | qp->s_rdma_read_sge.sge = | ||
412 | *qp->s_rdma_read_sge.sg_list++; | ||
413 | } | ||
414 | wc.wr_id = qp->r_wr_id; | 405 | wc.wr_id = qp->r_wr_id; |
415 | wc.status = IB_WC_SUCCESS; | 406 | wc.status = IB_WC_SUCCESS; |
416 | wc.qp = &qp->ibqp; | 407 | wc.qp = &qp->ibqp; |
@@ -493,13 +484,7 @@ rdma_last_imm: | |||
493 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | 484 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) |
494 | goto drop; | 485 | goto drop; |
495 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | 486 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) |
496 | while (qp->s_rdma_read_sge.num_sge) { | 487 | qib_put_ss(&qp->s_rdma_read_sge); |
497 | atomic_dec(&qp->s_rdma_read_sge.sge.mr-> | ||
498 | refcount); | ||
499 | if (--qp->s_rdma_read_sge.num_sge) | ||
500 | qp->s_rdma_read_sge.sge = | ||
501 | *qp->s_rdma_read_sge.sg_list++; | ||
502 | } | ||
503 | else { | 488 | else { |
504 | ret = qib_get_rwqe(qp, 1); | 489 | ret = qib_get_rwqe(qp, 1); |
505 | if (ret < 0) | 490 | if (ret < 0) |
@@ -509,6 +494,8 @@ rdma_last_imm: | |||
509 | } | 494 | } |
510 | wc.byte_len = qp->r_len; | 495 | wc.byte_len = qp->r_len; |
511 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | 496 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; |
497 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | ||
498 | qib_put_ss(&qp->r_sge); | ||
512 | goto last_imm; | 499 | goto last_imm; |
513 | 500 | ||
514 | case OP(RDMA_WRITE_LAST): | 501 | case OP(RDMA_WRITE_LAST): |
@@ -524,11 +511,7 @@ rdma_last: | |||
524 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | 511 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) |
525 | goto drop; | 512 | goto drop; |
526 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | 513 | qib_copy_sge(&qp->r_sge, data, tlen, 1); |
527 | while (qp->r_sge.num_sge) { | 514 | qib_put_ss(&qp->r_sge); |
528 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
529 | if (--qp->r_sge.num_sge) | ||
530 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
531 | } | ||
532 | break; | 515 | break; |
533 | 516 | ||
534 | default: | 517 | default: |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index a468bf2d4465..d6c7fe7f88d5 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
194 | } | 194 | } |
195 | length -= len; | 195 | length -= len; |
196 | } | 196 | } |
197 | while (qp->r_sge.num_sge) { | 197 | qib_put_ss(&qp->r_sge); |
198 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
199 | if (--qp->r_sge.num_sge) | ||
200 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
201 | } | ||
202 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 198 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
203 | goto bail_unlock; | 199 | goto bail_unlock; |
204 | wc.wr_id = qp->r_wr_id; | 200 | wc.wr_id = qp->r_wr_id; |
@@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
556 | } else | 552 | } else |
557 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); | 553 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); |
558 | qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); | 554 | qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); |
559 | while (qp->r_sge.num_sge) { | 555 | qib_put_ss(&qp->r_sge); |
560 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
561 | if (--qp->r_sge.num_sge) | ||
562 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
563 | } | ||
564 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 556 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
565 | return; | 557 | return; |
566 | wc.wr_id = qp->r_wr_id; | 558 | wc.wr_id = qp->r_wr_id; |
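Note: every receive-completion site in qib_uc.c and qib_ud.c carried the same open-coded loop dropping one memory-region reference per remaining SGE. Funnelling them through qib_put_ss() matters because the final reference drop is being converted to an RCU-deferred release in this patch; a bare atomic_dec() would bypass that. The helper, as added in the qib_verbs.h hunk later in this patch, reads:

static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);		/* may defer the free via RCU */
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}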
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 7b6c3bffa9d9..fc9b205c2412 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) | |||
183 | sge->sge_length -= len; | 183 | sge->sge_length -= len; |
184 | if (sge->sge_length == 0) { | 184 | if (sge->sge_length == 0) { |
185 | if (release) | 185 | if (release) |
186 | atomic_dec(&sge->mr->refcount); | 186 | qib_put_mr(sge->mr); |
187 | if (--ss->num_sge) | 187 | if (--ss->num_sge) |
188 | *sge = *ss->sg_list++; | 188 | *sge = *ss->sg_list++; |
189 | } else if (sge->length == 0 && sge->mr->lkey) { | 189 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) | |||
224 | sge->sge_length -= len; | 224 | sge->sge_length -= len; |
225 | if (sge->sge_length == 0) { | 225 | if (sge->sge_length == 0) { |
226 | if (release) | 226 | if (release) |
227 | atomic_dec(&sge->mr->refcount); | 227 | qib_put_mr(sge->mr); |
228 | if (--ss->num_sge) | 228 | if (--ss->num_sge) |
229 | *sge = *ss->sg_list++; | 229 | *sge = *ss->sg_list++; |
230 | } else if (sge->length == 0 && sge->mr->lkey) { | 230 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) | |||
333 | * @qp: the QP to post on | 333 | * @qp: the QP to post on |
334 | * @wr: the work request to send | 334 | * @wr: the work request to send |
335 | */ | 335 | */ |
336 | static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) | 336 | static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, |
337 | int *scheduled) | ||
337 | { | 338 | { |
338 | struct qib_swqe *wqe; | 339 | struct qib_swqe *wqe; |
339 | u32 next; | 340 | u32 next; |
@@ -435,11 +436,17 @@ bail_inval_free: | |||
435 | while (j) { | 436 | while (j) { |
436 | struct qib_sge *sge = &wqe->sg_list[--j]; | 437 | struct qib_sge *sge = &wqe->sg_list[--j]; |
437 | 438 | ||
438 | atomic_dec(&sge->mr->refcount); | 439 | qib_put_mr(sge->mr); |
439 | } | 440 | } |
440 | bail_inval: | 441 | bail_inval: |
441 | ret = -EINVAL; | 442 | ret = -EINVAL; |
442 | bail: | 443 | bail: |
444 | if (!ret && !wr->next && | ||
445 | !qib_sdma_empty( | ||
446 | dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { | ||
447 | qib_schedule_send(qp); | ||
448 | *scheduled = 1; | ||
449 | } | ||
443 | spin_unlock_irqrestore(&qp->s_lock, flags); | 450 | spin_unlock_irqrestore(&qp->s_lock, flags); |
444 | return ret; | 451 | return ret; |
445 | } | 452 | } |
@@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
457 | { | 464 | { |
458 | struct qib_qp *qp = to_iqp(ibqp); | 465 | struct qib_qp *qp = to_iqp(ibqp); |
459 | int err = 0; | 466 | int err = 0; |
467 | int scheduled = 0; | ||
460 | 468 | ||
461 | for (; wr; wr = wr->next) { | 469 | for (; wr; wr = wr->next) { |
462 | err = qib_post_one_send(qp, wr); | 470 | err = qib_post_one_send(qp, wr, &scheduled); |
463 | if (err) { | 471 | if (err) { |
464 | *bad_wr = wr; | 472 | *bad_wr = wr; |
465 | goto bail; | 473 | goto bail; |
@@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
467 | } | 475 | } |
468 | 476 | ||
469 | /* Try to do the send work in the caller's context. */ | 477 | /* Try to do the send work in the caller's context. */ |
470 | qib_do_send(&qp->s_work); | 478 | if (!scheduled) |
479 | qib_do_send(&qp->s_work); | ||
471 | 480 | ||
472 | bail: | 481 | bail: |
473 | return err; | 482 | return err; |
@@ -978,7 +987,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) | |||
978 | if (atomic_dec_and_test(&qp->refcount)) | 987 | if (atomic_dec_and_test(&qp->refcount)) |
979 | wake_up(&qp->wait); | 988 | wake_up(&qp->wait); |
980 | if (tx->mr) { | 989 | if (tx->mr) { |
981 | atomic_dec(&tx->mr->refcount); | 990 | qib_put_mr(tx->mr); |
982 | tx->mr = NULL; | 991 | tx->mr = NULL; |
983 | } | 992 | } |
984 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { | 993 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { |
@@ -1336,7 +1345,7 @@ done: | |||
1336 | } | 1345 | } |
1337 | qib_sendbuf_done(dd, pbufn); | 1346 | qib_sendbuf_done(dd, pbufn); |
1338 | if (qp->s_rdma_mr) { | 1347 | if (qp->s_rdma_mr) { |
1339 | atomic_dec(&qp->s_rdma_mr->refcount); | 1348 | qib_put_mr(qp->s_rdma_mr); |
1340 | qp->s_rdma_mr = NULL; | 1349 | qp->s_rdma_mr = NULL; |
1341 | } | 1350 | } |
1342 | if (qp->s_wqe) { | 1351 | if (qp->s_wqe) { |
@@ -1845,6 +1854,23 @@ bail: | |||
1845 | return ret; | 1854 | return ret; |
1846 | } | 1855 | } |
1847 | 1856 | ||
1857 | struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) | ||
1858 | { | ||
1859 | struct ib_ah_attr attr; | ||
1860 | struct ib_ah *ah = ERR_PTR(-EINVAL); | ||
1861 | struct qib_qp *qp0; | ||
1862 | |||
1863 | memset(&attr, 0, sizeof attr); | ||
1864 | attr.dlid = dlid; | ||
1865 | attr.port_num = ppd_from_ibp(ibp)->port; | ||
1866 | rcu_read_lock(); | ||
1867 | qp0 = rcu_dereference(ibp->qp0); | ||
1868 | if (qp0) | ||
1869 | ah = ib_create_ah(qp0->ibqp.pd, &attr); | ||
1870 | rcu_read_unlock(); | ||
1871 | return ah; | ||
1872 | } | ||
1873 | |||
1848 | /** | 1874 | /** |
1849 | * qib_destroy_ah - destroy an address handle | 1875 | * qib_destroy_ah - destroy an address handle |
1850 | * @ibah: the AH to destroy | 1876 | * @ibah: the AH to destroy |
@@ -2060,13 +2086,15 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
2060 | spin_lock_init(&dev->lk_table.lock); | 2086 | spin_lock_init(&dev->lk_table.lock); |
2061 | dev->lk_table.max = 1 << ib_qib_lkey_table_size; | 2087 | dev->lk_table.max = 1 << ib_qib_lkey_table_size; |
2062 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); | 2088 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); |
2063 | dev->lk_table.table = (struct qib_mregion **) | 2089 | dev->lk_table.table = (struct qib_mregion __rcu **) |
2064 | __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); | 2090 | __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); |
2065 | if (dev->lk_table.table == NULL) { | 2091 | if (dev->lk_table.table == NULL) { |
2066 | ret = -ENOMEM; | 2092 | ret = -ENOMEM; |
2067 | goto err_lk; | 2093 | goto err_lk; |
2068 | } | 2094 | } |
2069 | memset(dev->lk_table.table, 0, lk_tab_size); | 2095 | RCU_INIT_POINTER(dev->dma_mr, NULL); |
2096 | for (i = 0; i < dev->lk_table.max; i++) | ||
2097 | RCU_INIT_POINTER(dev->lk_table.table[i], NULL); | ||
2070 | INIT_LIST_HEAD(&dev->pending_mmaps); | 2098 | INIT_LIST_HEAD(&dev->pending_mmaps); |
2071 | spin_lock_init(&dev->pending_lock); | 2099 | spin_lock_init(&dev->pending_lock); |
2072 | dev->mmap_offset = PAGE_SIZE; | 2100 | dev->mmap_offset = PAGE_SIZE; |
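Note: initialising every slot with RCU_INIT_POINTER() instead of memset() matches the new __rcu annotation on the table, keeps sparse's address-space checking honest, and documents that lookups are expected to run locklessly under rcu_read_lock(). The reader side lives in qib_keys.c, outside this excerpt, so the sketch below shows only the implied pattern; lookup_lkey is a hypothetical name and the index derivation is omitted:

static struct qib_mregion *lookup_lkey(struct qib_lkey_table *rkt, u32 lkey,
				       unsigned int index)
{
	struct qib_mregion *mr;

	rcu_read_lock();
	mr = rcu_dereference(rkt->table[index]);	/* index derived from lkey */
	if (mr && mr->lkey == lkey)
		qib_get_mr(mr);		/* pin it while still inside the
					 * read-side critical section */
	else
		mr = NULL;
	rcu_read_unlock();
	return mr;
}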
@@ -2289,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd) | |||
2289 | get_order(lk_tab_size)); | 2317 | get_order(lk_tab_size)); |
2290 | kfree(dev->qp_table); | 2318 | kfree(dev->qp_table); |
2291 | } | 2319 | } |
2320 | |||
2321 | /* | ||
2322 | * This must be called with s_lock held. | ||
2323 | */ | ||
2324 | void qib_schedule_send(struct qib_qp *qp) | ||
2325 | { | ||
2326 | if (qib_send_ok(qp)) { | ||
2327 | struct qib_ibport *ibp = | ||
2328 | to_iport(qp->ibqp.device, qp->port_num); | ||
2329 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
2330 | |||
2331 | queue_work(ppd->qib_wq, &qp->s_work); | ||
2332 | } | ||
2333 | } | ||
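Note: qib_schedule_send() goes out of line because it no longer queues on the global ib_wq but on a workqueue owned by the port, ppd->qib_wq, so each port's send processing can be created, flushed and destroyed independently. The workqueue allocation is not part of this excerpt; it would be done during port init along these lines, where the name format and flags are assumptions rather than quoted code:

	/* creation, typically during port init */
	ppd->qib_wq = alloc_workqueue("qib%d_%d", WQ_MEM_RECLAIM, 1,
				      ppd->dd->unit, ppd->port);
	if (!ppd->qib_wq)
		return -ENOMEM;

	/* and the matching teardown */
	destroy_workqueue(ppd->qib_wq);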
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 487606024659..aff8b2c17886 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
42 | #include <linux/kref.h> | 42 | #include <linux/kref.h> |
43 | #include <linux/workqueue.h> | 43 | #include <linux/workqueue.h> |
44 | #include <linux/completion.h> | ||
44 | #include <rdma/ib_pack.h> | 45 | #include <rdma/ib_pack.h> |
45 | #include <rdma/ib_user_verbs.h> | 46 | #include <rdma/ib_user_verbs.h> |
46 | 47 | ||
@@ -302,6 +303,9 @@ struct qib_mregion { | |||
302 | u32 max_segs; /* number of qib_segs in all the arrays */ | 303 | u32 max_segs; /* number of qib_segs in all the arrays */ |
303 | u32 mapsz; /* size of the map array */ | 304 | u32 mapsz; /* size of the map array */ |
304 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ | 305 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ |
306 | u8 lkey_published; /* in global table */ | ||
307 | struct completion comp; /* complete when refcount goes to zero */ | ||
308 | struct rcu_head list; | ||
305 | atomic_t refcount; | 309 | atomic_t refcount; |
306 | struct qib_segarray *map[0]; /* the segments */ | 310 | struct qib_segarray *map[0]; /* the segments */ |
307 | }; | 311 | }; |
@@ -416,7 +420,7 @@ struct qib_qp { | |||
416 | /* read mostly fields above and below */ | 420 | /* read mostly fields above and below */ |
417 | struct ib_ah_attr remote_ah_attr; | 421 | struct ib_ah_attr remote_ah_attr; |
418 | struct ib_ah_attr alt_ah_attr; | 422 | struct ib_ah_attr alt_ah_attr; |
419 | struct qib_qp *next; /* link list for QPN hash table */ | 423 | struct qib_qp __rcu *next; /* link list for QPN hash table */ |
420 | struct qib_swqe *s_wq; /* send work queue */ | 424 | struct qib_swqe *s_wq; /* send work queue */ |
421 | struct qib_mmap_info *ip; | 425 | struct qib_mmap_info *ip; |
422 | struct qib_ib_header *s_hdr; /* next packet header to send */ | 426 | struct qib_ib_header *s_hdr; /* next packet header to send */ |
@@ -646,7 +650,7 @@ struct qib_lkey_table { | |||
646 | u32 next; /* next unused index (speeds search) */ | 650 | u32 next; /* next unused index (speeds search) */ |
647 | u32 gen; /* generation count */ | 651 | u32 gen; /* generation count */ |
648 | u32 max; /* size of the table */ | 652 | u32 max; /* size of the table */ |
649 | struct qib_mregion **table; | 653 | struct qib_mregion __rcu **table; |
650 | }; | 654 | }; |
651 | 655 | ||
652 | struct qib_opcode_stats { | 656 | struct qib_opcode_stats { |
@@ -655,8 +659,8 @@ struct qib_opcode_stats { | |||
655 | }; | 659 | }; |
656 | 660 | ||
657 | struct qib_ibport { | 661 | struct qib_ibport { |
658 | struct qib_qp *qp0; | 662 | struct qib_qp __rcu *qp0; |
659 | struct qib_qp *qp1; | 663 | struct qib_qp __rcu *qp1; |
660 | struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ | 664 | struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ |
661 | struct qib_ah *sm_ah; | 665 | struct qib_ah *sm_ah; |
662 | struct qib_ah *smi_ah; | 666 | struct qib_ah *smi_ah; |
@@ -723,12 +727,13 @@ struct qib_ibport { | |||
723 | struct qib_opcode_stats opstats[128]; | 727 | struct qib_opcode_stats opstats[128]; |
724 | }; | 728 | }; |
725 | 729 | ||
730 | |||
726 | struct qib_ibdev { | 731 | struct qib_ibdev { |
727 | struct ib_device ibdev; | 732 | struct ib_device ibdev; |
728 | struct list_head pending_mmaps; | 733 | struct list_head pending_mmaps; |
729 | spinlock_t mmap_offset_lock; /* protect mmap_offset */ | 734 | spinlock_t mmap_offset_lock; /* protect mmap_offset */ |
730 | u32 mmap_offset; | 735 | u32 mmap_offset; |
731 | struct qib_mregion *dma_mr; | 736 | struct qib_mregion __rcu *dma_mr; |
732 | 737 | ||
733 | /* QP numbers are shared by all IB ports */ | 738 | /* QP numbers are shared by all IB ports */ |
734 | struct qib_qpn_table qpn_table; | 739 | struct qib_qpn_table qpn_table; |
@@ -739,7 +744,7 @@ struct qib_ibdev { | |||
739 | struct list_head memwait; /* list for wait kernel memory */ | 744 | struct list_head memwait; /* list for wait kernel memory */ |
740 | struct list_head txreq_free; | 745 | struct list_head txreq_free; |
741 | struct timer_list mem_timer; | 746 | struct timer_list mem_timer; |
742 | struct qib_qp **qp_table; | 747 | struct qib_qp __rcu **qp_table; |
743 | struct qib_pio_header *pio_hdrs; | 748 | struct qib_pio_header *pio_hdrs; |
744 | dma_addr_t pio_hdrs_phys; | 749 | dma_addr_t pio_hdrs_phys; |
745 | /* list of QPs waiting for RNR timer */ | 750 | /* list of QPs waiting for RNR timer */ |
@@ -832,11 +837,7 @@ extern struct workqueue_struct *qib_cq_wq; | |||
832 | /* | 837 | /* |
833 | * This must be called with s_lock held. | 838 | * This must be called with s_lock held. |
834 | */ | 839 | */ |
835 | static inline void qib_schedule_send(struct qib_qp *qp) | 840 | void qib_schedule_send(struct qib_qp *qp); |
836 | { | ||
837 | if (qib_send_ok(qp)) | ||
838 | queue_work(ib_wq, &qp->s_work); | ||
839 | } | ||
840 | 841 | ||
841 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) | 842 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) |
842 | { | 843 | { |
@@ -933,6 +934,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
933 | 934 | ||
934 | int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); | 935 | int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); |
935 | 936 | ||
937 | struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid); | ||
938 | |||
936 | void qib_rc_rnr_retry(unsigned long arg); | 939 | void qib_rc_rnr_retry(unsigned long arg); |
937 | 940 | ||
938 | void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); | 941 | void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); |
@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); | |||
944 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | 947 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, |
945 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | 948 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); |
946 | 949 | ||
947 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); | 950 | int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); |
948 | 951 | ||
949 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); | 952 | void qib_free_lkey(struct qib_mregion *mr); |
950 | 953 | ||
951 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | 954 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, |
952 | struct qib_sge *isge, struct ib_sge *sge, int acc); | 955 | struct qib_sge *isge, struct ib_sge *sge, int acc); |
@@ -1014,6 +1017,29 @@ int qib_unmap_fmr(struct list_head *fmr_list); | |||
1014 | 1017 | ||
1015 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); | 1018 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); |
1016 | 1019 | ||
1020 | static inline void qib_get_mr(struct qib_mregion *mr) | ||
1021 | { | ||
1022 | atomic_inc(&mr->refcount); | ||
1023 | } | ||
1024 | |||
1025 | void mr_rcu_callback(struct rcu_head *list); | ||
1026 | |||
1027 | static inline void qib_put_mr(struct qib_mregion *mr) | ||
1028 | { | ||
1029 | if (unlikely(atomic_dec_and_test(&mr->refcount))) | ||
1030 | call_rcu(&mr->list, mr_rcu_callback); | ||
1031 | } | ||
1032 | |||
1033 | static inline void qib_put_ss(struct qib_sge_state *ss) | ||
1034 | { | ||
1035 | while (ss->num_sge) { | ||
1036 | qib_put_mr(ss->sge.mr); | ||
1037 | if (--ss->num_sge) | ||
1038 | ss->sge = *ss->sg_list++; | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | |||
1017 | void qib_release_mmap_info(struct kref *ref); | 1043 | void qib_release_mmap_info(struct kref *ref); |
1018 | 1044 | ||
1019 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, | 1045 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, |
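Note: qib_put_mr() turns the last reference drop into call_rcu() rather than an immediate free, and the new comp field gives deregistration something to wait on until all readers are done with the region. mr_rcu_callback() is only declared in this hunk; its body presumably just signals that completion, roughly:

void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);	/* the deregistration path can then
				 * wait_for_completion(&mr->comp) before
				 * releasing the region's memory */
}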
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 561b8bca4060..1d7281c5a02e 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -102,10 +103,10 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
102 | u64 atmp; | 103 | u64 atmp; |
103 | atmp = pioaddr & ~(piolen - 1); | 104 | atmp = pioaddr & ~(piolen - 1); |
104 | if (atmp < addr || (atmp + piolen) > (addr + len)) { | 105 | if (atmp < addr || (atmp + piolen) > (addr + len)) { |
105 | qib_dev_err(dd, "No way to align address/size " | 106 | qib_dev_err(dd, |
106 | "(%llx/%llx), no WC mtrr\n", | 107 | "No way to align address/size (%llx/%llx), no WC mtrr\n", |
107 | (unsigned long long) atmp, | 108 | (unsigned long long) atmp, |
108 | (unsigned long long) piolen << 1); | 109 | (unsigned long long) piolen << 1); |
109 | ret = -ENODEV; | 110 | ret = -ENODEV; |
110 | } else { | 111 | } else { |
111 | pioaddr = atmp; | 112 | pioaddr = atmp; |
@@ -120,8 +121,7 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
120 | if (cookie < 0) { | 121 | if (cookie < 0) { |
121 | { | 122 | { |
122 | qib_devinfo(dd->pcidev, | 123 | qib_devinfo(dd->pcidev, |
123 | "mtrr_add() WC for PIO bufs " | 124 | "mtrr_add() WC for PIO bufs failed (%d)\n", |
124 | "failed (%d)\n", | ||
125 | cookie); | 125 | cookie); |
126 | ret = -EINVAL; | 126 | ret = -EINVAL; |
127 | } | 127 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 1ca732201f33..6d66ab0dd92a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -1376,7 +1376,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work) | |||
1376 | 1376 | ||
1377 | if (skb->protocol == htons(ETH_P_IP)) | 1377 | if (skb->protocol == htons(ETH_P_IP)) |
1378 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 1378 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
1379 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1379 | #if IS_ENABLED(CONFIG_IPV6) |
1380 | else if (skb->protocol == htons(ETH_P_IPV6)) | 1380 | else if (skb->protocol == htons(ETH_P_IPV6)) |
1381 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 1381 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
1382 | #endif | 1382 | #endif |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 69ba57270481..a52922ed85c1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c | |||
@@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, | 133 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, |
134 | enum mlx4_dev_event event, int port) | 134 | enum mlx4_dev_event event, unsigned long port) |
135 | { | 135 | { |
136 | struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; | 136 | struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; |
137 | struct mlx4_en_priv *priv; | 137 | struct mlx4_en_priv *priv; |
@@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, | |||
156 | if (port < 1 || port > dev->caps.num_ports || | 156 | if (port < 1 || port > dev->caps.num_ports || |
157 | !mdev->pndev[port]) | 157 | !mdev->pndev[port]) |
158 | return; | 158 | return; |
159 | mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); | 159 | mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, |
160 | (int) port); | ||
160 | } | 161 | } |
161 | } | 162 | } |
162 | 163 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cd48337cbfc0..99a04648fab0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -83,6 +83,15 @@ enum { | |||
83 | (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ | 83 | (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ |
84 | (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) | 84 | (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) |
85 | 85 | ||
86 | static u64 get_async_ev_mask(struct mlx4_dev *dev) | ||
87 | { | ||
88 | u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; | ||
89 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) | ||
90 | async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); | ||
91 | |||
92 | return async_ev_mask; | ||
93 | } | ||
94 | |||
86 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) | 95 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) |
87 | { | 96 | { |
88 | __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | | 97 | __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | |
@@ -474,6 +483,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
474 | 483 | ||
475 | break; | 484 | break; |
476 | 485 | ||
486 | case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: | ||
487 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, | ||
488 | (unsigned long) eqe); | ||
489 | break; | ||
490 | |||
477 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: | 491 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: |
478 | case MLX4_EVENT_TYPE_ECC_DETECT: | 492 | case MLX4_EVENT_TYPE_ECC_DETECT: |
479 | default: | 493 | default: |
@@ -957,7 +971,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
957 | priv->eq_table.have_irq = 1; | 971 | priv->eq_table.have_irq = 1; |
958 | } | 972 | } |
959 | 973 | ||
960 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | 974 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
961 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | 975 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); |
962 | if (err) | 976 | if (err) |
963 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", | 977 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", |
@@ -997,7 +1011,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
997 | struct mlx4_priv *priv = mlx4_priv(dev); | 1011 | struct mlx4_priv *priv = mlx4_priv(dev); |
998 | int i; | 1012 | int i; |
999 | 1013 | ||
1000 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, | 1014 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, |
1001 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | 1015 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); |
1002 | 1016 | ||
1003 | mlx4_free_irqs(dev); | 1017 | mlx4_free_irqs(dev); |
@@ -1041,7 +1055,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) | |||
1041 | mlx4_cmd_use_polling(dev); | 1055 | mlx4_cmd_use_polling(dev); |
1042 | 1056 | ||
1043 | /* Map the new eq to handle all asyncronous events */ | 1057 | /* Map the new eq to handle all asyncronous events */ |
1044 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | 1058 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
1045 | priv->eq_table.eq[i].eqn); | 1059 | priv->eq_table.eq[i].eqn); |
1046 | if (err) { | 1060 | if (err) { |
1047 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); | 1061 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); |
@@ -1055,7 +1069,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) | |||
1055 | } | 1069 | } |
1056 | 1070 | ||
1057 | /* Return to default */ | 1071 | /* Return to default */ |
1058 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | 1072 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
1059 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | 1073 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); |
1060 | return err; | 1074 | return err; |
1061 | } | 1075 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 1d70657058a5..c69648487321 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) | |||
109 | [41] = "Unicast VEP steering support", | 109 | [41] = "Unicast VEP steering support", |
110 | [42] = "Multicast VEP steering support", | 110 | [42] = "Multicast VEP steering support", |
111 | [48] = "Counters support", | 111 | [48] = "Counters support", |
112 | [59] = "Port management change event support", | ||
112 | }; | 113 | }; |
113 | int i; | 114 | int i; |
114 | 115 | ||
@@ -174,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
174 | #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 | 175 | #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 |
175 | #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 | 176 | #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 |
176 | #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 | 177 | #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 |
178 | #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 | ||
177 | #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 | 179 | #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 |
178 | #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 | 180 | #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 |
179 | #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 | 181 | #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 |
@@ -183,25 +185,44 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
183 | #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c | 185 | #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c |
184 | #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 | 186 | #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 |
185 | 187 | ||
188 | #define QUERY_FUNC_CAP_FMR_FLAG 0x80 | ||
189 | #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 | ||
190 | #define QUERY_FUNC_CAP_FLAG_ETH 0x80 | ||
191 | |||
192 | /* when opcode modifier = 1 */ | ||
186 | #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 | 193 | #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 |
194 | #define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8 | ||
187 | #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc | 195 | #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc |
188 | 196 | ||
197 | #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 | ||
198 | #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 | ||
199 | |||
200 | #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 | ||
201 | |||
189 | if (vhcr->op_modifier == 1) { | 202 | if (vhcr->op_modifier == 1) { |
190 | field = vhcr->in_modifier; | 203 | field = vhcr->in_modifier; |
191 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); | 204 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); |
192 | 205 | ||
193 | field = 0; /* ensure fvl bit is not set */ | 206 | field = 0; |
207 | /* ensure force vlan and force mac bits are not set */ | ||
194 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); | 208 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); |
209 | /* ensure that phy_wqe_gid bit is not set */ | ||
210 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); | ||
211 | |||
195 | } else if (vhcr->op_modifier == 0) { | 212 | } else if (vhcr->op_modifier == 0) { |
196 | field = 1 << 7; /* enable only ethernet interface */ | 213 | /* enable rdma and ethernet interfaces */ |
214 | field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); | ||
197 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); | 215 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); |
198 | 216 | ||
199 | field = dev->caps.num_ports; | 217 | field = dev->caps.num_ports; |
200 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); | 218 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); |
201 | 219 | ||
202 | size = 0; /* no PF behavious is set for now */ | 220 | size = 0; /* no PF behaviour is set for now */ |
203 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); | 221 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); |
204 | 222 | ||
223 | field = 0; /* protected FMR support not available as yet */ | ||
224 | MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); | ||
225 | |||
205 | size = dev->caps.num_qps; | 226 | size = dev->caps.num_qps; |
206 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); | 227 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); |
207 | 228 | ||
@@ -254,11 +275,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) | |||
254 | outbox = mailbox->buf; | 275 | outbox = mailbox->buf; |
255 | 276 | ||
256 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); | 277 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); |
257 | if (!(field & (1 << 7))) { | 278 | if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { |
258 | mlx4_err(dev, "The host doesn't support eth interface\n"); | 279 | mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); |
259 | err = -EPROTONOSUPPORT; | 280 | err = -EPROTONOSUPPORT; |
260 | goto out; | 281 | goto out; |
261 | } | 282 | } |
283 | func_cap->flags = field; | ||
262 | 284 | ||
263 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); | 285 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); |
264 | func_cap->num_ports = field; | 286 | func_cap->num_ports = field; |
@@ -297,17 +319,27 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) | |||
297 | if (err) | 319 | if (err) |
298 | goto out; | 320 | goto out; |
299 | 321 | ||
300 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); | 322 | if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { |
301 | if (field & (1 << 7)) { | 323 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); |
302 | mlx4_err(dev, "VLAN is enforced on this port\n"); | 324 | if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { |
303 | err = -EPROTONOSUPPORT; | 325 | mlx4_err(dev, "VLAN is enforced on this port\n"); |
304 | goto out; | 326 | err = -EPROTONOSUPPORT; |
305 | } | 327 | goto out; |
328 | } | ||
306 | 329 | ||
307 | if (field & (1 << 6)) { | 330 | if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { |
308 | mlx4_err(dev, "Force mac is enabled on this port\n"); | 331 | mlx4_err(dev, "Force mac is enabled on this port\n"); |
309 | err = -EPROTONOSUPPORT; | 332 | err = -EPROTONOSUPPORT; |
310 | goto out; | 333 | goto out; |
334 | } | ||
335 | } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) { | ||
336 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); | ||
337 | if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { | ||
338 | mlx4_err(dev, "phy_wqe_gid is " | ||
339 | "enforced on this ib port\n"); | ||
340 | err = -EPROTONOSUPPORT; | ||
341 | goto out; | ||
342 | } | ||
311 | } | 343 | } |
312 | 344 | ||
313 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); | 345 | MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); |
@@ -707,14 +739,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
707 | { | 739 | { |
708 | u64 def_mac; | 740 | u64 def_mac; |
709 | u8 port_type; | 741 | u8 port_type; |
742 | u16 short_field; | ||
710 | int err; | 743 | int err; |
711 | 744 | ||
712 | #define MLX4_PORT_SUPPORT_IB (1 << 0) | 745 | #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 |
713 | #define MLX4_PORT_SUGGEST_TYPE (1 << 3) | 746 | #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c |
714 | #define MLX4_PORT_DEFAULT_SENSE (1 << 4) | 747 | #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e |
715 | #define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \ | ||
716 | ~MLX4_PORT_SUGGEST_TYPE & \ | ||
717 | ~MLX4_PORT_DEFAULT_SENSE) | ||
718 | 748 | ||
719 | err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, | 749 | err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, |
720 | MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, | 750 | MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, |
@@ -730,20 +760,58 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
730 | MLX4_GET(port_type, outbox->buf, | 760 | MLX4_GET(port_type, outbox->buf, |
731 | QUERY_PORT_SUPPORTED_TYPE_OFFSET); | 761 | QUERY_PORT_SUPPORTED_TYPE_OFFSET); |
732 | 762 | ||
733 | /* Allow only Eth port, no link sensing allowed */ | 763 | /* No link sensing allowed */ |
734 | port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; | 764 | port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; |
735 | 765 | /* set port type to currently operating port type */ | |
736 | /* check eth is enabled for this port */ | 766 | port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); |
737 | if (!(port_type & 2)) | ||
738 | mlx4_dbg(dev, "QUERY PORT: eth not supported by host"); | ||
739 | 767 | ||
740 | MLX4_PUT(outbox->buf, port_type, | 768 | MLX4_PUT(outbox->buf, port_type, |
741 | QUERY_PORT_SUPPORTED_TYPE_OFFSET); | 769 | QUERY_PORT_SUPPORTED_TYPE_OFFSET); |
770 | |||
771 | short_field = 1; /* slave max gids */ | ||
772 | MLX4_PUT(outbox->buf, short_field, | ||
773 | QUERY_PORT_CUR_MAX_GID_OFFSET); | ||
774 | |||
775 | short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; | ||
776 | MLX4_PUT(outbox->buf, short_field, | ||
777 | QUERY_PORT_CUR_MAX_PKEY_OFFSET); | ||
742 | } | 778 | } |
743 | 779 | ||
744 | return err; | 780 | return err; |
745 | } | 781 | } |
746 | 782 | ||
783 | int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, | ||
784 | int *gid_tbl_len, int *pkey_tbl_len) | ||
785 | { | ||
786 | struct mlx4_cmd_mailbox *mailbox; | ||
787 | u32 *outbox; | ||
788 | u16 field; | ||
789 | int err; | ||
790 | |||
791 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
792 | if (IS_ERR(mailbox)) | ||
793 | return PTR_ERR(mailbox); | ||
794 | |||
795 | err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, | ||
796 | MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, | ||
797 | MLX4_CMD_WRAPPED); | ||
798 | if (err) | ||
799 | goto out; | ||
800 | |||
801 | outbox = mailbox->buf; | ||
802 | |||
803 | MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); | ||
804 | *gid_tbl_len = field; | ||
805 | |||
806 | MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); | ||
807 | *pkey_tbl_len = field; | ||
808 | |||
809 | out: | ||
810 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
811 | return err; | ||
812 | } | ||
813 | EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); | ||
814 | |||
747 | int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) | 815 | int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) |
748 | { | 816 | { |
749 | struct mlx4_cmd_mailbox *mailbox; | 817 | struct mlx4_cmd_mailbox *mailbox; |
@@ -890,11 +958,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) | |||
890 | ((fw_ver & 0xffff0000ull) >> 16) | | 958 | ((fw_ver & 0xffff0000ull) >> 16) | |
891 | ((fw_ver & 0x0000ffffull) << 16); | 959 | ((fw_ver & 0x0000ffffull) << 16); |
892 | 960 | ||
961 | MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); | ||
962 | dev->caps.function = lg; | ||
963 | |||
893 | if (mlx4_is_slave(dev)) | 964 | if (mlx4_is_slave(dev)) |
894 | goto out; | 965 | goto out; |
895 | 966 | ||
896 | MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); | ||
897 | dev->caps.function = lg; | ||
898 | 967 | ||
899 | MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); | 968 | MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); |
900 | if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || | 969 | if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || |
@@ -975,9 +1044,12 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, | |||
975 | if (err) | 1044 | if (err) |
976 | return err; | 1045 | return err; |
977 | 1046 | ||
978 | /* for slaves, zero out everything except FW version */ | 1047 | /* for slaves, set pci PPF ID to invalid and zero out everything |
1048 | * else except FW version */ | ||
979 | outbuf[0] = outbuf[1] = 0; | 1049 | outbuf[0] = outbuf[1] = 0; |
980 | memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); | 1050 | memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); |
1051 | outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; | ||
1052 | |||
981 | return 0; | 1053 | return 0; |
982 | } | 1054 | } |
983 | 1055 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index a9ade1c3cad5..88b7b3e75ab1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c | |||
@@ -413,6 +413,8 @@ err: | |||
413 | mlx4_free_icm(dev, table->icm[i], use_coherent); | 413 | mlx4_free_icm(dev, table->icm[i], use_coherent); |
414 | } | 414 | } |
415 | 415 | ||
416 | kfree(table->icm); | ||
417 | |||
416 | return -ENOMEM; | 418 | return -ENOMEM; |
417 | } | 419 | } |
418 | 420 | ||
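Note: the single added kfree() plugs a leak on the partial-failure path of mlx4_init_icm_table(): the loop above it already frees the ICM chunks that were mapped, but the table->icm pointer array itself was never released. The same bug and fix in a minimal user-space analogue, with generic names rather than driver symbols:

#include <stdlib.h>

static int init_table(void ***out, int n)
{
	void **table = calloc(n, sizeof(*table));
	int i;

	if (!table)
		return -1;
	for (i = 0; i < n; ++i) {
		table[i] = malloc(4096);
		if (!table[i])
			goto err;
	}
	*out = table;
	return 0;
err:
	while (i--)
		free(table[i]);
	free(table);		/* the step the patch adds for table->icm */
	return -1;
}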
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index b10c07a1dc1a..19e4efc0b342 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h | |||
@@ -81,13 +81,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, | |||
81 | u64 virt, int obj_size, int nobj, int reserved, | 81 | u64 virt, int obj_size, int nobj, int reserved, |
82 | int use_lowmem, int use_coherent); | 82 | int use_lowmem, int use_coherent); |
83 | void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); | 83 | void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); |
84 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); | ||
85 | void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); | ||
86 | void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); | 84 | void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); |
87 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
88 | int start, int end); | ||
89 | void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
90 | int start, int end); | ||
91 | 85 | ||
92 | static inline void mlx4_icm_first(struct mlx4_icm *icm, | 86 | static inline void mlx4_icm_first(struct mlx4_icm *icm, |
93 | struct mlx4_icm_iter *iter) | 87 | struct mlx4_icm_iter *iter) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index b4e9f6f5cc04..116895ac8b35 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c | |||
@@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf) | |||
115 | } | 115 | } |
116 | EXPORT_SYMBOL_GPL(mlx4_unregister_interface); | 116 | EXPORT_SYMBOL_GPL(mlx4_unregister_interface); |
117 | 117 | ||
118 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port) | 118 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, |
119 | unsigned long param) | ||
119 | { | 120 | { |
120 | struct mlx4_priv *priv = mlx4_priv(dev); | 121 | struct mlx4_priv *priv = mlx4_priv(dev); |
121 | struct mlx4_device_context *dev_ctx; | 122 | struct mlx4_device_context *dev_ctx; |
@@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por | |||
125 | 126 | ||
126 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | 127 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) |
127 | if (dev_ctx->intf->event) | 128 | if (dev_ctx->intf->event) |
128 | dev_ctx->intf->event(dev, dev_ctx->context, type, port); | 129 | dev_ctx->intf->event(dev, dev_ctx->context, type, param); |
129 | 130 | ||
130 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | 131 | spin_unlock_irqrestore(&priv->ctx_lock, flags); |
131 | } | 132 | } |
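Note: widening the callback argument from int to unsigned long lets mlx4_dispatch_event() keep passing a plain port number for the existing events while carrying a pointer to the EQE for the new MLX4_DEV_EVENT_PORT_MGMT_CHANGE event (see the eq.c hunk above). Consumers therefore interpret the parameter per event type; a sketch of that shape, with an illustrative handler name:

static void example_event_handler(struct mlx4_dev *dev, void *ctx,
				  enum mlx4_dev_event event,
				  unsigned long param)
{
	switch (event) {
	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: {
		struct mlx4_eqe *eqe = (struct mlx4_eqe *) param;

		mlx4_dbg(dev, "port mgmt change, subtype %d\n", eqe->subtype);
		break;
	}
	default:
		/* for the pre-existing events param is the 1-based port */
		break;
	}
}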
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 42645166bae2..e8f8ebb4ae65 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -218,6 +218,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
218 | for (i = 1; i <= dev->caps.num_ports; ++i) { | 218 | for (i = 1; i <= dev->caps.num_ports; ++i) { |
219 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; | 219 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; |
220 | dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; | 220 | dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; |
221 | dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; | ||
222 | dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; | ||
223 | /* set gid and pkey table operating lengths by default | ||
224 | * to non-sriov values */ | ||
221 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; | 225 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; |
222 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; | 226 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; |
223 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; | 227 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; |
@@ -312,29 +316,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
312 | /* if only ETH is supported - assign ETH */ | 316 | /* if only ETH is supported - assign ETH */ |
313 | if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) | 317 | if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) |
314 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; | 318 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; |
315 | /* if only IB is supported, | 319 | /* if only IB is supported, assign IB */ |
316 | * assign IB only if SRIOV is off*/ | ||
317 | else if (dev->caps.supported_type[i] == | 320 | else if (dev->caps.supported_type[i] == |
318 | MLX4_PORT_TYPE_IB) { | 321 | MLX4_PORT_TYPE_IB) |
319 | if (dev->flags & MLX4_FLAG_SRIOV) | 322 | dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; |
320 | dev->caps.port_type[i] = | ||
321 | MLX4_PORT_TYPE_NONE; | ||
322 | else | ||
323 | dev->caps.port_type[i] = | ||
324 | MLX4_PORT_TYPE_IB; | ||
325 | /* if IB and ETH are supported, | ||
326 | * first of all check if SRIOV is on */ | ||
327 | } else if (dev->flags & MLX4_FLAG_SRIOV) | ||
328 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; | ||
329 | else { | 323 | else { |
330 | /* In non-SRIOV mode, we set the port type | 324 | /* if IB and ETH are supported, we set the port |
331 | * according to user selection of port type, | 325 | * type according to user selection of port type; |
332 | * if usere selected none, take the FW hint */ | 326 | * if user selected none, take the FW hint */ |
333 | if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) | 327 | if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) |
334 | dev->caps.port_type[i] = dev->caps.suggested_type[i] ? | 328 | dev->caps.port_type[i] = dev->caps.suggested_type[i] ? |
335 | MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; | 329 | MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; |
336 | else | 330 | else |
337 | dev->caps.port_type[i] = port_type_array[i-1]; | 331 | dev->caps.port_type[i] = port_type_array[i - 1]; |
338 | } | 332 | } |
339 | } | 333 | } |
340 | /* | 334 | /* |
@@ -415,6 +409,23 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) | |||
415 | return ret; | 409 | return ret; |
416 | } | 410 | } |
417 | 411 | ||
412 | int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) | ||
413 | { | ||
414 | u32 qk = MLX4_RESERVED_QKEY_BASE; | ||
415 | if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || | ||
416 | qpn < dev->caps.sqp_start) | ||
417 | return -EINVAL; | ||
418 | |||
419 | if (qpn >= dev->caps.base_tunnel_sqpn) | ||
420 | /* tunnel qp */ | ||
421 | qk += qpn - dev->caps.base_tunnel_sqpn; | ||
422 | else | ||
423 | qk += qpn - dev->caps.sqp_start; | ||
424 | *qkey = qk; | ||
425 | return 0; | ||
426 | } | ||
427 | EXPORT_SYMBOL(mlx4_get_parav_qkey); | ||
428 | |||
418 | int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) | 429 | int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) |
419 | { | 430 | { |
420 | struct mlx4_priv *priv = mlx4_priv(dev); | 431 | struct mlx4_priv *priv = mlx4_priv(dev); |
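mlx4_get_parav_qkey() above maps proxy and tunnel QP numbers onto the reserved qkey window: QPs from sqp_start and from base_tunnel_sqpn each land at a distinct offset above MLX4_RESERVED_QKEY_BASE. A standalone, userspace-style sketch of the same arithmetic, with the QP ranges made up purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RESERVED_QKEY_BASE 0xFFFF0000u

/* illustrative ranges: proxy SQPs start at 0x40, tunnel QPs span 0x80..0x27f */
static int parav_qkey(uint32_t qpn, uint32_t *qkey)
{
	const uint32_t sqp_start = 0x40, tunnel_base = 0x80, tunnel_end = 0x280;

	if (qpn < sqp_start || qpn >= tunnel_end)
		return -1;
	*qkey = RESERVED_QKEY_BASE + (qpn >= tunnel_base ? qpn - tunnel_base
							 : qpn - sqp_start);
	return 0;
}

int main(void)
{
	uint32_t qkey;

	parav_qkey(0x43, &qkey);	/* proxy QP  -> 0xffff0003 */
	printf("0x%x\n", qkey);
	parav_qkey(0x85, &qkey);	/* tunnel QP -> 0xffff0005 */
	printf("0x%x\n", qkey);
	return 0;
}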
@@ -515,8 +526,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) | |||
515 | return -ENODEV; | 526 | return -ENODEV; |
516 | } | 527 | } |
517 | 528 | ||
518 | for (i = 1; i <= dev->caps.num_ports; ++i) | 529 | for (i = 1; i <= dev->caps.num_ports; ++i) { |
519 | dev->caps.port_mask[i] = dev->caps.port_type[i]; | 530 | dev->caps.port_mask[i] = dev->caps.port_type[i]; |
531 | if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, | ||
532 | &dev->caps.gid_table_len[i], | ||
533 | &dev->caps.pkey_table_len[i])) | ||
534 | return -ENODEV; | ||
535 | } | ||
520 | 536 | ||
521 | if (dev->caps.uar_page_size * (dev->caps.num_uars - | 537 | if (dev->caps.uar_page_size * (dev->caps.num_uars - |
522 | dev->caps.reserved_uars) > | 538 | dev->caps.reserved_uars) > |
@@ -553,7 +569,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, | |||
553 | for (port = 1; port <= dev->caps.num_ports; port++) { | 569 | for (port = 1; port <= dev->caps.num_ports; port++) { |
554 | mlx4_CLOSE_PORT(dev, port); | 570 | mlx4_CLOSE_PORT(dev, port); |
555 | dev->caps.port_type[port] = port_types[port - 1]; | 571 | dev->caps.port_type[port] = port_types[port - 1]; |
556 | err = mlx4_SET_PORT(dev, port); | 572 | err = mlx4_SET_PORT(dev, port, -1); |
557 | if (err) { | 573 | if (err) { |
558 | mlx4_err(dev, "Failed to set port %d, " | 574 | mlx4_err(dev, "Failed to set port %d, " |
559 | "aborting\n", port); | 575 | "aborting\n", port); |
@@ -739,7 +755,7 @@ static ssize_t set_port_ib_mtu(struct device *dev, | |||
739 | mlx4_unregister_device(mdev); | 755 | mlx4_unregister_device(mdev); |
740 | for (port = 1; port <= mdev->caps.num_ports; port++) { | 756 | for (port = 1; port <= mdev->caps.num_ports; port++) { |
741 | mlx4_CLOSE_PORT(mdev, port); | 757 | mlx4_CLOSE_PORT(mdev, port); |
742 | err = mlx4_SET_PORT(mdev, port); | 758 | err = mlx4_SET_PORT(mdev, port, -1); |
743 | if (err) { | 759 | if (err) { |
744 | mlx4_err(mdev, "Failed to set port %d, " | 760 | mlx4_err(mdev, "Failed to set port %d, " |
745 | "aborting\n", port); | 761 | "aborting\n", port); |
@@ -1192,6 +1208,17 @@ err: | |||
1192 | return -EIO; | 1208 | return -EIO; |
1193 | } | 1209 | } |
1194 | 1210 | ||
1211 | static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) | ||
1212 | { | ||
1213 | int i; | ||
1214 | |||
1215 | for (i = 1; i <= dev->caps.num_ports; i++) { | ||
1216 | dev->caps.gid_table_len[i] = 1; | ||
1217 | dev->caps.pkey_table_len[i] = | ||
1218 | dev->phys_caps.pkey_phys_table_len[i] - 1; | ||
1219 | } | ||
1220 | } | ||
1221 | |||
1195 | static int mlx4_init_hca(struct mlx4_dev *dev) | 1222 | static int mlx4_init_hca(struct mlx4_dev *dev) |
1196 | { | 1223 | { |
1197 | struct mlx4_priv *priv = mlx4_priv(dev); | 1224 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -1231,6 +1258,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1231 | goto err_stop_fw; | 1258 | goto err_stop_fw; |
1232 | } | 1259 | } |
1233 | 1260 | ||
1261 | if (mlx4_is_master(dev)) | ||
1262 | mlx4_parav_master_pf_caps(dev); | ||
1263 | |||
1234 | priv->fs_hash_mode = MLX4_FS_L2_HASH; | 1264 | priv->fs_hash_mode = MLX4_FS_L2_HASH; |
1235 | 1265 | ||
1236 | switch (priv->fs_hash_mode) { | 1266 | switch (priv->fs_hash_mode) { |
@@ -1522,12 +1552,24 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
1522 | "with caps = 0\n", port, err); | 1552 | "with caps = 0\n", port, err); |
1523 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; | 1553 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; |
1524 | 1554 | ||
1555 | /* initialize per-slave default ib port capabilities */ | ||
1556 | if (mlx4_is_master(dev)) { | ||
1557 | int i; | ||
1558 | for (i = 0; i < dev->num_slaves; i++) { | ||
1559 | if (i == mlx4_master_func_num(dev)) | ||
1560 | continue; | ||
1561 | priv->mfunc.master.slave_state[i].ib_cap_mask[port] = | ||
1562 | ib_port_default_caps; | ||
1563 | } | ||
1564 | } | ||
1565 | |||
1525 | if (mlx4_is_mfunc(dev)) | 1566 | if (mlx4_is_mfunc(dev)) |
1526 | dev->caps.port_ib_mtu[port] = IB_MTU_2048; | 1567 | dev->caps.port_ib_mtu[port] = IB_MTU_2048; |
1527 | else | 1568 | else |
1528 | dev->caps.port_ib_mtu[port] = IB_MTU_4096; | 1569 | dev->caps.port_ib_mtu[port] = IB_MTU_4096; |
1529 | 1570 | ||
1530 | err = mlx4_SET_PORT(dev, port); | 1571 | err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? |
1572 | dev->caps.pkey_table_len[port] : -1); | ||
1531 | if (err) { | 1573 | if (err) { |
1532 | mlx4_err(dev, "Failed to set port %d, aborting\n", | 1574 | mlx4_err(dev, "Failed to set port %d, aborting\n", |
1533 | port); | 1575 | port); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index d2c436b10fbf..59ebc0339638 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -351,66 +351,6 @@ struct mlx4_srq_context { | |||
351 | __be64 db_rec_addr; | 351 | __be64 db_rec_addr; |
352 | }; | 352 | }; |
353 | 353 | ||
354 | struct mlx4_eqe { | ||
355 | u8 reserved1; | ||
356 | u8 type; | ||
357 | u8 reserved2; | ||
358 | u8 subtype; | ||
359 | union { | ||
360 | u32 raw[6]; | ||
361 | struct { | ||
362 | __be32 cqn; | ||
363 | } __packed comp; | ||
364 | struct { | ||
365 | u16 reserved1; | ||
366 | __be16 token; | ||
367 | u32 reserved2; | ||
368 | u8 reserved3[3]; | ||
369 | u8 status; | ||
370 | __be64 out_param; | ||
371 | } __packed cmd; | ||
372 | struct { | ||
373 | __be32 qpn; | ||
374 | } __packed qp; | ||
375 | struct { | ||
376 | __be32 srqn; | ||
377 | } __packed srq; | ||
378 | struct { | ||
379 | __be32 cqn; | ||
380 | u32 reserved1; | ||
381 | u8 reserved2[3]; | ||
382 | u8 syndrome; | ||
383 | } __packed cq_err; | ||
384 | struct { | ||
385 | u32 reserved1[2]; | ||
386 | __be32 port; | ||
387 | } __packed port_change; | ||
388 | struct { | ||
389 | #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 | ||
390 | u32 reserved; | ||
391 | u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; | ||
392 | } __packed comm_channel_arm; | ||
393 | struct { | ||
394 | u8 port; | ||
395 | u8 reserved[3]; | ||
396 | __be64 mac; | ||
397 | } __packed mac_update; | ||
398 | struct { | ||
399 | u8 port; | ||
400 | } __packed sw_event; | ||
401 | struct { | ||
402 | __be32 slave_id; | ||
403 | } __packed flr_event; | ||
404 | struct { | ||
405 | __be16 current_temperature; | ||
406 | __be16 warning_threshold; | ||
407 | } __packed warming; | ||
408 | } event; | ||
409 | u8 slave_id; | ||
410 | u8 reserved3[2]; | ||
411 | u8 owner; | ||
412 | } __packed; | ||
413 | |||
414 | struct mlx4_eq { | 354 | struct mlx4_eq { |
415 | struct mlx4_dev *dev; | 355 | struct mlx4_dev *dev; |
416 | void __iomem *doorbell; | 356 | void __iomem *doorbell; |
@@ -902,7 +842,8 @@ void mlx4_catas_init(void); | |||
902 | int mlx4_restart_one(struct pci_dev *pdev); | 842 | int mlx4_restart_one(struct pci_dev *pdev); |
903 | int mlx4_register_device(struct mlx4_dev *dev); | 843 | int mlx4_register_device(struct mlx4_dev *dev); |
904 | void mlx4_unregister_device(struct mlx4_dev *dev); | 844 | void mlx4_unregister_device(struct mlx4_dev *dev); |
905 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port); | 845 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, |
846 | unsigned long param); | ||
906 | 847 | ||
907 | struct mlx4_dev_cap; | 848 | struct mlx4_dev_cap; |
908 | struct mlx4_init_hca_param; | 849 | struct mlx4_init_hca_param; |
@@ -1043,7 +984,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, | |||
1043 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); | 984 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); |
1044 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | 985 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); |
1045 | 986 | ||
1046 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | 987 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); |
1047 | /* resource tracker functions*/ | 988 | /* resource tracker functions*/ |
1048 | int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, | 989 | int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, |
1049 | enum mlx4_resource resource_type, | 990 | enum mlx4_resource resource_type, |
@@ -1086,6 +1027,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
1086 | struct mlx4_cmd_info *cmd); | 1027 | struct mlx4_cmd_info *cmd); |
1087 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | 1028 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
1088 | 1029 | ||
1030 | int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, | ||
1031 | int *gid_tbl_len, int *pkey_tbl_len); | ||
1089 | 1032 | ||
1090 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | 1033 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
1091 | struct mlx4_vhcr *vhcr, | 1034 | struct mlx4_vhcr *vhcr, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 028833ffc56f..e36dd0f2fa73 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -775,14 +775,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
775 | enum { | 775 | enum { |
776 | MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ | 776 | MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ |
777 | MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ | 777 | MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ |
778 | MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, | ||
778 | MLX4_CHANGE_PORT_VL_CAP = 21, | 779 | MLX4_CHANGE_PORT_VL_CAP = 21, |
779 | MLX4_CHANGE_PORT_MTU_CAP = 22, | 780 | MLX4_CHANGE_PORT_MTU_CAP = 22, |
780 | }; | 781 | }; |
781 | 782 | ||
782 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | 783 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) |
783 | { | 784 | { |
784 | struct mlx4_cmd_mailbox *mailbox; | 785 | struct mlx4_cmd_mailbox *mailbox; |
785 | int err, vl_cap; | 786 | int err, vl_cap, pkey_tbl_flag = 0; |
786 | 787 | ||
787 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | 788 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) |
788 | return 0; | 789 | return 0; |
@@ -795,11 +796,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | |||
795 | 796 | ||
796 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; | 797 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; |
797 | 798 | ||
799 | if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { | ||
800 | pkey_tbl_flag = 1; | ||
801 | ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); | ||
802 | } | ||
803 | |||
798 | /* IB VL CAP enum isn't used by the firmware, just numerical values */ | 804 | /* IB VL CAP enum isn't used by the firmware, just numerical values */ |
799 | for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { | 805 | for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { |
800 | ((__be32 *) mailbox->buf)[0] = cpu_to_be32( | 806 | ((__be32 *) mailbox->buf)[0] = cpu_to_be32( |
801 | (1 << MLX4_CHANGE_PORT_MTU_CAP) | | 807 | (1 << MLX4_CHANGE_PORT_MTU_CAP) | |
802 | (1 << MLX4_CHANGE_PORT_VL_CAP) | | 808 | (1 << MLX4_CHANGE_PORT_VL_CAP) | |
809 | (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | | ||
803 | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | | 810 | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | |
804 | (vl_cap << MLX4_SET_PORT_VL_CAP)); | 811 | (vl_cap << MLX4_SET_PORT_VL_CAP)); |
805 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | 812 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, |
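With the extra argument, callers of mlx4_SET_PORT() opt into programming the pkey table size by passing a non-negative value; -1 leaves the firmware default untouched, and the change flag is only set when the device is the SR-IOV master. A hedged sketch of the two calling conventions, mirroring the main.c changes earlier in this patch:

	/* non-master paths (port type change, sysfs MTU change): keep the
	 * current pkey table size */
	err = mlx4_SET_PORT(dev, port, -1);

	/* master bring-up: program the paravirtualized pkey table length */
	err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
			    dev->caps.pkey_table_len[port] : -1);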
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 4d7761f8c3f6..bd6c9fcdf2dd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -57,6 +57,13 @@ enum { | |||
57 | MLX4_MAX_PORTS = 2 | 57 | MLX4_MAX_PORTS = 2 |
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* base qkey for use in sriov tunnel-qp/proxy-qp communication. | ||
61 | * These qkeys must not be allowed for general use. This is a 64k range, | ||
62 | * and to test for violation, we use the mask (protect against future chg). | ||
63 | */ | ||
64 | #define MLX4_RESERVED_QKEY_BASE (0xFFFF0000) | ||
65 | #define MLX4_RESERVED_QKEY_MASK (0xFFFF0000) | ||
66 | |||
60 | enum { | 67 | enum { |
61 | MLX4_BOARD_ID_LEN = 64 | 68 | MLX4_BOARD_ID_LEN = 64 |
62 | }; | 69 | }; |
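Because the base and the mask are the same 0xFFFF0000 value, a qkey falls in the reserved window exactly when its upper 16 bits equal the base. A minimal standalone check; the helper name is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define MLX4_RESERVED_QKEY_BASE 0xFFFF0000u
#define MLX4_RESERVED_QKEY_MASK 0xFFFF0000u

/* true when a caller-supplied qkey would collide with the SR-IOV
 * tunnel/proxy qkey range */
static bool qkey_is_reserved(uint32_t qkey)
{
	return (qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE;
}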
@@ -127,7 +134,8 @@ enum { | |||
127 | MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, | 134 | MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, |
128 | MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, | 135 | MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, |
129 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, | 136 | MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, |
130 | MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 | 137 | MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, |
138 | MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, | ||
131 | }; | 139 | }; |
132 | 140 | ||
133 | enum { | 141 | enum { |
@@ -170,6 +178,7 @@ enum mlx4_event { | |||
170 | MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, | 178 | MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, |
171 | MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, | 179 | MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, |
172 | MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, | 180 | MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, |
181 | MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, | ||
173 | MLX4_EVENT_TYPE_NONE = 0xff, | 182 | MLX4_EVENT_TYPE_NONE = 0xff, |
174 | }; | 183 | }; |
175 | 184 | ||
@@ -267,12 +276,32 @@ enum { | |||
267 | MLX4_MAX_FAST_REG_PAGES = 511, | 276 | MLX4_MAX_FAST_REG_PAGES = 511, |
268 | }; | 277 | }; |
269 | 278 | ||
279 | enum { | ||
280 | MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, | ||
281 | MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, | ||
282 | MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, | ||
283 | }; | ||
284 | |||
285 | /* Port mgmt change event handling */ | ||
286 | enum { | ||
287 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0, | ||
288 | MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1, | ||
289 | MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2, | ||
290 | MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3, | ||
291 | MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, | ||
292 | }; | ||
293 | |||
294 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ | ||
295 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) | ||
296 | |||
270 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 297 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
271 | { | 298 | { |
272 | return (major << 32) | (minor << 16) | subminor; | 299 | return (major << 32) | (minor << 16) | subminor; |
273 | } | 300 | } |
274 | 301 | ||
275 | struct mlx4_phys_caps { | 302 | struct mlx4_phys_caps { |
303 | u32 gid_phys_table_len[MLX4_MAX_PORTS + 1]; | ||
304 | u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1]; | ||
276 | u32 num_phys_eqs; | 305 | u32 num_phys_eqs; |
277 | }; | 306 | }; |
278 | 307 | ||
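The MLX4_EQ_PORT_INFO_* flags above describe which PortInfo attributes a management-change event touched, and MSTR_SM_CHANGE_MASK groups the two bits that mean the master SM's LID or SL moved. A hedged sketch of a handler reacting to an already byte-swapped attribute mask; the function name is illustrative:

static void react_to_port_info_change(struct mlx4_dev *dev, u8 port,
				      u32 changed_attr)
{
	if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
		mlx4_dbg(dev, "port %d LID changed\n", port);
	if (changed_attr & MSTR_SM_CHANGE_MASK)
		mlx4_dbg(dev, "port %d master SM LID and/or SL changed\n", port);
	if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
		mlx4_dbg(dev, "port %d SM requested client reregister\n", port);
}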
@@ -305,6 +334,8 @@ struct mlx4_caps { | |||
305 | int max_qp_init_rdma; | 334 | int max_qp_init_rdma; |
306 | int max_qp_dest_rdma; | 335 | int max_qp_dest_rdma; |
307 | int sqp_start; | 336 | int sqp_start; |
337 | u32 base_sqpn; | ||
338 | u32 base_tunnel_sqpn; | ||
308 | int num_srqs; | 339 | int num_srqs; |
309 | int max_srq_wqes; | 340 | int max_srq_wqes; |
310 | int max_srq_sge; | 341 | int max_srq_sge; |
@@ -547,6 +578,81 @@ struct mlx4_dev { | |||
547 | u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; | 578 | u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; |
548 | }; | 579 | }; |
549 | 580 | ||
581 | struct mlx4_eqe { | ||
582 | u8 reserved1; | ||
583 | u8 type; | ||
584 | u8 reserved2; | ||
585 | u8 subtype; | ||
586 | union { | ||
587 | u32 raw[6]; | ||
588 | struct { | ||
589 | __be32 cqn; | ||
590 | } __packed comp; | ||
591 | struct { | ||
592 | u16 reserved1; | ||
593 | __be16 token; | ||
594 | u32 reserved2; | ||
595 | u8 reserved3[3]; | ||
596 | u8 status; | ||
597 | __be64 out_param; | ||
598 | } __packed cmd; | ||
599 | struct { | ||
600 | __be32 qpn; | ||
601 | } __packed qp; | ||
602 | struct { | ||
603 | __be32 srqn; | ||
604 | } __packed srq; | ||
605 | struct { | ||
606 | __be32 cqn; | ||
607 | u32 reserved1; | ||
608 | u8 reserved2[3]; | ||
609 | u8 syndrome; | ||
610 | } __packed cq_err; | ||
611 | struct { | ||
612 | u32 reserved1[2]; | ||
613 | __be32 port; | ||
614 | } __packed port_change; | ||
615 | struct { | ||
616 | #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 | ||
617 | u32 reserved; | ||
618 | u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; | ||
619 | } __packed comm_channel_arm; | ||
620 | struct { | ||
621 | u8 port; | ||
622 | u8 reserved[3]; | ||
623 | __be64 mac; | ||
624 | } __packed mac_update; | ||
625 | struct { | ||
626 | __be32 slave_id; | ||
627 | } __packed flr_event; | ||
628 | struct { | ||
629 | __be16 current_temperature; | ||
630 | __be16 warning_threshold; | ||
631 | } __packed warming; | ||
632 | struct { | ||
633 | u8 reserved[3]; | ||
634 | u8 port; | ||
635 | union { | ||
636 | struct { | ||
637 | __be16 mstr_sm_lid; | ||
638 | __be16 port_lid; | ||
639 | __be32 changed_attr; | ||
640 | u8 reserved[3]; | ||
641 | u8 mstr_sm_sl; | ||
642 | __be64 gid_prefix; | ||
643 | } __packed port_info; | ||
644 | struct { | ||
645 | __be32 block_ptr; | ||
646 | __be32 tbl_entries_mask; | ||
647 | } __packed tbl_change_info; | ||
648 | } params; | ||
649 | } __packed port_mgmt_change; | ||
650 | } event; | ||
651 | u8 slave_id; | ||
652 | u8 reserved3[2]; | ||
653 | u8 owner; | ||
654 | } __packed; | ||
655 | |||
550 | struct mlx4_init_port_param { | 656 | struct mlx4_init_port_param { |
551 | int set_guid0; | 657 | int set_guid0; |
552 | int set_node_guid; | 658 | int set_node_guid; |
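The relocated mlx4_eqe layout adds a port_mgmt_change union, so code outside mlx4_core can decode PORT_MNG_CHG events by subtype. A hedged sketch of pulling out the interesting fields; the helper name is illustrative, and the fields are big-endian on the wire as declared above:

static void dump_port_mgmt_change(struct mlx4_dev *dev, struct mlx4_eqe *eqe)
{
	u8 port = eqe->event.port_mgmt_change.port;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		mlx4_dbg(dev, "port %d: LID 0x%x, changed_attr 0x%x\n", port,
			 be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.port_lid),
			 be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr));
		break;
	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		mlx4_dbg(dev, "port %d: table block 0x%x changed\n", port,
			 be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr));
		break;
	default:
		break;
	}
}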
@@ -570,6 +676,15 @@ struct mlx4_init_port_param { | |||
570 | if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ | 676 | if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ |
571 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) | 677 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
572 | 678 | ||
679 | #define MLX4_INVALID_SLAVE_ID 0xFF | ||
680 | |||
681 | void handle_port_mgmt_change_event(struct work_struct *work); | ||
682 | |||
683 | static inline int mlx4_master_func_num(struct mlx4_dev *dev) | ||
684 | { | ||
685 | return dev->caps.function; | ||
686 | } | ||
687 | |||
573 | static inline int mlx4_is_master(struct mlx4_dev *dev) | 688 | static inline int mlx4_is_master(struct mlx4_dev *dev) |
574 | { | 689 | { |
575 | return dev->flags & MLX4_FLAG_MASTER; | 690 | return dev->flags & MLX4_FLAG_MASTER; |
@@ -799,4 +914,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev, | |||
799 | struct mlx4_net_trans_rule *rule, u64 *reg_id); | 914 | struct mlx4_net_trans_rule *rule, u64 *reg_id); |
800 | int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); | 915 | int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); |
801 | 916 | ||
917 | int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey); | ||
918 | |||
802 | #endif /* MLX4_DEVICE_H */ | 919 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 8dc485febc6b..d813704b963b 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h | |||
@@ -44,13 +44,14 @@ enum mlx4_dev_event { | |||
44 | MLX4_DEV_EVENT_PORT_UP, | 44 | MLX4_DEV_EVENT_PORT_UP, |
45 | MLX4_DEV_EVENT_PORT_DOWN, | 45 | MLX4_DEV_EVENT_PORT_DOWN, |
46 | MLX4_DEV_EVENT_PORT_REINIT, | 46 | MLX4_DEV_EVENT_PORT_REINIT, |
47 | MLX4_DEV_EVENT_PORT_MGMT_CHANGE, | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | struct mlx4_interface { | 50 | struct mlx4_interface { |
50 | void * (*add) (struct mlx4_dev *dev); | 51 | void * (*add) (struct mlx4_dev *dev); |
51 | void (*remove)(struct mlx4_dev *dev, void *context); | 52 | void (*remove)(struct mlx4_dev *dev, void *context); |
52 | void (*event) (struct mlx4_dev *dev, void *context, | 53 | void (*event) (struct mlx4_dev *dev, void *context, |
53 | enum mlx4_dev_event event, int port); | 54 | enum mlx4_dev_event event, unsigned long param); |
54 | void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); | 55 | void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); |
55 | struct list_head list; | 56 | struct list_head list; |
56 | enum mlx4_protocol protocol; | 57 | enum mlx4_protocol protocol; |
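On the consumer side, the meaning of the widened param now depends on the event. A hedged sketch of an mlx4_interface client handling both the old port events and the new management-change event, reusing the dump helper sketched earlier; passing the EQE by address for the new event is an assumption about the dispatcher, not something this header mandates:

static void my_event(struct mlx4_dev *dev, void *context,
		     enum mlx4_dev_event event, unsigned long param)
{
	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		mlx4_dbg(dev, "port %lu changed state\n", param);
		break;
	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		/* assumed convention: param carries a struct mlx4_eqe * */
		dump_port_mgmt_change(dev, (struct mlx4_eqe *) param);
		break;
	default:
		break;
	}
}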
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 83f77ac33957..0e3ff30647d5 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h | |||
@@ -262,6 +262,18 @@ struct ib_cm_event { | |||
262 | void *private_data; | 262 | void *private_data; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | #define CM_REQ_ATTR_ID cpu_to_be16(0x0010) | ||
266 | #define CM_MRA_ATTR_ID cpu_to_be16(0x0011) | ||
267 | #define CM_REJ_ATTR_ID cpu_to_be16(0x0012) | ||
268 | #define CM_REP_ATTR_ID cpu_to_be16(0x0013) | ||
269 | #define CM_RTU_ATTR_ID cpu_to_be16(0x0014) | ||
270 | #define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) | ||
271 | #define CM_DREP_ATTR_ID cpu_to_be16(0x0016) | ||
272 | #define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) | ||
273 | #define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) | ||
274 | #define CM_LAP_ATTR_ID cpu_to_be16(0x0019) | ||
275 | #define CM_APR_ATTR_ID cpu_to_be16(0x001A) | ||
276 | |||
265 | /** | 277 | /** |
266 | * ib_cm_handler - User-defined callback to process communication events. | 278 | * ib_cm_handler - User-defined callback to process communication events. |
267 | * @cm_id: Communication identifier associated with the reported event. | 279 | * @cm_id: Communication identifier associated with the reported event. |
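Moving the CM attribute IDs into the public rdma/ib_cm.h makes them visible to code outside the CM core that needs to classify CM MADs. A hedged sketch of such a classifier; the helper is illustrative, and struct ib_mad_hdr plus IB_MGMT_CLASS_CM come from rdma/ib_mad.h:

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/* does this MAD start (or solicit) a new connection? */
static bool cm_mad_is_conn_request(const struct ib_mad_hdr *hdr)
{
	return hdr->mgmt_class == IB_MGMT_CLASS_CM &&
	       (hdr->attr_id == CM_REQ_ATTR_ID ||
		hdr->attr_id == CM_SIDR_REQ_ATTR_ID);
}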
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index d44a56388a3e..8275e539bace 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h | |||
@@ -251,6 +251,28 @@ struct ib_sa_service_rec { | |||
251 | u64 data64[2]; | 251 | u64 data64[2]; |
252 | }; | 252 | }; |
253 | 253 | ||
254 | #define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0) | ||
255 | #define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1) | ||
256 | #define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2) | ||
257 | #define IB_SA_GUIDINFO_REC_RES2 IB_SA_COMP_MASK(3) | ||
258 | #define IB_SA_GUIDINFO_REC_GID0 IB_SA_COMP_MASK(4) | ||
259 | #define IB_SA_GUIDINFO_REC_GID1 IB_SA_COMP_MASK(5) | ||
260 | #define IB_SA_GUIDINFO_REC_GID2 IB_SA_COMP_MASK(6) | ||
261 | #define IB_SA_GUIDINFO_REC_GID3 IB_SA_COMP_MASK(7) | ||
262 | #define IB_SA_GUIDINFO_REC_GID4 IB_SA_COMP_MASK(8) | ||
263 | #define IB_SA_GUIDINFO_REC_GID5 IB_SA_COMP_MASK(9) | ||
264 | #define IB_SA_GUIDINFO_REC_GID6 IB_SA_COMP_MASK(10) | ||
265 | #define IB_SA_GUIDINFO_REC_GID7 IB_SA_COMP_MASK(11) | ||
266 | |||
267 | struct ib_sa_guidinfo_rec { | ||
268 | __be16 lid; | ||
269 | u8 block_num; | ||
270 | /* reserved */ | ||
271 | u8 res1; | ||
272 | __be32 res2; | ||
273 | u8 guid_info_list[64]; | ||
274 | }; | ||
275 | |||
254 | struct ib_sa_client { | 276 | struct ib_sa_client { |
255 | atomic_t users; | 277 | atomic_t users; |
256 | struct completion comp; | 278 | struct completion comp; |
@@ -385,4 +407,15 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, | |||
385 | */ | 407 | */ |
386 | void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec); | 408 | void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec); |
387 | 409 | ||
410 | /* Support GuidInfoRecord */ | ||
411 | int ib_sa_guid_info_rec_query(struct ib_sa_client *client, | ||
412 | struct ib_device *device, u8 port_num, | ||
413 | struct ib_sa_guidinfo_rec *rec, | ||
414 | ib_sa_comp_mask comp_mask, u8 method, | ||
415 | int timeout_ms, gfp_t gfp_mask, | ||
416 | void (*callback)(int status, | ||
417 | struct ib_sa_guidinfo_rec *resp, | ||
418 | void *context), | ||
419 | void *context, | ||
420 | struct ib_sa_query **sa_query); | ||
388 | #endif /* IB_SA_H */ | 421 | #endif /* IB_SA_H */ |
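The new GuidInfoRecord support follows the pattern of the existing SA record queries: the caller supplies a component mask, a MAD method, a timeout and a completion callback, and gets back a query handle it can cancel. A hedged sketch of issuing a SET for one GUID block; the method constant comes from rdma/ib_mad.h, the timeout is arbitrary, and the helper names are illustrative:

static void guid_set_done(int status, struct ib_sa_guidinfo_rec *resp,
			  void *context)
{
	pr_info("GuidInfoRecord SET completed, status %d\n", status);
}

/* returns the query id on success or a negative errno, like the other
 * ib_sa_*_query() helpers */
static int push_guid_block(struct ib_sa_client *client, struct ib_device *device,
			   u8 port, struct ib_sa_guidinfo_rec *rec,
			   struct ib_sa_query **query)
{
	ib_sa_comp_mask mask = IB_SA_GUIDINFO_REC_LID |
			       IB_SA_GUIDINFO_REC_BLOCK_NUM |
			       IB_SA_GUIDINFO_REC_GID0;

	return ib_sa_guid_info_rec_query(client, device, port, rec, mask,
					 IB_MGMT_METHOD_SET, 1000, GFP_KERNEL,
					 guid_set_done, NULL, query);
}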
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 51988f808181..ad3a3142383a 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -357,4 +357,14 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos); | |||
357 | */ | 357 | */ |
358 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); | 358 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); |
359 | 359 | ||
360 | /** | ||
361 | * rdma_set_afonly - Specify that listens are restricted to the | ||
362 | * bound address family only. | ||
363 | * @id: Communication identifier to configure. ||
364 | * @afonly: Value indicating if listens are restricted. | ||
365 | * | ||
366 | * Must be set before identifier is in the listening state. | ||
367 | */ | ||
368 | int rdma_set_afonly(struct rdma_cm_id *id, int afonly); | ||
369 | |||
360 | #endif /* RDMA_CM_H */ | 370 | #endif /* RDMA_CM_H */ |
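For in-kernel ULPs the ordering matters: the restriction has to be applied before the identifier enters the listening state. A hedged sketch of an IPv6-only listener; rdma_bind_addr() and rdma_listen() are existing rdma_cm calls, the wrapper itself is illustrative:

static int listen_v6_only(struct rdma_cm_id *id, struct sockaddr_in6 *sin6)
{
	int ret;

	ret = rdma_set_afonly(id, 1);	/* must precede listening */
	if (ret)
		return ret;

	ret = rdma_bind_addr(id, (struct sockaddr *) sin6);
	if (ret)
		return ret;

	return rdma_listen(id, 0);
}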
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index 5348a000c8f3..1ee9239ff8c2 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h | |||
@@ -224,6 +224,7 @@ enum { | |||
224 | enum { | 224 | enum { |
225 | RDMA_OPTION_ID_TOS = 0, | 225 | RDMA_OPTION_ID_TOS = 0, |
226 | RDMA_OPTION_ID_REUSEADDR = 1, | 226 | RDMA_OPTION_ID_REUSEADDR = 1, |
227 | RDMA_OPTION_ID_AFONLY = 2, | ||
227 | RDMA_OPTION_IB_PATH = 1 | 228 | RDMA_OPTION_IB_PATH = 1 |
228 | }; | 229 | }; |
229 | 230 | ||
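Userspace reaches the same knob through the rdma_ucm set-option path. A hedged sketch using librdmacm, assuming the library exposes the new RDMA_OPTION_ID_AFONLY code the same way it already exposes TOS and REUSEADDR:

#include <rdma/rdma_cma.h>

static int set_afonly(struct rdma_cm_id *id)
{
	int afonly = 1;

	/* level RDMA_OPTION_ID, option RDMA_OPTION_ID_AFONLY; call before
	 * rdma_bind_addr()/rdma_listen() */
	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_AFONLY,
			       &afonly, sizeof(afonly));
}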