author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-11 22:43:13 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-11 22:43:13 -0400
commit     ce9d3c9a6a9aef61525be07fe6ba27d937236aa2 (patch)
tree       1b29bcb8f60fc6b59fa0d7b833cc733b8ebe17c9 /drivers/infiniband/core/cma.c
parent     038a5008b2f395c85e6e71d6ddf3c684e7c405b0 (diff)
parent     3d73c2884f45f9a297cbc956cea101405a9703f2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (87 commits)
mlx4_core: Fix section mismatches
IPoIB: Allow setting policy to ignore multicast groups
IB/mthca: Mark error paths as unlikely() in post_srq_recv functions
IB/ipath: Minor fix to ordering of freeing and zeroing of tid pages.
IB/ipath: Remove redundant link state checks
IB/ipath: Fix IB_EVENT_PORT_ERR event
IB/ipath: Better handling of unexpected GPIO interrupts
IB/ipath: Maintain active time on all chips
IB/ipath: Fix QHT7040 serial number check
IB/ipath: Indicate a couple of chip bugs to userspace
IB/ipath: iba6110 rev4 no longer needs recv header overrun workaround
IB/ipath: Use counters in ipath_poll and cleanup interrupts in ipath_close
IB/ipath: Remove duplicate copy of LMC
IB/ipath: Add ability to set the LMC via the sysfs debugging interface
IB/ipath: Optimize completion queue entry insertion and polling
IB/ipath: Implement IB_EVENT_QP_LAST_WQE_REACHED
IB/ipath: Generate flush CQE when QP is in error state
IB/ipath: Remove redundant code
IB/ipath: Future proof eeprom checksum code (contents reading)
IB/ipath: UC RDMA WRITE with IMMEDIATE doesn't send the immediate
...
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c  46
1 file changed, 36 insertions, 10 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2e641b255db..93644f82592 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -52,6 +52,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 
 #define CMA_CM_RESPONSE_TIMEOUT 20
 #define CMA_MAX_CM_RETRIES 15
+#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -138,6 +139,7 @@ struct rdma_id_private {
 	u32	qkey;
 	u32	qp_num;
 	u8	srq;
+	u8	tos;
 };
 
 struct cma_multicast {
@@ -1089,6 +1091,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.param.ud.private_data_len =
 			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
 	} else {
+		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
 		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
 				       ib_event->private_data, offset);
@@ -1474,6 +1477,15 @@ err:
 }
 EXPORT_SYMBOL(rdma_listen);
 
+void rdma_set_service_type(struct rdma_cm_id *id, int tos)
+{
+	struct rdma_id_private *id_priv;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->tos = (u8) tos;
+}
+EXPORT_SYMBOL(rdma_set_service_type);
+
 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 			      void *context)
 {
@@ -1498,23 +1510,37 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 			      struct cma_work *work)
 {
-	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
+	struct rdma_addr *addr = &id_priv->id.route.addr;
 	struct ib_sa_path_rec path_rec;
+	ib_sa_comp_mask comp_mask;
+	struct sockaddr_in6 *sin6;
 
 	memset(&path_rec, 0, sizeof path_rec);
-	ib_addr_get_sgid(addr, &path_rec.sgid);
-	ib_addr_get_dgid(addr, &path_rec.dgid);
-	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
+	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+
+	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
+		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
+
+	if (addr->src_addr.sa_family == AF_INET) {
+		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
+		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
+	} else {
+		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
+		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
+		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
+	}
 
 	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
 					       id_priv->id.port_num, &path_rec,
-					       IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-					       IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
-					       IB_SA_PATH_REC_REVERSIBLE,
-					       timeout_ms, GFP_KERNEL,
-					       cma_query_handler, work, &id_priv->query);
+					       comp_mask, timeout_ms,
+					       GFP_KERNEL, cma_query_handler,
+					       work, &id_priv->query);
 
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
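
A minimal usage sketch (commentary, not part of this merge) of the rdma_set_service_type() export added above, assuming a kernel RDMA CM consumer; example_set_tos() and the ToS value are hypothetical. The setting only takes effect if it is made before rdma_resolve_route(), because cma_query_ib_route() reads id_priv->tos when it builds the SA path record query.

#include <rdma/rdma_cm.h>

/* Hypothetical helper, not in the patch: stamp a type of service on an
 * rdma_cm_id before route resolution so the SA path record query carries it. */
static void example_set_tos(struct rdma_cm_id *id)
{
	/* For AF_INET source addresses the value is copied into
	 * path_rec.qos_class; for AF_INET6 the traffic class is taken from
	 * sin6_flowinfo instead (be32_to_cpu(flowinfo) >> 20 keeps the 8
	 * traffic-class bits above the 20-bit flow label). */
	rdma_set_service_type(id, 0x10);	/* arbitrary example ToS */
}

Call this before rdma_resolve_route(); the subsequent route query then includes IB_SA_PATH_REC_QOS_CLASS (or IB_SA_PATH_REC_TRAFFIC_CLASS for IPv6) in its component mask.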