author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
commit		ce9d3c9a6a9aef61525be07fe6ba27d937236aa2 (patch)
tree		1b29bcb8f60fc6b59fa0d7b833cc733b8ebe17c9 /drivers/infiniband/core
parent		038a5008b2f395c85e6e71d6ddf3c684e7c405b0 (diff)
parent		3d73c2884f45f9a297cbc956cea101405a9703f2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (87 commits)
  mlx4_core: Fix section mismatches
  IPoIB: Allow setting policy to ignore multicast groups
  IB/mthca: Mark error paths as unlikely() in post_srq_recv functions
  IB/ipath: Minor fix to ordering of freeing and zeroing of tid pages.
  IB/ipath: Remove redundant link state checks
  IB/ipath: Fix IB_EVENT_PORT_ERR event
  IB/ipath: Better handling of unexpected GPIO interrupts
  IB/ipath: Maintain active time on all chips
  IB/ipath: Fix QHT7040 serial number check
  IB/ipath: Indicate a couple of chip bugs to userspace
  IB/ipath: iba6110 rev4 no longer needs recv header overrun workaround
  IB/ipath: Use counters in ipath_poll and cleanup interrupts in ipath_close
  IB/ipath: Remove duplicate copy of LMC
  IB/ipath: Add ability to set the LMC via the sysfs debugging interface
  IB/ipath: Optimize completion queue entry insertion and polling
  IB/ipath: Implement IB_EVENT_QP_LAST_WQE_REACHED
  IB/ipath: Generate flush CQE when QP is in error state
  IB/ipath: Remove redundant code
  IB/ipath: Future proof eeprom checksum code (contents reading)
  IB/ipath: UC RDMA WRITE with IMMEDIATE doesn't send the immediate
  ...
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/addr.c		3
-rw-r--r--	drivers/infiniband/core/cm.c		51
-rw-r--r--	drivers/infiniband/core/cma.c		46
-rw-r--r--	drivers/infiniband/core/device.c	4
-rw-r--r--	drivers/infiniband/core/fmr_pool.c	22
-rw-r--r--	drivers/infiniband/core/multicast.c	2
-rw-r--r--	drivers/infiniband/core/sa_query.c	12
-rw-r--r--	drivers/infiniband/core/ucma.c		74
-rw-r--r--	drivers/infiniband/core/umem.c		20
-rw-r--r--	drivers/infiniband/core/user_mad.c	151
-rw-r--r--	drivers/infiniband/core/uverbs.h	1
-rw-r--r--	drivers/infiniband/core/uverbs_main.c	16
12 files changed, 291 insertions, 111 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index c5c33d35f87d..5381c80de10a 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -161,8 +161,7 @@ static void addr_send_arp(struct sockaddr_in *dst_in)
 	if (ip_route_output_key(&rt, &fl))
 		return;
 
-	arp_send(ARPOP_REQUEST, ETH_P_ARP, rt->rt_gateway, rt->idev->dev,
-		 rt->rt_src, NULL, rt->idev->dev->dev_addr, NULL);
+	neigh_event_send(rt->u.dst.neighbour, NULL);
 	ip_rt_put(rt);
 }
 
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 4df269f5d9ac..2e39236d189f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2219,6 +2219,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 {
 	struct cm_id_private *cm_id_priv;
 	struct ib_mad_send_buf *msg;
+	enum ib_cm_state cm_state;
+	enum ib_cm_lap_state lap_state;
+	enum cm_msg_response msg_response;
 	void *data;
 	unsigned long flags;
 	int ret;
@@ -2235,48 +2238,40 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch(cm_id_priv->id.state) {
 	case IB_CM_REQ_RCVD:
-		ret = cm_alloc_msg(cm_id_priv, &msg);
-		if (ret)
-			goto error1;
-
-		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
-			      CM_MSG_RESPONSE_REQ, service_timeout,
-			      private_data, private_data_len);
-		ret = ib_post_send_mad(msg, NULL);
-		if (ret)
-			goto error2;
-		cm_id->state = IB_CM_MRA_REQ_SENT;
+		cm_state = IB_CM_MRA_REQ_SENT;
+		lap_state = cm_id->lap_state;
+		msg_response = CM_MSG_RESPONSE_REQ;
 		break;
 	case IB_CM_REP_RCVD:
-		ret = cm_alloc_msg(cm_id_priv, &msg);
-		if (ret)
-			goto error1;
-
-		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
-			      CM_MSG_RESPONSE_REP, service_timeout,
-			      private_data, private_data_len);
-		ret = ib_post_send_mad(msg, NULL);
-		if (ret)
-			goto error2;
-		cm_id->state = IB_CM_MRA_REP_SENT;
+		cm_state = IB_CM_MRA_REP_SENT;
+		lap_state = cm_id->lap_state;
+		msg_response = CM_MSG_RESPONSE_REP;
 		break;
 	case IB_CM_ESTABLISHED:
+		cm_state = cm_id->state;
+		lap_state = IB_CM_MRA_LAP_SENT;
+		msg_response = CM_MSG_RESPONSE_OTHER;
+		break;
+	default:
+		ret = -EINVAL;
+		goto error1;
+	}
+
+	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
 		ret = cm_alloc_msg(cm_id_priv, &msg);
 		if (ret)
 			goto error1;
 
 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
-			      CM_MSG_RESPONSE_OTHER, service_timeout,
+			      msg_response, service_timeout,
 			      private_data, private_data_len);
 		ret = ib_post_send_mad(msg, NULL);
 		if (ret)
 			goto error2;
-		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
-		break;
-	default:
-		ret = -EINVAL;
-		goto error1;
 	}
+
+	cm_id->state = cm_state;
+	cm_id->lap_state = lap_state;
 	cm_id_priv->service_timeout = service_timeout;
 	cm_set_private_data(cm_id_priv, data, private_data_len);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2e641b255db4..93644f82592c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -52,6 +52,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 
 #define CMA_CM_RESPONSE_TIMEOUT 20
 #define CMA_MAX_CM_RETRIES 15
+#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -138,6 +139,7 @@ struct rdma_id_private {
 	u32			qkey;
 	u32			qp_num;
 	u8			srq;
+	u8			tos;
 };
 
 struct cma_multicast {
@@ -1089,6 +1091,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.param.ud.private_data_len =
 			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
 	} else {
+		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
 		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
 				       ib_event->private_data, offset);
@@ -1474,6 +1477,15 @@ err:
 }
 EXPORT_SYMBOL(rdma_listen);
 
+void rdma_set_service_type(struct rdma_cm_id *id, int tos)
+{
+	struct rdma_id_private *id_priv;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->tos = (u8) tos;
+}
+EXPORT_SYMBOL(rdma_set_service_type);
+
 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 			      void *context)
 {
@@ -1498,23 +1510,37 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 			      struct cma_work *work)
 {
-	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
+	struct rdma_addr *addr = &id_priv->id.route.addr;
 	struct ib_sa_path_rec path_rec;
+	ib_sa_comp_mask comp_mask;
+	struct sockaddr_in6 *sin6;
 
 	memset(&path_rec, 0, sizeof path_rec);
-	ib_addr_get_sgid(addr, &path_rec.sgid);
-	ib_addr_get_dgid(addr, &path_rec.dgid);
-	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
+	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+
+	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
+		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
+
+	if (addr->src_addr.sa_family == AF_INET) {
+		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
+		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
+	} else {
+		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
+		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
+		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
+	}
 
 	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
 					       id_priv->id.port_num, &path_rec,
-					       IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-					       IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
-					       IB_SA_PATH_REC_REVERSIBLE,
-					       timeout_ms, GFP_KERNEL,
-					       cma_query_handler, work, &id_priv->query);
+					       comp_mask, timeout_ms,
+					       GFP_KERNEL, cma_query_handler,
+					       work, &id_priv->query);
 
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
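
The new rdma_set_service_type() export above only stores the low byte of the caller's TOS value, which the route query then maps into the SA path record QoS class (IPv4) or traffic class (IPv6). A minimal, hypothetical kernel-side caller might look like the sketch below; the wrapper name and the 0x20 value are illustrative, while rdma_resolve_route() is the existing RDMA CM call:

/* Hypothetical ULP helper: set a service type before resolving the route so
 * the SA query above includes IB_SA_PATH_REC_QOS_CLASS. */
static int example_resolve_with_tos(struct rdma_cm_id *id)
{
	rdma_set_service_type(id, 0x20);	/* only the low byte is kept as id_priv->tos */

	return rdma_resolve_route(id, 2000);	/* timeout in ms */
}
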
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 2506c43ba041..5ac5ffee05cb 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -120,12 +120,12 @@ static struct ib_device *__ib_device_get_by_name(const char *name)
 
 static int alloc_name(char *name)
 {
-	long *inuse;
+	unsigned long *inuse;
 	char buf[IB_DEVICE_NAME_MAX];
 	struct ib_device *device;
 	int i;
 
-	inuse = (long *) get_zeroed_page(GFP_KERNEL);
+	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
 	if (!inuse)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index a06bcc65a871..d7f64525469b 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -152,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
 	if (fmr->ref_count !=0) {
-		printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d",
+		printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
 		       fmr, fmr->ref_count);
 	}
 #endif
@@ -170,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 	ret = ib_unmap_fmr(&fmr_list);
 	if (ret)
-		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret);
+		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
 
 	spin_lock_irq(&pool->pool_lock);
 	list_splice(&unmap_list, &pool->free_list);
@@ -235,13 +235,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
-		printk(KERN_WARNING PFX "couldn't allocate device attr struct");
+		printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	ret = ib_query_device(device, attr);
 	if (ret) {
-		printk(KERN_WARNING PFX "couldn't query device: %d", ret);
+		printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
 		kfree(attr);
 		return ERR_PTR(ret);
 	}
@@ -255,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 	pool = kmalloc(sizeof *pool, GFP_KERNEL);
 	if (!pool) {
-		printk(KERN_WARNING PFX "couldn't allocate pool struct");
+		printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -272,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			printk(KERN_WARNING PFX "Failed to allocate cache in pool");
+			printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
@@ -296,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
296 "ib_fmr(%s)", 296 "ib_fmr(%s)",
297 device->name); 297 device->name);
298 if (IS_ERR(pool->thread)) { 298 if (IS_ERR(pool->thread)) {
299 printk(KERN_WARNING PFX "couldn't start cleanup thread"); 299 printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
300 ret = PTR_ERR(pool->thread); 300 ret = PTR_ERR(pool->thread);
301 goto out_free_pool; 301 goto out_free_pool;
302 } 302 }
@@ -314,7 +314,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 				      GFP_KERNEL);
 		if (!fmr) {
 			printk(KERN_WARNING PFX "failed to allocate fmr "
-			       "struct for FMR %d", i);
+			       "struct for FMR %d\n", i);
 			goto out_fail;
 		}
 
@@ -326,7 +326,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
 		if (IS_ERR(fmr->fmr)) {
 			printk(KERN_WARNING PFX "fmr_create failed "
-			       "for FMR %d", i);
+			       "for FMR %d\n", i);
 			kfree(fmr);
 			goto out_fail;
 		}
@@ -381,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 	}
 
 	if (i < pool->pool_size)
-		printk(KERN_WARNING PFX "pool still has %d regions registered",
+		printk(KERN_WARNING PFX "pool still has %d regions registered\n",
 		       pool->pool_size - i);
 
 	kfree(pool->cache_bucket);
@@ -518,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
 	if (fmr->ref_count < 0)
-		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0",
+		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
 		       fmr, fmr->ref_count);
 #endif
 
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 15b4c4d3606d..1bc1fe605282 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -196,7 +196,7 @@ static void queue_join(struct mcast_member *member)
 	unsigned long flags;
 
 	spin_lock_irqsave(&group->lock, flags);
-	list_add(&member->list, &group->pending_list);
+	list_add_tail(&member->list, &group->pending_list);
 	if (group->state == MCAST_IDLE) {
 		group->state = MCAST_BUSY;
 		atomic_inc(&group->refcount);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index d271bd715c12..cf474ec27070 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -123,14 +123,10 @@ static u32 tid;
 	.field_name = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-	{ RESERVED,
+	{ PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits  = 0,
-	  .size_bits    = 32 },
-	{ RESERVED,
-	  .offset_words = 1,
-	  .offset_bits  = 0,
-	  .size_bits    = 32 },
+	  .size_bits    = 64 },
 	{ PATH_REC_FIELD(dgid),
 	  .offset_words = 2,
 	  .offset_bits  = 0,
@@ -179,7 +175,7 @@ static const struct ib_field path_rec_table[] = {
 	  .offset_words = 12,
 	  .offset_bits  = 16,
 	  .size_bits    = 16 },
-	{ RESERVED,
+	{ PATH_REC_FIELD(qos_class),
 	  .offset_words = 13,
 	  .offset_bits  = 0,
 	  .size_bits    = 12 },
@@ -531,7 +527,7 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
 					    query->sm_ah->pkey_index,
 					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
 					    gfp_mask);
-	if (!query->mad_buf) {
+	if (IS_ERR(query->mad_buf)) {
 		kref_put(&query->sm_ah->ref, free_sm_ah);
 		return -ENOMEM;
 	}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 53b4c94a7eb5..90d675ad9ec8 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -792,6 +792,78 @@ out:
 	return ret;
 }
 
+static int ucma_set_option_id(struct ucma_context *ctx, int optname,
+			      void *optval, size_t optlen)
+{
+	int ret = 0;
+
+	switch (optname) {
+	case RDMA_OPTION_ID_TOS:
+		if (optlen != sizeof(u8)) {
+			ret = -EINVAL;
+			break;
+		}
+		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int ucma_set_option_level(struct ucma_context *ctx, int level,
+				 int optname, void *optval, size_t optlen)
+{
+	int ret;
+
+	switch (level) {
+	case RDMA_OPTION_ID:
+		ret = ucma_set_option_id(ctx, optname, optval, optlen);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_set_option cmd;
+	struct ucma_context *ctx;
+	void *optval;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	optval = kmalloc(cmd.optlen, GFP_KERNEL);
+	if (!optval) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
+			   cmd.optlen)) {
+		ret = -EFAULT;
+		goto out2;
+	}
+
+	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
+				    cmd.optlen);
+out2:
+	kfree(optval);
+out1:
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
 static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
 			   int in_len, int out_len)
 {
@@ -936,7 +1008,7 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 	[RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
 	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
 	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
-	[RDMA_USER_CM_CMD_SET_OPTION]	= NULL,
+	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
 	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
 	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
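
With RDMA_USER_CM_CMD_SET_OPTION now wired to ucma_set_option(), userspace can pass a one-byte TOS down to rdma_set_service_type(). A hedged sketch of how an application might exercise this path through librdmacm, assuming the library exposes rdma_set_option() and the RDMA_OPTION_ID / RDMA_OPTION_ID_TOS constants for this ABI:

#include <stdint.h>
#include <rdma/rdma_cma.h>

/* Sketch only: sets the service type on a cm_id; error handling elided. */
static int set_tos(struct rdma_cm_id *id)
{
	uint8_t tos = 0x20;	/* must be exactly one byte, per ucma_set_option_id() */

	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_TOS,
			       &tos, sizeof tos);
}
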
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 664d2faa9e74..2f54e29dc7a6 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 
 #include "uverbs.h"
 
@@ -75,6 +76,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 {
 	struct ib_umem *umem;
 	struct page **page_list;
+	struct vm_area_struct **vma_list;
 	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
@@ -104,6 +106,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	 */
 	umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
 
+	/* We assume the memory is from hugetlb until proved otherwise */
+	umem->hugetlb = 1;
+
 	INIT_LIST_HEAD(&umem->chunk_list);
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
@@ -112,6 +117,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/*
+	 * if we can't alloc the vma_list, it's not so bad;
+	 * just assume the memory is not hugetlb memory
+	 */
+	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
+	if (!vma_list)
+		umem->hugetlb = 0;
+
 	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
 	down_write(&current->mm->mmap_sem);
@@ -131,7 +144,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(int, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, NULL);
+				     1, !umem->writable, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
@@ -152,6 +165,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
 		for (i = 0; i < chunk->nents; ++i) {
+			if (vma_list &&
+			    !is_vm_hugetlb_page(vma_list[i + off]))
+				umem->hugetlb = 0;
 			chunk->page_list[i].page   = page_list[i + off];
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
@@ -186,6 +202,8 @@ out:
 		current->mm->locked_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
+	if (vma_list)
+		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 
 	return ret < 0 ? ERR_PTR(ret) : umem;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index d97ded25c4ff..b53eac4611de 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -44,6 +44,7 @@
 #include <linux/poll.h>
 #include <linux/rwsem.h>
 #include <linux/kref.h>
+#include <linux/compat.h>
 
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
@@ -118,6 +119,8 @@ struct ib_umad_file {
 	wait_queue_head_t	recv_wait;
 	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
 	int			agents_dead;
+	u8			use_pkey_index;
+	u8			already_used;
 };
 
 struct ib_umad_packet {
@@ -147,6 +150,12 @@ static void ib_umad_release_dev(struct kref *ref)
 	kfree(dev);
 }
 
+static int hdr_size(struct ib_umad_file *file)
+{
+	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
+		sizeof (struct ib_user_mad_hdr_old);
+}
+
 /* caller must hold port->mutex at least for reading */
 static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
 {
@@ -221,13 +230,13 @@ static void recv_handler(struct ib_mad_agent *agent,
 	packet->length = mad_recv_wc->mad_len;
 	packet->recv_wc = mad_recv_wc;
 
-	packet->mad.hdr.status    = 0;
-	packet->mad.hdr.length    = sizeof (struct ib_user_mad) +
-				    mad_recv_wc->mad_len;
-	packet->mad.hdr.qpn       = cpu_to_be32(mad_recv_wc->wc->src_qp);
-	packet->mad.hdr.lid       = cpu_to_be16(mad_recv_wc->wc->slid);
-	packet->mad.hdr.sl        = mad_recv_wc->wc->sl;
-	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
+	packet->mad.hdr.status	   = 0;
+	packet->mad.hdr.length	   = hdr_size(file) + mad_recv_wc->mad_len;
+	packet->mad.hdr.qpn	   = cpu_to_be32(mad_recv_wc->wc->src_qp);
+	packet->mad.hdr.lid	   = cpu_to_be16(mad_recv_wc->wc->slid);
+	packet->mad.hdr.sl	   = mad_recv_wc->wc->sl;
+	packet->mad.hdr.path_bits  = mad_recv_wc->wc->dlid_path_bits;
+	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
 	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
 	if (packet->mad.hdr.grh_present) {
 		struct ib_ah_attr ah_attr;
@@ -253,8 +262,8 @@ err1:
 	ib_free_recv_mad(mad_recv_wc);
 }
 
-static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
-			     size_t count)
+static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
+			     struct ib_umad_packet *packet, size_t count)
 {
 	struct ib_mad_recv_buf *recv_buf;
 	int left, seg_payload, offset, max_seg_payload;
@@ -262,15 +271,15 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 	/* We need enough room to copy the first (or only) MAD segment. */
 	recv_buf = &packet->recv_wc->recv_buf;
 	if ((packet->length <= sizeof (*recv_buf->mad) &&
-	     count < sizeof (packet->mad) + packet->length) ||
+	     count < hdr_size(file) + packet->length) ||
 	    (packet->length > sizeof (*recv_buf->mad) &&
-	     count < sizeof (packet->mad) + sizeof (*recv_buf->mad)))
+	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
 		return -EINVAL;
 
-	if (copy_to_user(buf, &packet->mad, sizeof (packet->mad)))
+	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
 		return -EFAULT;
 
-	buf += sizeof (packet->mad);
+	buf += hdr_size(file);
 	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
 	if (copy_to_user(buf, recv_buf->mad, seg_payload))
 		return -EFAULT;
@@ -280,7 +289,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 	 * Multipacket RMPP MAD message. Copy remainder of message.
 	 * Note that last segment may have a shorter payload.
 	 */
-	if (count < sizeof (packet->mad) + packet->length) {
+	if (count < hdr_size(file) + packet->length) {
 		/*
 		 * The buffer is too small, return the first RMPP segment,
 		 * which includes the RMPP message length.
@@ -300,18 +309,23 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 			return -EFAULT;
 		}
 	}
-	return sizeof (packet->mad) + packet->length;
+	return hdr_size(file) + packet->length;
 }
 
-static ssize_t copy_send_mad(char __user *buf, struct ib_umad_packet *packet,
-			     size_t count)
+static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
+			     struct ib_umad_packet *packet, size_t count)
 {
-	ssize_t size = sizeof (packet->mad) + packet->length;
+	ssize_t size = hdr_size(file) + packet->length;
 
 	if (count < size)
 		return -EINVAL;
 
-	if (copy_to_user(buf, &packet->mad, size))
+	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
+		return -EFAULT;
+
+	buf += hdr_size(file);
+
+	if (copy_to_user(buf, packet->mad.data, packet->length))
 		return -EFAULT;
 
 	return size;
@@ -324,7 +338,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	struct ib_umad_packet *packet;
 	ssize_t ret;
 
-	if (count < sizeof (struct ib_user_mad))
+	if (count < hdr_size(file))
 		return -EINVAL;
 
 	spin_lock_irq(&file->recv_lock);
@@ -348,9 +362,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	spin_unlock_irq(&file->recv_lock);
 
 	if (packet->recv_wc)
-		ret = copy_recv_mad(buf, packet, count);
+		ret = copy_recv_mad(file, buf, packet, count);
 	else
-		ret = copy_send_mad(buf, packet, count);
+		ret = copy_send_mad(file, buf, packet, count);
 
 	if (ret < 0) {
 		/* Requeue packet */
@@ -442,15 +456,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
-	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
+	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
 
 	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;
 
-	if (copy_from_user(&packet->mad, buf,
-			   sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
+	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
 		ret = -EFAULT;
 		goto err;
 	}
@@ -461,6 +474,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}
 
+	buf += hdr_size(file);
+
+	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
+		ret = -EFAULT;
+		goto err;
+	}
+
 	down_read(&file->port->mutex);
 
 	agent = __get_agent(file, packet->mad.hdr.id);
@@ -500,11 +520,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			IB_MGMT_RMPP_FLAG_ACTIVE;
 	}
 
-	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
+	data_len = count - hdr_size(file) - hdr_len;
 	packet->msg = ib_create_send_mad(agent,
 					 be32_to_cpu(packet->mad.hdr.qpn),
-					 0, rmpp_active, hdr_len,
-					 data_len, GFP_KERNEL);
+					 packet->mad.hdr.pkey_index, rmpp_active,
+					 hdr_len, data_len, GFP_KERNEL);
 	if (IS_ERR(packet->msg)) {
 		ret = PTR_ERR(packet->msg);
 		goto err_ah;
@@ -517,7 +537,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 
 	/* Copy MAD header. Any RMPP header is already in place. */
 	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
-	buf += sizeof (struct ib_user_mad);
 
 	if (!rmpp_active) {
 		if (copy_from_user(packet->msg->mad + copy_offset,
@@ -589,7 +608,8 @@ static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wa
 	return mask;
 }
 
-static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
+static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
+			     int compat_method_mask)
 {
 	struct ib_user_mad_reg_req ureq;
 	struct ib_mad_reg_req req;
@@ -604,7 +624,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
 		goto out;
 	}
 
-	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
+	if (copy_from_user(&ureq, arg, sizeof ureq)) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -625,8 +645,18 @@ found:
 	if (ureq.mgmt_class) {
 		req.mgmt_class = ureq.mgmt_class;
 		req.mgmt_class_version = ureq.mgmt_class_version;
-		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
-		memcpy(req.oui, ureq.oui, sizeof req.oui);
+		memcpy(req.oui, ureq.oui, sizeof req.oui);
+
+		if (compat_method_mask) {
+			u32 *umm = (u32 *) ureq.method_mask;
+			int i;
+
+			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
+				req.method_mask[i] =
+					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
+		} else
+			memcpy(req.method_mask, ureq.method_mask,
+			       sizeof req.method_mask);
 	}
 
 	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
@@ -646,6 +676,16 @@ found:
 		goto out;
 	}
 
+	if (!file->already_used) {
+		file->already_used = 1;
+		if (!file->use_pkey_index) {
+			printk(KERN_WARNING "user_mad: process %s did not enable "
+			       "P_Key index support.\n", current->comm);
+			printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt "
+			       "has info on the new ABI.\n");
+		}
+	}
+
 	file->agent[agent_id] = agent;
 	ret = 0;
 
@@ -654,13 +694,13 @@ out:
 	return ret;
 }
 
-static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
+static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 {
 	struct ib_mad_agent *agent = NULL;
 	u32 id;
 	int ret = 0;
 
-	if (get_user(id, (u32 __user *) arg))
+	if (get_user(id, arg))
 		return -EFAULT;
 
 	down_write(&file->port->mutex);
@@ -682,18 +722,51 @@ out:
 	return ret;
 }
 
+static long ib_umad_enable_pkey(struct ib_umad_file *file)
+{
+	int ret = 0;
+
+	down_write(&file->port->mutex);
+	if (file->already_used)
+		ret = -EINVAL;
+	else
+		file->use_pkey_index = 1;
+	up_write(&file->port->mutex);
+
+	return ret;
+}
+
 static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
 			  unsigned long arg)
 {
 	switch (cmd) {
 	case IB_USER_MAD_REGISTER_AGENT:
-		return ib_umad_reg_agent(filp->private_data, arg);
+		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
+	case IB_USER_MAD_UNREGISTER_AGENT:
+		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
+	case IB_USER_MAD_ENABLE_PKEY:
+		return ib_umad_enable_pkey(filp->private_data);
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
+				 unsigned long arg)
+{
+	switch (cmd) {
+	case IB_USER_MAD_REGISTER_AGENT:
+		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
 	case IB_USER_MAD_UNREGISTER_AGENT:
-		return ib_umad_unreg_agent(filp->private_data, arg);
+		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
+	case IB_USER_MAD_ENABLE_PKEY:
+		return ib_umad_enable_pkey(filp->private_data);
 	default:
 		return -ENOIOCTLCMD;
 	}
 }
+#endif
 
 static int ib_umad_open(struct inode *inode, struct file *filp)
 {
@@ -782,7 +855,9 @@ static const struct file_operations umad_fops = {
 	.write		= ib_umad_write,
 	.poll		= ib_umad_poll,
 	.unlocked_ioctl = ib_umad_ioctl,
-	.compat_ioctl	= ib_umad_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= ib_umad_compat_ioctl,
+#endif
 	.open		= ib_umad_open,
 	.release	= ib_umad_close
 };
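
Because the P_Key-aware ib_user_mad header is opt-in, a MAD application has to issue the new ioctl before registering any agent on the file descriptor; otherwise the fd keeps the old ib_user_mad_hdr_old layout and the kernel prints the warning added above. A hedged userspace sketch (the device path, header include and fallback handling are illustrative assumptions):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

/* Sketch only: opt in to the pkey_index ABI before any REGISTER_AGENT. */
static int open_umad_pkey_aware(const char *path)
{
	int fd = open(path, O_RDWR);		/* e.g. "/dev/infiniband/umad0" */

	if (fd < 0)
		return -1;

	if (ioctl(fd, IB_USER_MAD_ENABLE_PKEY) < 0) {
		/* older kernel: continue with the old header layout */
	}

	return fd;
}
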
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c33546f9e961..c75eb6c9bd49 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -148,7 +148,6 @@ void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
 
 struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 					int is_async, int *fd);
-void ib_uverbs_release_event_file(struct kref *ref);
 struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
 
 void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 14d7ccd89195..7c2ac3905582 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -125,6 +125,14 @@ static void ib_uverbs_release_dev(struct kref *ref)
 	complete(&dev->comp);
 }
 
+static void ib_uverbs_release_event_file(struct kref *ref)
+{
+	struct ib_uverbs_event_file *file =
+		container_of(ref, struct ib_uverbs_event_file, ref);
+
+	kfree(file);
+}
+
 void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
 			  struct ib_uverbs_event_file *ev_file,
 			  struct ib_ucq_object *uobj)
@@ -331,14 +339,6 @@ static unsigned int ib_uverbs_event_poll(struct file *filp,
 	return pollflags;
 }
 
-void ib_uverbs_release_event_file(struct kref *ref)
-{
-	struct ib_uverbs_event_file *file =
-		container_of(ref, struct ib_uverbs_event_file, ref);
-
-	kfree(file);
-}
-
 static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;