author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-07 20:08:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-07 20:08:02 -0400
commit		3cc08fc35db75b059118626c30b60b0f56583802 (patch)
tree		704d71199c8be8d5b822ca424675291e8cec7bde /drivers/infiniband/hw/qib
parent		faa38b5e0e092914764cdba9f83d31a3f794d182 (diff)
parent		03b37ecdb3975f09832747600853d3818a50eda3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
  IB/qib: Add missing <linux/slab.h> include
  IB/ehca: Drop unnecessary NULL test
  RDMA/nes: Fix confusing if statement indentation
  IB/ehca: Init irq tasklet before irq can happen
  RDMA/nes: Fix misindented code
  RDMA/nes: Fix showing wqm_quanta
  RDMA/nes: Get rid of "set but not used" variables
  RDMA/nes: Read firmware version from correct place
  IB/srp: Export req_lim via sysfs
  IB/srp: Make receive buffer handling more robust
  IB/srp: Use print_hex_dump()
  IB: Rename RAW_ETY to RAW_ETHERTYPE
  RDMA/nes: Fix two sparse warnings
  RDMA/cxgb3: Make needlessly global iwch_l2t_send() static
  IB/iser: Make needlessly global iser_alloc_rx_descriptors() static
  RDMA/cxgb4: Add timeouts when waiting for FW responses
  IB/qib: Fix race between qib_error_qp() and receive packet processing
  IB/qib: Limit the number of packets processed per interrupt
  IB/qib: Allow writes to the diag_counters to be able to clear them
  IB/qib: Set cfgctxts to number of CPUs by default
  ...
Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--	drivers/infiniband/hw/qib/qib.h			4
-rw-r--r--	drivers/infiniband/hw/qib/qib_common.h		16
-rw-r--r--	drivers/infiniband/hw/qib/qib_driver.c		2
-rw-r--r--	drivers/infiniband/hw/qib/qib_file_ops.c	203
-rw-r--r--	drivers/infiniband/hw/qib/qib_fs.c		18
-rw-r--r--	drivers/infiniband/hw/qib/qib_iba7322.c		4
-rw-r--r--	drivers/infiniband/hw/qib/qib_init.c		2
-rw-r--r--	drivers/infiniband/hw/qib/qib_qp.c		2
-rw-r--r--	drivers/infiniband/hw/qib/qib_rc.c		47
-rw-r--r--	drivers/infiniband/hw/qib/qib_sdma.c		2
-rw-r--r--	drivers/infiniband/hw/qib/qib_sysfs.c		21
-rw-r--r--	drivers/infiniband/hw/qib/qib_tx.c		2
-rw-r--r--	drivers/infiniband/hw/qib/qib_uc.c		6
-rw-r--r--	drivers/infiniband/hw/qib/qib_ud.c		17
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs.c		7
15 files changed, 183 insertions, 170 deletions
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3593983df7ba..61de0654820e 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/fs.h>
 #include <linux/completion.h>
@@ -326,6 +327,9 @@ struct qib_verbs_txreq {
 
 #define QIB_DEFAULT_MTU 4096
 
+/* max number of IB ports supported per HCA */
+#define QIB_MAX_IB_PORTS 2
+
 /*
  * Possible IB config parameters for f_get/set_ib_table()
  */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index b3955ed8f794..145da4040883 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@ struct qib_base_info {
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define QIB_USER_SWMINOR 10
+#define QIB_USER_SWMINOR 11
 
 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
 
@@ -302,6 +302,18 @@ struct qib_base_info {
 #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
 
 /*
+ * If the unit is specified via open, HCA choice is fixed. If port is
+ * specified, it's also fixed. Otherwise we try to spread contexts
+ * across ports and HCAs, using different algorithims. WITHIN is
+ * the old default, prior to this mechanism.
+ */
+#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
+				 * ports; this is the default */
+#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
+				 * active ports within), then next HCA */
+#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
+
+/*
  * This structure is passed to qib_userinit() to tell the driver where
  * user code buffers are, sizes, etc. The offsets and sizes of the
  * fields must remain unchanged, for binary compatibility. It can
@@ -319,7 +331,7 @@ struct qib_user_info {
 	/* size of struct base_info to write to */
 	__u32 spu_base_info_size;
 
-	__u32 _spu_unused3;
+	__u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */
 
 	/*
 	 * If two or more processes wish to share a context, each process
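
The new spu_port_alg field is only interpreted when the user software minor version is at least 11; older kernels still see it as the _spu_unused3 padding. A minimal userspace sketch of requesting the WITHIN policy through this field (it assumes a userspace copy of qib_common.h and omits the rest of the normal open/init path):

	#include <string.h>
	#include "qib_common.h"	/* assumed userspace copy of this ABI header */

	/* Ask the driver to fill all contexts on one HCA (round-robining its
	 * active ports) before moving on to the next HCA. */
	static void request_within_alg(struct qib_user_info *uinfo)
	{
		memset(uinfo, 0, sizeof(*uinfo));
		uinfo->spu_userversion = QIB_USER_SWVERSION;	/* minor >= 11 */
		uinfo->spu_port_alg = QIB_PORT_ALG_WITHIN;
		/* remaining fields are filled in by the normal open path */
	}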
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f15ce076ac49..9cd193603fb1 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last; i += !last) {
+	for (last = 0, i = 1; !last && i <= 64; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
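
The hunk above caps receive processing at 64 packets per call so one busy receive queue cannot monopolize the interrupt handler. A standalone sketch of the same budgeting pattern (the queue type and helpers are illustrative, not driver symbols):

	#include <stdbool.h>

	#define RX_BUDGET 64

	struct rx_queue { int pending; };	/* stand-in for the real ring state */

	static bool rx_queue_has_packet(const struct rx_queue *rxq) { return rxq->pending > 0; }
	static void rx_handle_one(struct rx_queue *rxq) { rxq->pending--; }

	/* Handle at most RX_BUDGET packets, then return so the caller can
	 * re-arm the interrupt; leftovers are picked up on the next pass. */
	static void rx_process_budgeted(struct rx_queue *rxq)
	{
		int handled = 0;

		while (handled < RX_BUDGET && rx_queue_has_packet(rxq)) {
			rx_handle_one(rxq);
			handled++;
		}
	}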
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a142a9eb5226..6b11645edf35 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1294,128 +1294,130 @@ bail:
 	return ret;
 }
 
-static inline int usable(struct qib_pportdata *ppd, int active_only)
+static inline int usable(struct qib_pportdata *ppd)
 {
 	struct qib_devdata *dd = ppd->dd;
-	u32 linkok = active_only ? QIBL_LINKACTIVE :
-		(QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
 
 	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
-		(ppd->lflags & linkok);
+		(ppd->lflags & QIBL_LINKACTIVE);
 }
 
-static int find_free_ctxt(int unit, struct file *fp,
-			  const struct qib_user_info *uinfo)
+/*
+ * Select a context on the given device, either using a requested port
+ * or the port based on the context number.
+ */
+static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
+			    const struct qib_user_info *uinfo)
 {
-	struct qib_devdata *dd = qib_lookup(unit);
 	struct qib_pportdata *ppd = NULL;
-	int ret;
-	u32 ctxt;
+	int ret, ctxt;
 
-	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	/*
-	 * If users requests specific port, only try that one port, else
-	 * select "best" port below, based on context.
-	 */
-	if (uinfo->spu_port) {
-		ppd = dd->pport + uinfo->spu_port - 1;
-		if (!usable(ppd, 0)) {
+	if (port) {
+		if (!usable(dd->pport + port - 1)) {
 			ret = -ENETDOWN;
-			goto bail;
-		}
+			goto done;
+		} else
+			ppd = dd->pport + port - 1;
 	}
-
-	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
-		if (dd->rcd[ctxt])
-			continue;
-		/*
-		 * The setting and clearing of user context rcd[x] protected
-		 * by the qib_mutex
-		 */
-		if (!ppd) {
-			/* choose port based on ctxt, if up, else 1st up */
-			ppd = dd->pport + (ctxt % dd->num_pports);
-			if (!usable(ppd, 0)) {
-				int i;
-				for (i = 0; i < dd->num_pports; i++) {
-					ppd = dd->pport + i;
-					if (usable(ppd, 0))
-						break;
-				}
-				if (i == dd->num_pports) {
-					ret = -ENETDOWN;
-					goto bail;
-				}
-			}
+	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
+	     ctxt++)
+		;
+	if (ctxt == dd->cfgctxts) {
+		ret = -EBUSY;
+		goto done;
+	}
+	if (!ppd) {
+		u32 pidx = ctxt % dd->num_pports;
+		if (usable(dd->pport + pidx))
+			ppd = dd->pport + pidx;
+		else {
+			for (pidx = 0; pidx < dd->num_pports && !ppd;
+			     pidx++)
+				if (usable(dd->pport + pidx))
+					ppd = dd->pport + pidx;
 		}
-		ret = setup_ctxt(ppd, ctxt, fp, uinfo);
-		goto bail;
 	}
-	ret = -EBUSY;
+	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
+done:
+	return ret;
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+			  const struct qib_user_info *uinfo)
+{
+	struct qib_devdata *dd = qib_lookup(unit);
+	int ret;
+
+	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
+		ret = -ENODEV;
+	else
+		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
 
-bail:
 	return ret;
 }
 
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
+		      unsigned alg)
 {
-	struct qib_pportdata *ppd;
-	int ret = 0, devmax;
-	int npresent, nup;
-	int ndev;
+	struct qib_devdata *udd = NULL;
+	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
 	u32 port = uinfo->spu_port, ctxt;
 
 	devmax = qib_count_units(&npresent, &nup);
+	if (!npresent) {
+		ret = -ENXIO;
+		goto done;
+	}
+	if (nup == 0) {
+		ret = -ENETDOWN;
+		goto done;
+	}
 
-	for (ndev = 0; ndev < devmax; ndev++) {
-		struct qib_devdata *dd = qib_lookup(ndev);
-
-		/* device portion of usable() */
-		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
-			continue;
-		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
-			if (dd->rcd[ctxt])
-				continue;
-			if (port) {
-				if (port > dd->num_pports)
-					continue;
-				ppd = dd->pport + port - 1;
-				if (!usable(ppd, 0))
-					continue;
-			} else {
-				/*
-				 * choose port based on ctxt, if up, else
-				 * first port that's up for multi-port HCA
-				 */
-				ppd = dd->pport + (ctxt % dd->num_pports);
-				if (!usable(ppd, 0)) {
-					int j;
-
-					ppd = NULL;
-					for (j = 0; j < dd->num_pports &&
-						!ppd; j++)
-						if (usable(dd->pport + j, 0))
-							ppd = dd->pport + j;
-					if (!ppd)
-						continue; /* to next unit */
-				}
-			}
-			ret = setup_ctxt(ppd, ctxt, fp, uinfo);
-			goto done;
-		}
-	}
-
-	if (npresent) {
-		if (nup == 0)
-			ret = -ENETDOWN;
-		else
-			ret = -EBUSY;
-	} else
-		ret = -ENXIO;
+	if (alg == QIB_PORT_ALG_ACROSS) {
+		unsigned inuse = ~0U;
+		/* find device (with ACTIVE ports) with fewest ctxts in use */
+		for (ndev = 0; ndev < devmax; ndev++) {
+			struct qib_devdata *dd = qib_lookup(ndev);
+			unsigned cused = 0, cfree = 0;
+			if (!dd)
+				continue;
+			if (port && port <= dd->num_pports &&
+			    usable(dd->pport + port - 1))
+				dusable = 1;
+			else
+				for (i = 0; i < dd->num_pports; i++)
+					if (usable(dd->pport + i))
+						dusable++;
+			if (!dusable)
+				continue;
+			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
+			     ctxt++)
+				if (dd->rcd[ctxt])
+					cused++;
+				else
+					cfree++;
+			if (cfree && cused < inuse) {
+				udd = dd;
+				inuse = cused;
+			}
+		}
+		if (udd) {
+			ret = choose_port_ctxt(fp, udd, port, uinfo);
+			goto done;
+		}
+	} else {
+		for (ndev = 0; ndev < devmax; ndev++) {
+			struct qib_devdata *dd = qib_lookup(ndev);
+			if (dd) {
+				ret = choose_port_ctxt(fp, dd, port, uinfo);
+				if (!ret)
+					goto done;
+				if (ret == -EBUSY)
+					dusable++;
+			}
+		}
+	}
+	ret = dusable ? -EBUSY : -ENETDOWN;
 
 done:
 	return ret;
@@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 {
 	int ret;
 	int i_minor;
-	unsigned swmajor, swminor;
+	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
 
 	/* Check to be sure we haven't already initialized this file */
 	if (ctxt_fp(fp)) {
@@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 
 	swminor = uinfo->spu_userversion & 0xffff;
 
+	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
+		alg = uinfo->spu_port_alg;
+
 	mutex_lock(&qib_mutex);
 
 	if (qib_compatible_subctxts(swmajor, swminor) &&
@@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 	if (i_minor)
 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
 	else
-		ret = get_a_ctxt(fp, uinfo);
+		ret = get_a_ctxt(fp, uinfo, alg);
 
 done_chk_sdma:
 	if (!ret) {
@@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
 {
 	int ret = 0;
 
-	if (!usable(rcd->ppd, 1)) {
+	if (!usable(rcd->ppd)) {
 		int i;
 		/*
 		 * if link is down, or otherwise not usable, delay
@@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd)
 				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
 					&rcd->user_event_mask[i]);
 		}
-		for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
 			msleep(100);
 		ret = -ENETDOWN;
 	}
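
Stripped of the driver types, the ACROSS policy in get_a_ctxt() above boils down to "pick the usable device with the fewest user contexts already in use, provided it still has a free one". A self-contained sketch of that selection (the arrays are illustrative stand-ins for per-device state):

	#include <limits.h>

	/* used[i]/avail[i]: contexts in use / still free on device i;
	 * usable[i]: device i has at least one ACTIVE port. */
	static int pick_device_across(int ndev, const int *used, const int *avail,
				      const int *usable)
	{
		int best = -1, best_used = INT_MAX, i;

		for (i = 0; i < ndev; i++) {
			if (!usable[i] || !avail[i])
				continue;
			if (used[i] < best_used) {	/* fewest in use wins */
				best_used = used[i];
				best = i;
			}
		}
		return best;	/* -1 maps to the -EBUSY/-ENETDOWN cases above */
	}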
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 844954bf417b..9f989c0ba9d3 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations driver_ops[] = {
-	{ .read = driver_stats_read, },
-	{ .read = driver_names_read, },
+	{ .read = driver_stats_read, .llseek = generic_file_llseek, },
+	{ .read = driver_names_read, .llseek = generic_file_llseek, },
 };
 
 /* read the per-device counters */
@@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations cntr_ops[] = {
-	{ .read = dev_counters_read, },
-	{ .read = dev_names_read, },
+	{ .read = dev_counters_read, .llseek = generic_file_llseek, },
+	{ .read = dev_names_read, .llseek = generic_file_llseek, },
 };
 
 /*
@@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations portcntr_ops[] = {
-	{ .read = portnames_read, },
-	{ .read = portcntrs_1_read, },
-	{ .read = portcntrs_2_read, },
+	{ .read = portnames_read, .llseek = generic_file_llseek, },
+	{ .read = portcntrs_1_read, .llseek = generic_file_llseek, },
+	{ .read = portcntrs_2_read, .llseek = generic_file_llseek, },
 };
 
 /*
@@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations qsfp_ops[] = {
-	{ .read = qsfp_1_read, },
-	{ .read = qsfp_2_read, },
+	{ .read = qsfp_1_read, .llseek = generic_file_llseek, },
+	{ .read = qsfp_2_read, .llseek = generic_file_llseek, },
 };
 
 static ssize_t flash_read(struct file *file, char __user *buf,
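
Every qib_fs hunk makes the same change: each read-only qibfs entry now declares an explicit .llseek handler. A hedged sketch of the resulting pattern for one entry (the read body is elided):

	#include <linux/fs.h>

	static ssize_t example_stats_read(struct file *file, char __user *buf,
					  size_t count, loff_t *ppos)
	{
		/* format counters into a kernel buffer, then return
		 * simple_read_from_buffer(buf, count, ppos, kbuf, len); */
		return 0;
	}

	/* With .llseek set, lseek() on the entry has well-defined behaviour
	 * instead of relying on the historical default. */
	static const struct file_operations example_stats_ops = {
		.read   = example_stats_read,
		.llseek = generic_file_llseek,
	};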
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5eedf83e2c3b..584d443b5335 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
 	 * Doesn't clear any of the error bits that might be set.
 	 */
 	val = TIDFLOW_ERRBITS; /* these are W1C */
-	for (i = 0; i < dd->ctxtcnt; i++) {
+	for (i = 0; i < dd->cfgctxts; i++) {
 		int flow;
 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
@@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
 	data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	/* Turn off IB latency mode */
+	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
 	qib_write_kreg_port(ppd, krp_serdesctrl, data |
 		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a873dd596e81..f1d16d3a01f6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@ unsigned long *qib_cpulist;
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
 	if (!qib_cfgctxts)
-		dd->cfgctxts = dd->ctxtcnt;
+		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
 	else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e0f65e39076b..6c39851d2ded 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
  *
  * Flushes both send and receive work queues.
  * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  * If we are already in error state, just return.
  */
 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a373719c..a0931119bd78 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@ done:
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	ibp->n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto unlock_done;
 
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else. Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@ send_last:
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
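
Taken together, the qib_rc.c, qib_uc.c, qib_ud.c, qib_sdma.c and qib_verbs.c hunks move the r_lock acquisition out of the individual receive routines and establish a single nesting order, r_lock before s_lock, wherever both are needed; that is what lets qib_error_qp() assume both locks are held. A sketch of the ordering as it now appears in rc_timeout() and the sdma error path (the struct is reduced to the two locks that matter):

	#include <linux/spinlock.h>

	struct example_qp {
		spinlock_t r_lock;	/* receive state */
		spinlock_t s_lock;	/* send state */
	};

	static void example_locked_update(struct example_qp *qp)
	{
		unsigned long flags;

		/* r_lock first, then s_lock; receive processing after an APM
		 * path migration can no longer race with the error path. */
		spin_lock_irqsave(&qp->r_lock, flags);
		spin_lock(&qp->s_lock);

		/* ... modify send/receive state, possibly move the QP to error ... */

		spin_unlock(&qp->s_lock);
		spin_unlock_irqrestore(&qp->r_lock, flags);
	}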
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index b8456881f7f6..cad44491320b 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -656,6 +656,7 @@ unmap:
 	}
 	qp = tx->qp;
 	qib_put_txreq(tx);
+	spin_lock(&qp->r_lock);
 	spin_lock(&qp->s_lock);
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		/* XXX what about error sending RDMA read responses? */
@@ -664,6 +665,7 @@ unmap:
 	} else if (qp->s_wqe)
 		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
 	spin_unlock(&qp->s_lock);
+	spin_unlock(&qp->r_lock);
 	/* return zero to process the next send work request */
 	goto unlock;
 
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dab4d9f4a2cc..d50a33fe8bbc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = {
 
 #define QIB_DIAGC_ATTR(N) \
 	static struct qib_diagc_attr qib_diagc_attr_##N = { \
-		.attr = { .name = __stringify(N), .mode = 0444 }, \
+		.attr = { .name = __stringify(N), .mode = 0664 }, \
 		.counter = offsetof(struct qib_ibport, n_##N) \
 	}
 
@@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
 	return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
 }
 
+static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t size)
+{
+	struct qib_diagc_attr *dattr =
+		container_of(attr, struct qib_diagc_attr, attr);
+	struct qib_pportdata *ppd =
+		container_of(kobj, struct qib_pportdata, diagc_kobj);
+	struct qib_ibport *qibp = &ppd->ibport_data;
+	char *endp;
+	long val = simple_strtol(buf, &endp, 0);
+
+	if (val < 0 || endp == buf)
+		return -EINVAL;
+
+	*(u32 *)((char *) qibp + dattr->counter) = val;
+	return size;
+}
+
 static const struct sysfs_ops qib_diagc_ops = {
 	.show = diagc_attr_show,
+	.store = diagc_attr_store,
 };
 
 static struct kobj_type qib_diagc_ktype = {
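
With diagc_attr_store() wired into qib_diagc_ops and the attribute mode widened to 0664, the per-port diag counters can be cleared from userspace by writing a value to them. A hedged userspace sketch (the sysfs path and counter name are illustrative; the exact location depends on the device and port naming):

	#include <stdio.h>

	int main(void)
	{
		/* Writing "0" goes through diagc_attr_store(), which parses
		 * the value and overwrites the counter. Path is an assumption. */
		FILE *f = fopen("/sys/class/infiniband/qib0/ports/1/diag_counters/rc_timeouts", "w");

		if (!f)
			return 1;
		fputs("0\n", f);
		fclose(f);
		return 0;
	}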
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index af30232b6831..7f36454c225e 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
 void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 			    unsigned cnt)
 {
-	struct qib_pportdata *ppd, *pppd[dd->num_pports];
+	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
 	unsigned i;
 	unsigned long flags;
 
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 6c7fe78cca64..b9c8b6346c1b 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	opcode >>= 24;
 	memset(&wc, 0, sizeof wc);
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/* Compare the PSN verses the expected PSN. */
 	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
 		/*
@@ -534,7 +531,6 @@ rdma_last:
 	}
 	qp->r_psn++;
 	qp->r_state = opcode;
-	spin_unlock(&qp->r_lock);
 	return;
 
 rewind:
@@ -542,12 +538,10 @@ rewind:
 	qp->r_sge.num_sge = 0;
 drop:
 	ibp->n_pkt_drops++;
-	spin_unlock(&qp->r_lock);
 	return;
 
 op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index c838cda73347..e1b3da2a1f85 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	wc.byte_len = tlen + sizeof(struct ib_grh);
 
 	/*
-	 * We need to serialize getting a receive work queue entry and
-	 * generating a completion for it against QPs sending to this QP
-	 * locally.
-	 */
-	spin_lock(&qp->r_lock);
-
-	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
 	if (qp->r_flags & QIB_R_REUSE_SGE)
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		ret = qib_get_rwqe(qp, 0);
 		if (ret < 0) {
 			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto bail_unlock;
+			return;
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
 				ibp->n_vl15_dropped++;
-			goto bail_unlock;
+			return;
 		}
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
 		ibp->n_pkt_drops++;
-		goto bail_unlock;
+		return;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		qp->r_sge.sge = *qp->r_sge.sg_list++;
 	}
 	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
-		goto bail_unlock;
+		return;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
 	wc.opcode = IB_WC_RECV;
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail_unlock:
-	spin_unlock(&qp->r_lock);
 bail:;
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cda8f4173d23..9fab40488850 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 {
 	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
 
+	spin_lock(&qp->r_lock);
+
 	/* Check for valid receive state. */
 	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
 		ibp->n_pkt_drops++;
-		return;
+		goto unlock;
 	}
 
 	switch (qp->ibqp.qp_type) {
@@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	default:
 		break;
 	}
+
+unlock:
+	spin_unlock(&qp->r_lock);
 }
 
 /**