aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	Ganesh Goudar <ganeshgr@chelsio.com>	2018-01-25 02:59:43 -0500
committer	David S. Miller <davem@davemloft.net>	2018-01-25 16:30:54 -0500
commitd9ac2d99787d08c0e3f777dac5aeaa3fed61b692 (patch)
tree4fa6af4c7579459e3543cd19de8b0bd122569869
parent955ec4cb3b54c7c389a9f830be7d3ae2056b9212 (diff)
cxgb4: fix possible deadlock
t4_wr_mbox_meat_timeout() can be called from both softirq context and process context; hence protect the mbox with spin_lock_bh() instead of simple spin_lock().

Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c	| 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 34055476288c..af27d2b0f79f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -317,9 +317,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	 * wait [for a while] till we're at the front [or bail out with an
 	 * EBUSY] ...
 	 */
-	spin_lock(&adap->mbox_lock);
+	spin_lock_bh(&adap->mbox_lock);
 	list_add_tail(&entry.list, &adap->mlist.list);
-	spin_unlock(&adap->mbox_lock);
+	spin_unlock_bh(&adap->mbox_lock);
 
 	delay_idx = 0;
 	ms = delay[0];
@@ -332,9 +332,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 		 */
 		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
 		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
-			spin_lock(&adap->mbox_lock);
+			spin_lock_bh(&adap->mbox_lock);
 			list_del(&entry.list);
-			spin_unlock(&adap->mbox_lock);
+			spin_unlock_bh(&adap->mbox_lock);
 			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
 			t4_record_mbox(adap, cmd, size, access, ret);
 			return ret;
@@ -365,9 +365,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 	if (v != MBOX_OWNER_DRV) {
-		spin_lock(&adap->mbox_lock);
+		spin_lock_bh(&adap->mbox_lock);
 		list_del(&entry.list);
-		spin_unlock(&adap->mbox_lock);
+		spin_unlock_bh(&adap->mbox_lock);
 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
 		t4_record_mbox(adap, cmd, size, access, ret);
 		return ret;
@@ -418,9 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 				execute = i + ms;
 				t4_record_mbox(adap, cmd_rpl,
 					       MBOX_LEN, access, execute);
-				spin_lock(&adap->mbox_lock);
+				spin_lock_bh(&adap->mbox_lock);
 				list_del(&entry.list);
-				spin_unlock(&adap->mbox_lock);
+				spin_unlock_bh(&adap->mbox_lock);
 				return -FW_CMD_RETVAL_G((int)res);
 			}
 		}
@@ -430,9 +430,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
-	spin_lock(&adap->mbox_lock);
+	spin_lock_bh(&adap->mbox_lock);
 	list_del(&entry.list);
-	spin_unlock(&adap->mbox_lock);
+	spin_unlock_bh(&adap->mbox_lock);
 	t4_fatal_err(adap);
 	return ret;
 }