about summary refs log tree commit diff stats
path: root/drivers/net/caif
diff options
context:
space:
mode:
authorsjur.brandeland@stericsson.com <sjur.brandeland@stericsson.com>2011-12-06 07:15:43 -0500
committerDavid S. Miller <davem@davemloft.net>2011-12-06 17:21:47 -0500
commit095d2a71e51bd2a3e476232156e8d9c2dbc0596d (patch)
treebeb274f3b78134dad1badf29c0607baece3ce9e4 /drivers/net/caif
parent005b0b076ff68a10cc7ca8d3ba32def3ae95ad96 (diff)
caif-shm: Bugfixes for caif_shmcore.c
Various bugfixes for caif_shmcore.c:
- fix deadlocks due to improper usage of spin-lock
- add missing spin-lock init
- don't call dev_kfree_skb() with irqs disabled, use dev_kfree_skb_irq() instead.
- fix potential skb null pointer de-reference.

Squashed original patches from:
Rabin Vincent <rabin.vincent@stericsson.com>
Durga Prasada Rao BATHINA <durgaprasadarao.b@stericcson.com>
Arun Murthy <arun.murthy@stericsson.com>
Bibek Basu <bibek.basu@stericsson.com>

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/caif')
-rw-r--r--drivers/net/caif/caif_shmcore.c21
1 file changed, 11 insertions, 10 deletions
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb24ed9..fd59e37e28a1 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
238 if ((avail_emptybuff > HIGH_WATERMARK) && 238 if ((avail_emptybuff > HIGH_WATERMARK) &&
239 (!pshm_drv->tx_empty_available)) { 239 (!pshm_drv->tx_empty_available)) {
240 pshm_drv->tx_empty_available = 1; 240 pshm_drv->tx_empty_available = 1;
241 spin_unlock_irqrestore(&pshm_drv->lock, flags);
241 pshm_drv->cfdev.flowctrl 242 pshm_drv->cfdev.flowctrl
242 (pshm_drv->pshm_dev->pshm_netdev, 243 (pshm_drv->pshm_dev->pshm_netdev,
243 CAIF_FLOW_ON); 244 CAIF_FLOW_ON);
244 245
245 spin_unlock_irqrestore(&pshm_drv->lock, flags);
246 246
247 /* Schedule the work queue. if required */ 247 /* Schedule the work queue. if required */
248 if (!work_pending(&pshm_drv->shm_tx_work)) 248 if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
285 list_entry(pshm_drv->rx_full_list.next, struct buf_list, 285 list_entry(pshm_drv->rx_full_list.next, struct buf_list,
286 list); 286 list);
287 list_del_init(&pbuf->list); 287 list_del_init(&pbuf->list);
288 spin_unlock_irqrestore(&pshm_drv->lock, flags);
288 289
289 /* Retrieve pointer to start of the packet descriptor area. */ 290 /* Retrieve pointer to start of the packet descriptor area. */
290 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr; 291 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -360,6 +361,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
360 pck_desc++; 361 pck_desc++;
361 } 362 }
362 363
364 spin_lock_irqsave(&pshm_drv->lock, flags);
363 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list); 365 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
364 366
365 spin_unlock_irqrestore(&pshm_drv->lock, flags); 367 spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +414,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
412 414
413 if (skb == NULL) 415 if (skb == NULL)
414 goto send_msg; 416 goto send_msg;
415
416 /* Check the available no. of buffers in the empty list */ 417 /* Check the available no. of buffers in the empty list */
417 list_for_each(pos, &pshm_drv->tx_empty_list) 418 list_for_each(pos, &pshm_drv->tx_empty_list)
418 avail_emptybuff++; 419 avail_emptybuff++;
@@ -421,9 +422,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
421 pshm_drv->tx_empty_available) { 422 pshm_drv->tx_empty_available) {
422 /* Update blocking condition. */ 423 /* Update blocking condition. */
423 pshm_drv->tx_empty_available = 0; 424 pshm_drv->tx_empty_available = 0;
425 spin_unlock_irqrestore(&pshm_drv->lock, flags);
424 pshm_drv->cfdev.flowctrl 426 pshm_drv->cfdev.flowctrl
425 (pshm_drv->pshm_dev->pshm_netdev, 427 (pshm_drv->pshm_dev->pshm_netdev,
426 CAIF_FLOW_OFF); 428 CAIF_FLOW_OFF);
429 spin_lock_irqsave(&pshm_drv->lock, flags);
427 } 430 }
428 /* 431 /*
429 * We simply return back to the caller if we do not have space 432 * We simply return back to the caller if we do not have space
@@ -469,6 +472,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
469 } 472 }
470 473
471 skb = skb_dequeue(&pshm_drv->sk_qhead); 474 skb = skb_dequeue(&pshm_drv->sk_qhead);
475 if (skb == NULL)
476 break;
472 /* Copy in CAIF frame. */ 477 /* Copy in CAIF frame. */
473 skb_copy_bits(skb, 0, pbuf->desc_vptr + 478 skb_copy_bits(skb, 0, pbuf->desc_vptr +
474 pbuf->frm_ofs + SHM_HDR_LEN + 479 pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +482,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
477 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++; 482 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
478 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes += 483 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
479 frmlen; 484 frmlen;
480 dev_kfree_skb(skb); 485 dev_kfree_skb_irq(skb);
481 486
482 /* Fill in the shared memory packet descriptor area. */ 487 /* Fill in the shared memory packet descriptor area. */
483 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr); 488 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +517,11 @@ send_msg:
512static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev) 517static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
513{ 518{
514 struct shmdrv_layer *pshm_drv; 519 struct shmdrv_layer *pshm_drv;
515 unsigned long flags = 0;
516 520
517 pshm_drv = netdev_priv(shm_netdev); 521 pshm_drv = netdev_priv(shm_netdev);
518 522
519 spin_lock_irqsave(&pshm_drv->lock, flags);
520
521 skb_queue_tail(&pshm_drv->sk_qhead, skb); 523 skb_queue_tail(&pshm_drv->sk_qhead, skb);
522 524
523 spin_unlock_irqrestore(&pshm_drv->lock, flags);
524
525 /* Schedule Tx work queue. for deferred processing of skbs*/ 525 /* Schedule Tx work queue. for deferred processing of skbs*/
526 if (!work_pending(&pshm_drv->shm_tx_work)) 526 if (!work_pending(&pshm_drv->shm_tx_work))
527 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work); 527 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +606,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
606 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr + 606 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
607 (NR_TX_BUF * TX_BUF_SZ); 607 (NR_TX_BUF * TX_BUF_SZ);
608 608
609 spin_lock_init(&pshm_drv->lock);
609 INIT_LIST_HEAD(&pshm_drv->tx_empty_list); 610 INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
610 INIT_LIST_HEAD(&pshm_drv->tx_pend_list); 611 INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
611 INIT_LIST_HEAD(&pshm_drv->tx_full_list); 612 INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +641,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
640 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS; 641 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
641 642
642 if (pshm_dev->shm_loopback) 643 if (pshm_dev->shm_loopback)
643 tx_buf->desc_vptr = (char *)tx_buf->phy_addr; 644 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
644 else 645 else
645 tx_buf->desc_vptr = 646 tx_buf->desc_vptr =
646 ioremap(tx_buf->phy_addr, TX_BUF_SZ); 647 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +665,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
664 rx_buf->len = RX_BUF_SZ; 665 rx_buf->len = RX_BUF_SZ;
665 666
666 if (pshm_dev->shm_loopback) 667 if (pshm_dev->shm_loopback)
667 rx_buf->desc_vptr = (char *)rx_buf->phy_addr; 668 rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
668 else 669 else
669 rx_buf->desc_vptr = 670 rx_buf->desc_vptr =
670 ioremap(rx_buf->phy_addr, RX_BUF_SZ); 671 ioremap(rx_buf->phy_addr, RX_BUF_SZ);