aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/ath/ath6kl/sdio.c
diff options
context:
space:
mode:
authorVasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>2011-09-30 09:48:43 -0400
committerKalle Valo <kvalo@qca.qualcomm.com>2011-11-11 05:50:54 -0500
commit151bd30bdf88551d68a743b7f7504ca0f3ff2796 (patch)
tree8d987704d0061e77f59c668ebde4169f235163b8 /drivers/net/wireless/ath/ath6kl/sdio.c
parent1555f7339db57987487e2bd849bca9a104109c18 (diff)
ath6kl: Replace spin_lock_irqsave with spin_lock_bh
It is not necessary to use an irq-disabling spinlock primitive to protect data that would otherwise be accessed in hard irq context, as nothing runs in hard irq context in this driver. The spinlock primitive meant to protect data in softirq context is more appropriate. Signed-off-by: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com> Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath6kl/sdio.c')
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c51
1 file changed, 21 insertions(+), 30 deletions(-)
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index f1dc311ee0c7..2dd7a881f223 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -166,12 +166,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
166static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) 166static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
167{ 167{
168 struct bus_request *bus_req; 168 struct bus_request *bus_req;
169 unsigned long flag;
170 169
171 spin_lock_irqsave(&ar_sdio->lock, flag); 170 spin_lock_bh(&ar_sdio->lock);
172 171
173 if (list_empty(&ar_sdio->bus_req_freeq)) { 172 if (list_empty(&ar_sdio->bus_req_freeq)) {
174 spin_unlock_irqrestore(&ar_sdio->lock, flag); 173 spin_unlock_bh(&ar_sdio->lock);
175 return NULL; 174 return NULL;
176 } 175 }
177 176
@@ -179,7 +178,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
179 struct bus_request, list); 178 struct bus_request, list);
180 list_del(&bus_req->list); 179 list_del(&bus_req->list);
181 180
182 spin_unlock_irqrestore(&ar_sdio->lock, flag); 181 spin_unlock_bh(&ar_sdio->lock);
183 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 182 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
184 __func__, bus_req); 183 __func__, bus_req);
185 184
@@ -189,14 +188,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
189static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, 188static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
190 struct bus_request *bus_req) 189 struct bus_request *bus_req)
191{ 190{
192 unsigned long flag;
193
194 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 191 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
195 __func__, bus_req); 192 __func__, bus_req);
196 193
197 spin_lock_irqsave(&ar_sdio->lock, flag); 194 spin_lock_bh(&ar_sdio->lock);
198 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 195 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
199 spin_unlock_irqrestore(&ar_sdio->lock, flag); 196 spin_unlock_bh(&ar_sdio->lock);
200} 197}
201 198
202static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, 199static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -424,20 +421,19 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
424static void ath6kl_sdio_write_async_work(struct work_struct *work) 421static void ath6kl_sdio_write_async_work(struct work_struct *work)
425{ 422{
426 struct ath6kl_sdio *ar_sdio; 423 struct ath6kl_sdio *ar_sdio;
427 unsigned long flags;
428 struct bus_request *req, *tmp_req; 424 struct bus_request *req, *tmp_req;
429 425
430 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); 426 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
431 sdio_claim_host(ar_sdio->func); 427 sdio_claim_host(ar_sdio->func);
432 428
433 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 429 spin_lock_bh(&ar_sdio->wr_async_lock);
434 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 430 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
435 list_del(&req->list); 431 list_del(&req->list);
436 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 432 spin_unlock_bh(&ar_sdio->wr_async_lock);
437 __ath6kl_sdio_write_async(ar_sdio, req); 433 __ath6kl_sdio_write_async(ar_sdio, req);
438 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 434 spin_lock_bh(&ar_sdio->wr_async_lock);
439 } 435 }
440 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 436 spin_unlock_bh(&ar_sdio->wr_async_lock);
441 437
442 sdio_release_host(ar_sdio->func); 438 sdio_release_host(ar_sdio->func);
443} 439}
@@ -520,7 +516,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
520{ 516{
521 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 517 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
522 struct bus_request *bus_req; 518 struct bus_request *bus_req;
523 unsigned long flags;
524 519
525 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 520 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
526 521
@@ -533,9 +528,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
533 bus_req->request = request; 528 bus_req->request = request;
534 bus_req->packet = packet; 529 bus_req->packet = packet;
535 530
536 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 531 spin_lock_bh(&ar_sdio->wr_async_lock);
537 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 532 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
538 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 533 spin_unlock_bh(&ar_sdio->wr_async_lock);
539 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 534 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
540 535
541 return 0; 536 return 0;
@@ -581,9 +576,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
581{ 576{
582 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 577 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
583 struct hif_scatter_req *node = NULL; 578 struct hif_scatter_req *node = NULL;
584 unsigned long flag;
585 579
586 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 580 spin_lock_bh(&ar_sdio->scat_lock);
587 581
588 if (!list_empty(&ar_sdio->scat_req)) { 582 if (!list_empty(&ar_sdio->scat_req)) {
589 node = list_first_entry(&ar_sdio->scat_req, 583 node = list_first_entry(&ar_sdio->scat_req,
@@ -591,7 +585,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
591 list_del(&node->list); 585 list_del(&node->list);
592 } 586 }
593 587
594 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 588 spin_unlock_bh(&ar_sdio->scat_lock);
595 589
596 return node; 590 return node;
597} 591}
@@ -600,13 +594,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
600 struct hif_scatter_req *s_req) 594 struct hif_scatter_req *s_req)
601{ 595{
602 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 596 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
603 unsigned long flag;
604 597
605 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 598 spin_lock_bh(&ar_sdio->scat_lock);
606 599
607 list_add_tail(&s_req->list, &ar_sdio->scat_req); 600 list_add_tail(&s_req->list, &ar_sdio->scat_req);
608 601
609 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 602 spin_unlock_bh(&ar_sdio->scat_lock);
610 603
611} 604}
612 605
@@ -617,7 +610,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
617 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 610 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
618 u32 request = scat_req->req; 611 u32 request = scat_req->req;
619 int status = 0; 612 int status = 0;
620 unsigned long flags;
621 613
622 if (!scat_req->len) 614 if (!scat_req->len)
623 return -EINVAL; 615 return -EINVAL;
@@ -631,9 +623,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
631 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 623 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
632 sdio_release_host(ar_sdio->func); 624 sdio_release_host(ar_sdio->func);
633 } else { 625 } else {
634 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 626 spin_lock_bh(&ar_sdio->wr_async_lock);
635 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 627 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
636 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 628 spin_unlock_bh(&ar_sdio->wr_async_lock);
637 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 629 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
638 } 630 }
639 631
@@ -645,13 +637,12 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
645{ 637{
646 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 638 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
647 struct hif_scatter_req *s_req, *tmp_req; 639 struct hif_scatter_req *s_req, *tmp_req;
648 unsigned long flag;
649 640
650 /* empty the free list */ 641 /* empty the free list */
651 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 642 spin_lock_bh(&ar_sdio->scat_lock);
652 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { 643 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
653 list_del(&s_req->list); 644 list_del(&s_req->list);
654 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 645 spin_unlock_bh(&ar_sdio->scat_lock);
655 646
656 if (s_req->busrequest) 647 if (s_req->busrequest)
657 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); 648 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
@@ -659,9 +650,9 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
659 kfree(s_req->sgentries); 650 kfree(s_req->sgentries);
660 kfree(s_req); 651 kfree(s_req);
661 652
662 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 653 spin_lock_bh(&ar_sdio->scat_lock);
663 } 654 }
664 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 655 spin_unlock_bh(&ar_sdio->scat_lock);
665} 656}
666 657
667/* setup of HIF scatter resources */ 658/* setup of HIF scatter resources */