about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMithlesh Thukral <mithlesh@linsyssoft.com>2009-02-06 09:01:40 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2009-04-03 17:53:12 -0400
commitc5e5cf5a1824f5efbe97880bc7d667053866afc3 (patch)
tree278172504048e568c49c54d2c7e45a2ed7dddb58
parent7c66b14b6bfd4bf1b803be5ba3cc2e2e31d784de (diff)
Staging: sxg: Removed unnecessary checks while taking Transmit Locks
Fix the locking in the transmit code path: it contained an unnecessary check for interrupt context, which has been removed. Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com> Signed-off-by: Mithlesh Thukral <mithlesh@linsyssoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/staging/sxg/sxg.c51
-rw-r--r--drivers/staging/sxg/sxg.h16
2 files changed, 18 insertions, 49 deletions
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 951904320d9e..75c4982d6a3f 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -119,7 +119,7 @@ static int sxg_poll(struct napi_struct *napi, int budget);
119static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId); 119static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
120static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId, 120static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
121 int *sxg_napi_continue, int *work_done, int budget); 121 int *sxg_napi_continue, int *work_done, int budget);
122static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context); 122static void sxg_complete_slow_send(struct adapter_t *adapter);
123static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, 123static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
124 struct sxg_event *Event); 124 struct sxg_event *Event);
125static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus); 125static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
@@ -1274,7 +1274,7 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1274 } 1274 }
1275 /* Slowpath send completions */ 1275 /* Slowpath send completions */
1276 if (Isr & SXG_ISR_SPSEND) { 1276 if (Isr & SXG_ISR_SPSEND) {
1277 sxg_complete_slow_send(adapter, 1); 1277 sxg_complete_slow_send(adapter);
1278 } 1278 }
1279 /* Dump */ 1279 /* Dump */
1280 if (Isr & SXG_ISR_UPC) { 1280 if (Isr & SXG_ISR_UPC) {
@@ -1477,11 +1477,10 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1477 * 1477 *
1478 * Arguments - 1478 * Arguments -
1479 * adapter - A pointer to our adapter structure 1479 * adapter - A pointer to our adapter structure
1480 * irq_context - An integer to denote if we are in interrupt context
1481 * Return 1480 * Return
1482 * None 1481 * None
1483 */ 1482 */
1484static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context) 1483static void sxg_complete_slow_send(struct adapter_t *adapter)
1485{ 1484{
1486 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; 1485 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1487 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; 1486 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
@@ -1496,12 +1495,7 @@ static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
1496 * This means two different processors can both be running/ 1495 * This means two different processors can both be running/
1497 * through this loop. Be *very* careful. 1496 * through this loop. Be *very* careful.
1498 */ 1497 */
1499 if(irq_context) { 1498 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1500 if(!spin_trylock(&adapter->XmtZeroLock))
1501 goto lock_busy;
1502 }
1503 else
1504 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1505 1499
1506 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", 1500 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1507 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1501 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
@@ -1545,36 +1539,23 @@ static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
1545 * chimney send, which results in a double trip 1539 * chimney send, which results in a double trip
1546 * in SxgTcpOuput 1540 * in SxgTcpOuput
1547 */ 1541 */
1548 if(irq_context) 1542 spin_unlock_irqrestore(
1549 spin_unlock(&adapter->XmtZeroLock); 1543 &adapter->XmtZeroLock, flags);
1550 else
1551 spin_unlock_irqrestore(
1552 &adapter->XmtZeroLock, flags);
1553 1544
1554 SxgSgl->DumbPacket = NULL; 1545 SxgSgl->DumbPacket = NULL;
1555 SXG_COMPLETE_DUMB_SEND(adapter, skb, 1546 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1556 FirstSgeAddress, 1547 FirstSgeAddress,
1557 FirstSgeLength); 1548 FirstSgeLength);
1558 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, 1549 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
1559 irq_context);
1560 /* and reacquire.. */ 1550 /* and reacquire.. */
1561 if(irq_context) { 1551 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1562 if(!spin_trylock(&adapter->XmtZeroLock))
1563 goto lock_busy;
1564 }
1565 else
1566 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1567 } 1552 }
1568 break; 1553 break;
1569 default: 1554 default:
1570 ASSERT(0); 1555 ASSERT(0);
1571 } 1556 }
1572 } 1557 }
1573 if(irq_context) 1558 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1574 spin_unlock(&adapter->XmtZeroLock);
1575 else
1576 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1577lock_busy:
1578 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", 1559 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1579 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1560 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1580} 1561}
@@ -2468,7 +2449,7 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2468 */ 2449 */
2469 2450
2470 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); 2451 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2471 sxg_complete_slow_send(adapter, 0); 2452 sxg_complete_slow_send(adapter);
2472 spin_lock_irqsave(&adapter->XmtZeroLock, flags); 2453 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2473 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2454 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2474 if (XmtCmd == NULL) { 2455 if (XmtCmd == NULL) {
@@ -3781,22 +3762,16 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3781 unsigned long sgl_flags; 3762 unsigned long sgl_flags;
3782 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp", 3763 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3783 adapter, SxgSgl, Length, 0); 3764 adapter, SxgSgl, Length, 0);
3784 if(!in_irq()) 3765 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
3785 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
3786 else
3787 spin_lock(&adapter->SglQLock);
3788 adapter->AllSglBufferCount++; 3766 adapter->AllSglBufferCount++;
3789 /* PhysicalAddress; */ 3767 /* PhysicalAddress; */
3790 SxgSgl->PhysicalAddress = PhysicalAddress; 3768 SxgSgl->PhysicalAddress = PhysicalAddress;
3791 /* Initialize backpointer once */ 3769 /* Initialize backpointer once */
3792 SxgSgl->adapter = adapter; 3770 SxgSgl->adapter = adapter;
3793 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); 3771 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3794 if(!in_irq()) 3772 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3795 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3796 else
3797 spin_unlock(&adapter->SglQLock);
3798 SxgSgl->State = SXG_BUFFER_BUSY; 3773 SxgSgl->State = SXG_BUFFER_BUSY;
3799 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq()); 3774 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
3800 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl", 3775 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3801 adapter, SxgSgl, Length, 0); 3776 adapter, SxgSgl, Length, 0);
3802} 3777}
diff --git a/drivers/staging/sxg/sxg.h b/drivers/staging/sxg/sxg.h
index 81d16803169e..e937d4b8e811 100644
--- a/drivers/staging/sxg/sxg.h
+++ b/drivers/staging/sxg/sxg.h
@@ -244,20 +244,14 @@ struct sxg_stats {
244} 244}
245 245
246/* SGL macros */ 246/* SGL macros */
247#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB, _irq) { \ 247#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \
248 if(!_irq) \ 248 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \
249 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \
250 else \
251 spin_lock(&(_pAdapt)->SglQLock); \
252 (_pAdapt)->FreeSglBufferCount++; \ 249 (_pAdapt)->FreeSglBufferCount++; \
253 ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount); \ 250 ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount); \
254 ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \ 251 ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \
255 (_Sgl)->State = SXG_BUFFER_FREE; \ 252 (_Sgl)->State = SXG_BUFFER_FREE; \
256 InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \ 253 InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \
257 if(!_irq) \ 254 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags); \
258 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags); \
259 else \
260 spin_unlock(&(_pAdapt)->SglQLock); \
261} 255}
262 256
263/* 257/*
@@ -280,7 +274,7 @@ struct sxg_stats {
280 if(!_irq) \ 274 if(!_irq) \
281 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \ 275 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \
282 else \ 276 else \
283 spin_lock(&(_pAdapt)->SglQLock); \ 277 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \
284 if((_pAdapt)->FreeSglBufferCount) { \ 278 if((_pAdapt)->FreeSglBufferCount) { \
285 ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ 279 ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \
286 _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ 280 _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \
@@ -294,7 +288,7 @@ struct sxg_stats {
294 if(!_irq) \ 288 if(!_irq) \
295 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\ 289 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\
296 else \ 290 else \
297 spin_unlock(&(_pAdapt)->SglQLock); \ 291 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\
298} 292}
299 293
300/* 294/*