author     Dan Williams <dan.j.williams@intel.com>    2011-06-09 19:04:28 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2011-07-03 07:04:51 -0400
commit     994a9303d33f8238d57f58c26067b6d4ac9af222 (patch)
tree       8a7a1f680761ee3cfb2a258c2784194eea69b703 /drivers/scsi
parent     dd047c8e2bca22856050dbe0378a37cf44eecc97 (diff)
isci: cleanup/optimize queue increment macros
Every single i/o or event completion incurs a test and branch to see if the cycle bit changed. For power-of-2 queue sizes the cycle bit can be read directly from the rollover of the queue pointer. Likely premature optimization, but the hidden if() and hidden assignments / side-effects in the macros were already asking to be cleaned up.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
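The rollover trick described above can be checked in isolation: for a power-of-2 queue, the carry out of the low index bits on wrap is exactly the cycle-bit toggle, so no compare and branch is needed. A minimal, self-contained sketch (QSHIFT, QSIZE and CYCLE_SHIFT are illustrative values, not the driver's constants):

#include <assert.h>

#define QSHIFT      4
#define QSIZE       (1u << QSHIFT)   /* 16-entry queue, must be a power of two */
#define CYCLE_SHIFT 15               /* where the register keeps the cycle bit */

/* branch-free advance: toggle the cycle bit exactly when the index wraps */
static void advance(unsigned int *index, unsigned int *cycle)
{
	*cycle ^= ((*index + 1) & QSIZE) << (CYCLE_SHIFT - QSHIFT);
	*index  = (*index + 1) & (QSIZE - 1);
}

int main(void)
{
	unsigned int index = 0, cycle = 0, i;

	for (i = 1; i <= 2 * QSIZE; i++) {
		advance(&index, &cycle);
		assert(index == i % QSIZE);
		/* the cycle bit flips once per full pass over the queue */
		assert(cycle == ((i / QSIZE) & 1u) << CYCLE_SHIFT);
	}
	return 0;
}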
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/isci/host.c                       | 56
-rw-r--r--  drivers/scsi/isci/host.h                       | 16
-rw-r--r--  drivers/scsi/isci/isci.h                       |  4
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c  | 58
4 files changed, 44 insertions, 90 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 3c7042b8bc0e..ae9edae1d245 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -123,34 +123,6 @@
 	)
 
 /**
- * INCREMENT_COMPLETION_QUEUE_GET() -
- *
- * This macro will increment the controllers completion queue index value and
- * possibly toggle the cycle bit if the completion queue index wraps back to 0.
- */
-#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
-	INCREMENT_QUEUE_GET(\
-		(index), \
-		(cycle), \
-		SCU_MAX_COMPLETION_QUEUE_ENTRIES, \
-		SMU_CQGR_CYCLE_BIT)
-
-/**
- * INCREMENT_EVENT_QUEUE_GET() -
- *
- * This macro will increment the controllers event queue index value and
- * possibly toggle the event cycle bit if the event queue index wraps back to 0.
- */
-#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
-	INCREMENT_QUEUE_GET(\
-		(index), \
-		(cycle), \
-		SCU_MAX_EVENTS, \
-		SMU_CQGR_EVENT_CYCLE_BIT \
-		)
-
-
-/**
  * NORMALIZE_GET_POINTER() -
  *
  * This macro will normalize the completion queue get pointer so its value can
@@ -528,15 +500,13 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
 	}
 }
 
-
-
 static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
 {
 	u32 completion_count = 0;
 	u32 completion_entry;
 	u32 get_index;
 	u32 get_cycle;
-	u32 event_index;
+	u32 event_get;
 	u32 event_cycle;
 
 	dev_dbg(scic_to_dev(scic),
@@ -548,7 +518,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
 	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
 	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;
 
-	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
+	event_get = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
 	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;
 
 	while (
@@ -558,7 +528,11 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
 		completion_count++;
 
 		completion_entry = scic->completion_queue[get_index];
-		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);
+
+		/* increment the get pointer and check for rollover to toggle the cycle bit */
+		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
 
 		dev_dbg(scic_to_dev(scic),
 			"%s: completion queue entry:0x%08x\n",
@@ -579,18 +553,14 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
 			break;
 
 		case SCU_COMPLETION_TYPE_EVENT:
-			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
-			scic_sds_controller_event_completion(scic, completion_entry);
-			break;
+		case SCU_COMPLETION_TYPE_NOTIFY: {
+			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
 
-		case SCU_COMPLETION_TYPE_NOTIFY:
-			/*
-			 * Presently we do the same thing with a notify event that we do with the
-			 * other event codes. */
-			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
 			scic_sds_controller_event_completion(scic, completion_entry);
 			break;
-
+		}
 		default:
 			dev_warn(scic_to_dev(scic),
 				 "%s: SCIC Controller received unknown "
@@ -607,7 +577,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
 		SMU_CQGR_GEN_BIT(ENABLE) |
 		SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
 		event_cycle |
-		SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
+		SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
 		get_cycle |
 		SMU_CQGR_GEN_VAL(POINTER, get_index);
 
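For reference, the open-coded update in the hunks above is behaviorally identical to the removed INCREMENT_QUEUE_GET() macro whenever the entry count is a power of two; the macro needed a compare and branch per completion, while the masked form is straight-line arithmetic. A stand-alone check of the equivalence (ENTRIES, SHIFT and CYCLE_SHIFT are stand-in values, not the SMU_* constants):

#include <assert.h>

#define ENTRIES     128u            /* stand-in for SCU_MAX_EVENTS */
#define SHIFT       7u              /* log2(ENTRIES) */
#define CYCLE_SHIFT 26u             /* stand-in for the register's cycle-bit position */
#define CYCLE_BIT   (1u << CYCLE_SHIFT)

int main(void)
{
	unsigned int old_idx = 0, old_cyc = 0;  /* old macro semantics */
	unsigned int new_idx = 0, new_cyc = 0;  /* new open-coded update */
	int i;

	for (i = 0; i < 1000; i++) {
		/* what INCREMENT_QUEUE_GET(index, cycle, ENTRIES, CYCLE_BIT) did */
		if (old_idx + 1 == ENTRIES) {
			old_idx = 0;
			old_cyc ^= CYCLE_BIT;
		} else
			old_idx++;

		/* what the patch now does inline on every completion */
		new_cyc ^= ((new_idx + 1) & ENTRIES) << (CYCLE_SHIFT - SHIFT);
		new_idx  = (new_idx + 1) & (ENTRIES - 1);

		assert(old_idx == new_idx && old_cyc == new_cyc);
	}
	return 0;
}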
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 7d17ab80f1a9..94fd54dc9f01 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -524,22 +524,6 @@ static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
 }
 
 /**
- * INCREMENT_QUEUE_GET() -
- *
- * This macro will increment the specified index to and if the index wraps to 0
- * it will toggel the cycle bit.
- */
-#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
-	{ \
-		if ((index) + 1 == entry_count) { \
-			(index) = 0; \
-			(cycle) = (cycle) ^ (bit_toggle); \
-		} else { \
-			index = index + 1; \
-		} \
-	}
-
-/**
  * scic_sds_controller_get_protocol_engine_group() -
  *
  * This macro returns the protocol engine group for this controller object.
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 81bade421b96..207328369edd 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -90,7 +90,8 @@ enum sci_controller_mode {
 #define SCI_MAX_DOMAINS  SCI_MAX_PORTS
 
 #define SCU_MAX_CRITICAL_NOTIFICATIONS    (384)
-#define SCU_MAX_EVENTS                    (128)
+#define SCU_MAX_EVENTS_SHIFT              (7)
+#define SCU_MAX_EVENTS                    (1 << SCU_MAX_EVENTS_SHIFT)
 #define SCU_MAX_UNSOLICITED_FRAMES        (128)
 #define SCU_MAX_COMPLETION_QUEUE_SCRATCH  (128)
 #define SCU_MAX_COMPLETION_QUEUE_ENTRIES  (SCU_MAX_CRITICAL_NOTIFICATIONS \
@@ -98,6 +99,7 @@ enum sci_controller_mode {
                                            + SCU_MAX_UNSOLICITED_FRAMES \
                                            + SCI_MAX_IO_REQUESTS \
                                            + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT    (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
 
 #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES  (4096)
 #define SCU_UNSOLICITED_FRAME_BUFFER_SIZE    (1024)
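The new *_SHIFT constants only pay off because the underlying sizes are powers of two: masking with size-1 replaces the wrap test, and the carry into bit SHIFT appears exactly on the wrapping step. A stand-alone illustration of those two identities (N and SHIFT are placeholders, not the driver's values):

#include <assert.h>

#define SHIFT 7u
#define N     (1u << SHIFT)   /* must be a power of two */

int main(void)
{
	unsigned int x;

	for (x = 0; x < N; x++) {
		/* masking with N-1 is the wrap the old macro handled with a branch */
		assert(((x + 1) & (N - 1)) == (x + 1) % N);
		/* bit SHIFT of (x+1) is set only on the wrapping step */
		assert((((x + 1) & N) != 0) == (x + 1 == N));
	}
	return 0;
}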
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index d89570700ffd..680582d8cde5 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -209,7 +209,8 @@ bool scic_sds_unsolicited_frame_control_release_frame(
 	/*
 	 * In the event there are NULL entries in the UF table, we need to
 	 * advance the get pointer in order to find out if this frame should
-	 * be released (i.e. update the get pointer). */
+	 * be released (i.e. update the get pointer)
+	 */
 	while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
 	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
 	       frame_get < SCU_MAX_UNSOLICITED_FRAMES)
@@ -217,40 +218,37 @@ bool scic_sds_unsolicited_frame_control_release_frame(
 
 	/*
 	 * The table has a NULL entry as it's last element. This is
-	 * illegal. */
+	 * illegal.
+	 */
 	BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+	if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+		return false;
 
-	if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
-		uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+	uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
 
+	if (frame_get != frame_index) {
 		/*
-		 * The frame index is equal to the current get pointer so we
-		 * can now free up all of the frame entries that */
-		if (frame_get == frame_index) {
-			while (
-				uf_control->buffers.array[frame_get].state
-				== UNSOLICITED_FRAME_RELEASED
-				) {
-				uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
-
-				INCREMENT_QUEUE_GET(
-					frame_get,
-					frame_cycle,
-					SCU_MAX_UNSOLICITED_FRAMES - 1,
-					SCU_MAX_UNSOLICITED_FRAMES);
-			}
-
-			uf_control->get =
-				(SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get);
+		 * Frames remain in use until we advance the get pointer
+		 * so there is nothing we can do here
+		 */
+		return false;
+	}
 
-			return true;
-		} else {
-			/*
-			 * Frames remain in use until we advance the get pointer
-			 * so there is nothing we can do here */
-		}
+	/*
+	 * The frame index is equal to the current get pointer so we
+	 * can now free up all of the frame entries that
+	 */
+	while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+		uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+		if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+			frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+			frame_get = 0;
+		} else
+			frame_get++;
 	}
 
-	return false;
-}
+	uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
 
+	return true;
+}