author    Dan Williams <dan.j.williams@intel.com>    2011-06-30 22:14:33 -0400
committer Dan Williams <dan.j.williams@intel.com>    2011-07-03 07:04:52 -0400
commit    89a7301f21fb00e753089671eb9e4132aab8ea08
tree      afa8bac0a36d0d5626997d8995f6c9194aef3a0f    /drivers/scsi/isci/host.c
parent    d9dcb4ba791de2a06b19ac47cd61601cf3d4e208
isci: retire scic_sds_ and scic_ prefixes
The distinction between scic_sds_, scic_, and sci_ is no longer relevant, so unify the prefixes on sci_. The distinction between isci_ and sci_ is historically significant, and useful for comparing the old 'core' to the current Linux driver. 'sci_' represents the former core as well as the routines that are closer to the hardware and protocol than their 'isci_' brethren. sci == sas controller interface. Also unwind the 'sds1' out of the parameter structs.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
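The 'sds1' unwind is easiest to see at a call site: accesses such as
ihost->oem_parameters.sds1.controller.mode_type become
ihost->oem_parameters.controller.mode_type, just as
scic_sds_controller_process_completions() becomes
sci_controller_process_completions() in the hunks below. The snippet that
follows is a minimal compilable sketch of that flattening; the struct
definitions are simplified stand-ins for illustration only, not the
driver's real ones.

    /* Simplified stand-ins; the real definitions live in drivers/scsi/isci/. */
    #include <stdio.h>

    struct sci_oem_params {
            struct {
                    int mode_type;
                    int max_concurrent_dev_spin_up;
            } controller;
    };

    struct isci_host {
            struct sci_oem_params oem_parameters;
    };

    int main(void)
    {
            struct isci_host ihost = {
                    .oem_parameters.controller.mode_type = 1,
            };

            /* before this patch: ihost.oem_parameters.sds1.controller.mode_type */
            /* after this patch:  the sds1 level is gone, accessed directly      */
            printf("mode_type = %d\n", ihost.oem_parameters.controller.mode_type);
            return 0;
    }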
Diffstat (limited to 'drivers/scsi/isci/host.c')
-rw-r--r--  drivers/scsi/isci/host.c  742
1 file changed, 281 insertions(+), 461 deletions(-)
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index bb298f8f609a..f31f64e4b713 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -180,8 +180,7 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
180 handler(sm); 180 handler(sm);
181} 181}
182 182
183static bool scic_sds_controller_completion_queue_has_entries( 183static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
184 struct isci_host *ihost)
185{ 184{
186 u32 get_value = ihost->completion_queue_get; 185 u32 get_value = ihost->completion_queue_get;
187 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; 186 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
@@ -193,9 +192,9 @@ static bool scic_sds_controller_completion_queue_has_entries(
193 return false; 192 return false;
194} 193}
195 194
196static bool scic_sds_controller_isr(struct isci_host *ihost) 195static bool sci_controller_isr(struct isci_host *ihost)
197{ 196{
198 if (scic_sds_controller_completion_queue_has_entries(ihost)) { 197 if (sci_controller_completion_queue_has_entries(ihost)) {
199 return true; 198 return true;
200 } else { 199 } else {
201 /* 200 /*
@@ -219,13 +218,13 @@ irqreturn_t isci_msix_isr(int vec, void *data)
219{ 218{
220 struct isci_host *ihost = data; 219 struct isci_host *ihost = data;
221 220
222 if (scic_sds_controller_isr(ihost)) 221 if (sci_controller_isr(ihost))
223 tasklet_schedule(&ihost->completion_tasklet); 222 tasklet_schedule(&ihost->completion_tasklet);
224 223
225 return IRQ_HANDLED; 224 return IRQ_HANDLED;
226} 225}
227 226
228static bool scic_sds_controller_error_isr(struct isci_host *ihost) 227static bool sci_controller_error_isr(struct isci_host *ihost)
229{ 228{
230 u32 interrupt_status; 229 u32 interrupt_status;
231 230
@@ -252,35 +251,35 @@ static bool scic_sds_controller_error_isr(struct isci_host *ihost)
252 return false; 251 return false;
253} 252}
254 253
255static void scic_sds_controller_task_completion(struct isci_host *ihost, 254static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
256 u32 completion_entry)
257{ 255{
258 u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); 256 u32 index = SCU_GET_COMPLETION_INDEX(ent);
259 struct isci_request *ireq = ihost->reqs[index]; 257 struct isci_request *ireq = ihost->reqs[index];
260 258
261 /* Make sure that we really want to process this IO request */ 259 /* Make sure that we really want to process this IO request */
262 if (test_bit(IREQ_ACTIVE, &ireq->flags) && 260 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
263 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && 261 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
264 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) 262 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
265 /* Yep this is a valid io request pass it along to the io request handler */ 263 /* Yep this is a valid io request pass it along to the
266 scic_sds_io_request_tc_completion(ireq, completion_entry); 264 * io request handler
265 */
266 sci_io_request_tc_completion(ireq, ent);
267} 267}
268 268
269static void scic_sds_controller_sdma_completion(struct isci_host *ihost, 269static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
270 u32 completion_entry)
271{ 270{
272 u32 index; 271 u32 index;
273 struct isci_request *ireq; 272 struct isci_request *ireq;
274 struct isci_remote_device *idev; 273 struct isci_remote_device *idev;
275 274
276 index = SCU_GET_COMPLETION_INDEX(completion_entry); 275 index = SCU_GET_COMPLETION_INDEX(ent);
277 276
278 switch (scu_get_command_request_type(completion_entry)) { 277 switch (scu_get_command_request_type(ent)) {
279 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: 278 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
280 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: 279 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
281 ireq = ihost->reqs[index]; 280 ireq = ihost->reqs[index];
282 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", 281 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
283 __func__, completion_entry, ireq); 282 __func__, ent, ireq);
284 /* @todo For a post TC operation we need to fail the IO 283 /* @todo For a post TC operation we need to fail the IO
285 * request 284 * request
286 */ 285 */
@@ -290,20 +289,19 @@ static void scic_sds_controller_sdma_completion(struct isci_host *ihost,
290 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: 289 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
291 idev = ihost->device_table[index]; 290 idev = ihost->device_table[index];
292 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", 291 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
293 __func__, completion_entry, idev); 292 __func__, ent, idev);
294 /* @todo For a port RNC operation we need to fail the 293 /* @todo For a port RNC operation we need to fail the
295 * device 294 * device
296 */ 295 */
297 break; 296 break;
298 default: 297 default:
299 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", 298 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
300 __func__, completion_entry); 299 __func__, ent);
301 break; 300 break;
302 } 301 }
303} 302}
304 303
305static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, 304static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
306 u32 completion_entry)
307{ 305{
308 u32 index; 306 u32 index;
309 u32 frame_index; 307 u32 frame_index;
@@ -314,36 +312,36 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost,
314 312
315 enum sci_status result = SCI_FAILURE; 313 enum sci_status result = SCI_FAILURE;
316 314
317 frame_index = SCU_GET_FRAME_INDEX(completion_entry); 315 frame_index = SCU_GET_FRAME_INDEX(ent);
318 316
319 frame_header = ihost->uf_control.buffers.array[frame_index].header; 317 frame_header = ihost->uf_control.buffers.array[frame_index].header;
320 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; 318 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
321 319
322 if (SCU_GET_FRAME_ERROR(completion_entry)) { 320 if (SCU_GET_FRAME_ERROR(ent)) {
323 /* 321 /*
324 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will 322 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
325 * / this cause a problem? We expect the phy initialization will 323 * / this cause a problem? We expect the phy initialization will
326 * / fail if there is an error in the frame. */ 324 * / fail if there is an error in the frame. */
327 scic_sds_controller_release_frame(ihost, frame_index); 325 sci_controller_release_frame(ihost, frame_index);
328 return; 326 return;
329 } 327 }
330 328
331 if (frame_header->is_address_frame) { 329 if (frame_header->is_address_frame) {
332 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 330 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
333 iphy = &ihost->phys[index]; 331 iphy = &ihost->phys[index];
334 result = scic_sds_phy_frame_handler(iphy, frame_index); 332 result = sci_phy_frame_handler(iphy, frame_index);
335 } else { 333 } else {
336 334
337 index = SCU_GET_COMPLETION_INDEX(completion_entry); 335 index = SCU_GET_COMPLETION_INDEX(ent);
338 336
339 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { 337 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
340 /* 338 /*
341 * This is a signature fis or a frame from a direct attached SATA 339 * This is a signature fis or a frame from a direct attached SATA
342 * device that has not yet been created. In either case forwared 340 * device that has not yet been created. In either case forwared
343 * the frame to the PE and let it take care of the frame data. */ 341 * the frame to the PE and let it take care of the frame data. */
344 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 342 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
345 iphy = &ihost->phys[index]; 343 iphy = &ihost->phys[index];
346 result = scic_sds_phy_frame_handler(iphy, frame_index); 344 result = sci_phy_frame_handler(iphy, frame_index);
347 } else { 345 } else {
348 if (index < ihost->remote_node_entries) 346 if (index < ihost->remote_node_entries)
349 idev = ihost->device_table[index]; 347 idev = ihost->device_table[index];
@@ -351,9 +349,9 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost,
351 idev = NULL; 349 idev = NULL;
352 350
353 if (idev != NULL) 351 if (idev != NULL)
354 result = scic_sds_remote_device_frame_handler(idev, frame_index); 352 result = sci_remote_device_frame_handler(idev, frame_index);
355 else 353 else
356 scic_sds_controller_release_frame(ihost, frame_index); 354 sci_controller_release_frame(ihost, frame_index);
357 } 355 }
358 } 356 }
359 357
@@ -364,17 +362,16 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost,
364 } 362 }
365} 363}
366 364
367static void scic_sds_controller_event_completion(struct isci_host *ihost, 365static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
368 u32 completion_entry)
369{ 366{
370 struct isci_remote_device *idev; 367 struct isci_remote_device *idev;
371 struct isci_request *ireq; 368 struct isci_request *ireq;
372 struct isci_phy *iphy; 369 struct isci_phy *iphy;
373 u32 index; 370 u32 index;
374 371
375 index = SCU_GET_COMPLETION_INDEX(completion_entry); 372 index = SCU_GET_COMPLETION_INDEX(ent);
376 373
377 switch (scu_get_event_type(completion_entry)) { 374 switch (scu_get_event_type(ent)) {
378 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: 375 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
379 /* / @todo The driver did something wrong and we need to fix the condtion. */ 376 /* / @todo The driver did something wrong and we need to fix the condtion. */
380 dev_err(&ihost->pdev->dev, 377 dev_err(&ihost->pdev->dev,
@@ -382,7 +379,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
382 "0x%x\n", 379 "0x%x\n",
383 __func__, 380 __func__,
384 ihost, 381 ihost,
385 completion_entry); 382 ent);
386 break; 383 break;
387 384
388 case SCU_EVENT_TYPE_SMU_PCQ_ERROR: 385 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
@@ -396,21 +393,21 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
396 "event 0x%x\n", 393 "event 0x%x\n",
397 __func__, 394 __func__,
398 ihost, 395 ihost,
399 completion_entry); 396 ent);
400 break; 397 break;
401 398
402 case SCU_EVENT_TYPE_TRANSPORT_ERROR: 399 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
403 ireq = ihost->reqs[index]; 400 ireq = ihost->reqs[index];
404 scic_sds_io_request_event_handler(ireq, completion_entry); 401 sci_io_request_event_handler(ireq, ent);
405 break; 402 break;
406 403
407 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: 404 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
408 switch (scu_get_event_specifier(completion_entry)) { 405 switch (scu_get_event_specifier(ent)) {
409 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: 406 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
410 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: 407 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
411 ireq = ihost->reqs[index]; 408 ireq = ihost->reqs[index];
412 if (ireq != NULL) 409 if (ireq != NULL)
413 scic_sds_io_request_event_handler(ireq, completion_entry); 410 sci_io_request_event_handler(ireq, ent);
414 else 411 else
415 dev_warn(&ihost->pdev->dev, 412 dev_warn(&ihost->pdev->dev,
416 "%s: SCIC Controller 0x%p received " 413 "%s: SCIC Controller 0x%p received "
@@ -418,14 +415,14 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
418 "that doesnt exist.\n", 415 "that doesnt exist.\n",
419 __func__, 416 __func__,
420 ihost, 417 ihost,
421 completion_entry); 418 ent);
422 419
423 break; 420 break;
424 421
425 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: 422 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
426 idev = ihost->device_table[index]; 423 idev = ihost->device_table[index];
427 if (idev != NULL) 424 if (idev != NULL)
428 scic_sds_remote_device_event_handler(idev, completion_entry); 425 sci_remote_device_event_handler(idev, ent);
429 else 426 else
430 dev_warn(&ihost->pdev->dev, 427 dev_warn(&ihost->pdev->dev,
431 "%s: SCIC Controller 0x%p received " 428 "%s: SCIC Controller 0x%p received "
@@ -433,7 +430,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
433 "that doesnt exist.\n", 430 "that doesnt exist.\n",
434 __func__, 431 __func__,
435 ihost, 432 ihost,
436 completion_entry); 433 ent);
437 434
438 break; 435 break;
439 } 436 }
@@ -448,9 +445,9 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
448 * direct error counter event to the phy object since that is where 445 * direct error counter event to the phy object since that is where
449 * we get the event notification. This is a type 4 event. */ 446 * we get the event notification. This is a type 4 event. */
450 case SCU_EVENT_TYPE_OSSP_EVENT: 447 case SCU_EVENT_TYPE_OSSP_EVENT:
451 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 448 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
452 iphy = &ihost->phys[index]; 449 iphy = &ihost->phys[index];
453 scic_sds_phy_event_handler(iphy, completion_entry); 450 sci_phy_event_handler(iphy, ent);
454 break; 451 break;
455 452
456 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 453 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
@@ -460,7 +457,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
460 idev = ihost->device_table[index]; 457 idev = ihost->device_table[index];
461 458
462 if (idev != NULL) 459 if (idev != NULL)
463 scic_sds_remote_device_event_handler(idev, completion_entry); 460 sci_remote_device_event_handler(idev, ent);
464 } else 461 } else
465 dev_err(&ihost->pdev->dev, 462 dev_err(&ihost->pdev->dev,
466 "%s: SCIC Controller 0x%p received event 0x%x " 463 "%s: SCIC Controller 0x%p received event 0x%x "
@@ -468,7 +465,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
468 "exist.\n", 465 "exist.\n",
469 __func__, 466 __func__,
470 ihost, 467 ihost,
471 completion_entry, 468 ent,
472 index); 469 index);
473 470
474 break; 471 break;
@@ -477,15 +474,15 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost,
477 dev_warn(&ihost->pdev->dev, 474 dev_warn(&ihost->pdev->dev,
478 "%s: SCIC Controller received unknown event code %x\n", 475 "%s: SCIC Controller received unknown event code %x\n",
479 __func__, 476 __func__,
480 completion_entry); 477 ent);
481 break; 478 break;
482 } 479 }
483} 480}
484 481
485static void scic_sds_controller_process_completions(struct isci_host *ihost) 482static void sci_controller_process_completions(struct isci_host *ihost)
486{ 483{
487 u32 completion_count = 0; 484 u32 completion_count = 0;
488 u32 completion_entry; 485 u32 ent;
489 u32 get_index; 486 u32 get_index;
490 u32 get_cycle; 487 u32 get_cycle;
491 u32 event_get; 488 u32 event_get;
@@ -509,7 +506,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost)
509 ) { 506 ) {
510 completion_count++; 507 completion_count++;
511 508
512 completion_entry = ihost->completion_queue[get_index]; 509 ent = ihost->completion_queue[get_index];
513 510
514 /* increment the get pointer and check for rollover to toggle the cycle bit */ 511 /* increment the get pointer and check for rollover to toggle the cycle bit */
515 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << 512 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
@@ -519,19 +516,19 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost)
519 dev_dbg(&ihost->pdev->dev, 516 dev_dbg(&ihost->pdev->dev,
520 "%s: completion queue entry:0x%08x\n", 517 "%s: completion queue entry:0x%08x\n",
521 __func__, 518 __func__,
522 completion_entry); 519 ent);
523 520
524 switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { 521 switch (SCU_GET_COMPLETION_TYPE(ent)) {
525 case SCU_COMPLETION_TYPE_TASK: 522 case SCU_COMPLETION_TYPE_TASK:
526 scic_sds_controller_task_completion(ihost, completion_entry); 523 sci_controller_task_completion(ihost, ent);
527 break; 524 break;
528 525
529 case SCU_COMPLETION_TYPE_SDMA: 526 case SCU_COMPLETION_TYPE_SDMA:
530 scic_sds_controller_sdma_completion(ihost, completion_entry); 527 sci_controller_sdma_completion(ihost, ent);
531 break; 528 break;
532 529
533 case SCU_COMPLETION_TYPE_UFI: 530 case SCU_COMPLETION_TYPE_UFI:
534 scic_sds_controller_unsolicited_frame(ihost, completion_entry); 531 sci_controller_unsolicited_frame(ihost, ent);
535 break; 532 break;
536 533
537 case SCU_COMPLETION_TYPE_EVENT: 534 case SCU_COMPLETION_TYPE_EVENT:
@@ -540,7 +537,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost)
540 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); 537 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
541 event_get = (event_get+1) & (SCU_MAX_EVENTS-1); 538 event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
542 539
543 scic_sds_controller_event_completion(ihost, completion_entry); 540 sci_controller_event_completion(ihost, ent);
544 break; 541 break;
545 } 542 }
546 default: 543 default:
@@ -548,7 +545,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost)
548 "%s: SCIC Controller received unknown " 545 "%s: SCIC Controller received unknown "
549 "completion type %x\n", 546 "completion type %x\n",
550 __func__, 547 __func__,
551 completion_entry); 548 ent);
552 break; 549 break;
553 } 550 }
554 } 551 }
@@ -575,7 +572,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost)
575 572
576} 573}
577 574
578static void scic_sds_controller_error_handler(struct isci_host *ihost) 575static void sci_controller_error_handler(struct isci_host *ihost)
579{ 576{
580 u32 interrupt_status; 577 u32 interrupt_status;
581 578
@@ -583,9 +580,9 @@ static void scic_sds_controller_error_handler(struct isci_host *ihost)
583 readl(&ihost->smu_registers->interrupt_status); 580 readl(&ihost->smu_registers->interrupt_status);
584 581
585 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && 582 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
586 scic_sds_controller_completion_queue_has_entries(ihost)) { 583 sci_controller_completion_queue_has_entries(ihost)) {
587 584
588 scic_sds_controller_process_completions(ihost); 585 sci_controller_process_completions(ihost);
589 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); 586 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
590 } else { 587 } else {
591 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, 588 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
@@ -607,13 +604,13 @@ irqreturn_t isci_intx_isr(int vec, void *data)
607 irqreturn_t ret = IRQ_NONE; 604 irqreturn_t ret = IRQ_NONE;
608 struct isci_host *ihost = data; 605 struct isci_host *ihost = data;
609 606
610 if (scic_sds_controller_isr(ihost)) { 607 if (sci_controller_isr(ihost)) {
611 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); 608 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
612 tasklet_schedule(&ihost->completion_tasklet); 609 tasklet_schedule(&ihost->completion_tasklet);
613 ret = IRQ_HANDLED; 610 ret = IRQ_HANDLED;
614 } else if (scic_sds_controller_error_isr(ihost)) { 611 } else if (sci_controller_error_isr(ihost)) {
615 spin_lock(&ihost->scic_lock); 612 spin_lock(&ihost->scic_lock);
616 scic_sds_controller_error_handler(ihost); 613 sci_controller_error_handler(ihost);
617 spin_unlock(&ihost->scic_lock); 614 spin_unlock(&ihost->scic_lock);
618 ret = IRQ_HANDLED; 615 ret = IRQ_HANDLED;
619 } 616 }
@@ -625,8 +622,8 @@ irqreturn_t isci_error_isr(int vec, void *data)
625{ 622{
626 struct isci_host *ihost = data; 623 struct isci_host *ihost = data;
627 624
628 if (scic_sds_controller_error_isr(ihost)) 625 if (sci_controller_error_isr(ihost))
629 scic_sds_controller_error_handler(ihost); 626 sci_controller_error_handler(ihost);
630 627
631 return IRQ_HANDLED; 628 return IRQ_HANDLED;
632} 629}
@@ -670,8 +667,8 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
670} 667}
671 668
672/** 669/**
673 * scic_controller_get_suggested_start_timeout() - This method returns the 670 * sci_controller_get_suggested_start_timeout() - This method returns the
674 * suggested scic_controller_start() timeout amount. The user is free to 671 * suggested sci_controller_start() timeout amount. The user is free to
675 * use any timeout value, but this method provides the suggested minimum 672 * use any timeout value, but this method provides the suggested minimum
676 * start timeout value. The returned value is based upon empirical 673 * start timeout value. The returned value is based upon empirical
677 * information determined as a result of interoperability testing. 674 * information determined as a result of interoperability testing.
@@ -681,7 +678,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
681 * This method returns the number of milliseconds for the suggested start 678 * This method returns the number of milliseconds for the suggested start
682 * operation timeout. 679 * operation timeout.
683 */ 680 */
684static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) 681static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
685{ 682{
686 /* Validate the user supplied parameters. */ 683 /* Validate the user supplied parameters. */
687 if (!ihost) 684 if (!ihost)
@@ -706,19 +703,19 @@ static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost)
706 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 703 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
707} 704}
708 705
709static void scic_controller_enable_interrupts(struct isci_host *ihost) 706static void sci_controller_enable_interrupts(struct isci_host *ihost)
710{ 707{
711 BUG_ON(ihost->smu_registers == NULL); 708 BUG_ON(ihost->smu_registers == NULL);
712 writel(0, &ihost->smu_registers->interrupt_mask); 709 writel(0, &ihost->smu_registers->interrupt_mask);
713} 710}
714 711
715void scic_controller_disable_interrupts(struct isci_host *ihost) 712void sci_controller_disable_interrupts(struct isci_host *ihost)
716{ 713{
717 BUG_ON(ihost->smu_registers == NULL); 714 BUG_ON(ihost->smu_registers == NULL);
718 writel(0xffffffff, &ihost->smu_registers->interrupt_mask); 715 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
719} 716}
720 717
721static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) 718static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
722{ 719{
723 u32 port_task_scheduler_value; 720 u32 port_task_scheduler_value;
724 721
@@ -731,7 +728,7 @@ static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *iho
731 &ihost->scu_registers->peg0.ptsg.control); 728 &ihost->scu_registers->peg0.ptsg.control);
732} 729}
733 730
734static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) 731static void sci_controller_assign_task_entries(struct isci_host *ihost)
735{ 732{
736 u32 task_assignment; 733 u32 task_assignment;
737 734
@@ -752,7 +749,7 @@ static void scic_sds_controller_assign_task_entries(struct isci_host *ihost)
752 749
753} 750}
754 751
755static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) 752static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
756{ 753{
757 u32 index; 754 u32 index;
758 u32 completion_queue_control_value; 755 u32 completion_queue_control_value;
@@ -799,7 +796,7 @@ static void scic_sds_controller_initialize_completion_queue(struct isci_host *ih
799 } 796 }
800} 797}
801 798
802static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) 799static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
803{ 800{
804 u32 frame_queue_control_value; 801 u32 frame_queue_control_value;
805 u32 frame_queue_get_value; 802 u32 frame_queue_get_value;
@@ -826,22 +823,8 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_h
826 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); 823 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
827} 824}
828 825
829/** 826static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
830 * This method will attempt to transition into the ready state for the
831 * controller and indicate that the controller start operation has completed
832 * if all criteria are met.
833 * @scic: This parameter indicates the controller object for which
834 * to transition to ready.
835 * @status: This parameter indicates the status value to be pass into the call
836 * to scic_cb_controller_start_complete().
837 *
838 * none.
839 */
840static void scic_sds_controller_transition_to_ready(
841 struct isci_host *ihost,
842 enum sci_status status)
843{ 827{
844
845 if (ihost->sm.current_state_id == SCIC_STARTING) { 828 if (ihost->sm.current_state_id == SCIC_STARTING) {
846 /* 829 /*
847 * We move into the ready state, because some of the phys/ports 830 * We move into the ready state, because some of the phys/ports
@@ -855,7 +838,7 @@ static void scic_sds_controller_transition_to_ready(
855 838
856static bool is_phy_starting(struct isci_phy *iphy) 839static bool is_phy_starting(struct isci_phy *iphy)
857{ 840{
858 enum scic_sds_phy_states state; 841 enum sci_phy_states state;
859 842
860 state = iphy->sm.current_state_id; 843 state = iphy->sm.current_state_id;
861 switch (state) { 844 switch (state) {
@@ -876,16 +859,16 @@ static bool is_phy_starting(struct isci_phy *iphy)
876} 859}
877 860
878/** 861/**
879 * scic_sds_controller_start_next_phy - start phy 862 * sci_controller_start_next_phy - start phy
880 * @scic: controller 863 * @scic: controller
881 * 864 *
882 * If all the phys have been started, then attempt to transition the 865 * If all the phys have been started, then attempt to transition the
883 * controller to the READY state and inform the user 866 * controller to the READY state and inform the user
884 * (scic_cb_controller_start_complete()). 867 * (sci_cb_controller_start_complete()).
885 */ 868 */
886static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) 869static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
887{ 870{
888 struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; 871 struct sci_oem_params *oem = &ihost->oem_parameters;
889 struct isci_phy *iphy; 872 struct isci_phy *iphy;
890 enum sci_status status; 873 enum sci_status status;
891 874
@@ -924,7 +907,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos
924 * The controller has successfully finished the start process. 907 * The controller has successfully finished the start process.
925 * Inform the SCI Core user and transition to the READY state. */ 908 * Inform the SCI Core user and transition to the READY state. */
926 if (is_controller_start_complete == true) { 909 if (is_controller_start_complete == true) {
927 scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); 910 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
928 sci_del_timer(&ihost->phy_timer); 911 sci_del_timer(&ihost->phy_timer);
929 ihost->phy_startup_timer_pending = false; 912 ihost->phy_startup_timer_pending = false;
930 } 913 }
@@ -944,11 +927,11 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos
944 * incorrectly for the PORT or it was never 927 * incorrectly for the PORT or it was never
945 * assigned to a PORT 928 * assigned to a PORT
946 */ 929 */
947 return scic_sds_controller_start_next_phy(ihost); 930 return sci_controller_start_next_phy(ihost);
948 } 931 }
949 } 932 }
950 933
951 status = scic_sds_phy_start(iphy); 934 status = sci_phy_start(iphy);
952 935
953 if (status == SCI_SUCCESS) { 936 if (status == SCI_SUCCESS) {
954 sci_mod_timer(&ihost->phy_timer, 937 sci_mod_timer(&ihost->phy_timer,
@@ -985,7 +968,7 @@ static void phy_startup_timeout(unsigned long data)
985 ihost->phy_startup_timer_pending = false; 968 ihost->phy_startup_timer_pending = false;
986 969
987 do { 970 do {
988 status = scic_sds_controller_start_next_phy(ihost); 971 status = sci_controller_start_next_phy(ihost);
989 } while (status != SCI_SUCCESS); 972 } while (status != SCI_SUCCESS);
990 973
991done: 974done:
@@ -997,7 +980,7 @@ static u16 isci_tci_active(struct isci_host *ihost)
997 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); 980 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
998} 981}
999 982
1000static enum sci_status scic_controller_start(struct isci_host *ihost, 983static enum sci_status sci_controller_start(struct isci_host *ihost,
1001 u32 timeout) 984 u32 timeout)
1002{ 985{
1003 enum sci_status result; 986 enum sci_status result;
@@ -1018,38 +1001,37 @@ static enum sci_status scic_controller_start(struct isci_host *ihost,
1018 isci_tci_free(ihost, index); 1001 isci_tci_free(ihost, index);
1019 1002
1020 /* Build the RNi free pool */ 1003 /* Build the RNi free pool */
1021 scic_sds_remote_node_table_initialize( 1004 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1022 &ihost->available_remote_nodes, 1005 ihost->remote_node_entries);
1023 ihost->remote_node_entries);
1024 1006
1025 /* 1007 /*
1026 * Before anything else lets make sure we will not be 1008 * Before anything else lets make sure we will not be
1027 * interrupted by the hardware. 1009 * interrupted by the hardware.
1028 */ 1010 */
1029 scic_controller_disable_interrupts(ihost); 1011 sci_controller_disable_interrupts(ihost);
1030 1012
1031 /* Enable the port task scheduler */ 1013 /* Enable the port task scheduler */
1032 scic_sds_controller_enable_port_task_scheduler(ihost); 1014 sci_controller_enable_port_task_scheduler(ihost);
1033 1015
1034 /* Assign all the task entries to ihost physical function */ 1016 /* Assign all the task entries to ihost physical function */
1035 scic_sds_controller_assign_task_entries(ihost); 1017 sci_controller_assign_task_entries(ihost);
1036 1018
1037 /* Now initialize the completion queue */ 1019 /* Now initialize the completion queue */
1038 scic_sds_controller_initialize_completion_queue(ihost); 1020 sci_controller_initialize_completion_queue(ihost);
1039 1021
1040 /* Initialize the unsolicited frame queue for use */ 1022 /* Initialize the unsolicited frame queue for use */
1041 scic_sds_controller_initialize_unsolicited_frame_queue(ihost); 1023 sci_controller_initialize_unsolicited_frame_queue(ihost);
1042 1024
1043 /* Start all of the ports on this controller */ 1025 /* Start all of the ports on this controller */
1044 for (index = 0; index < ihost->logical_port_entries; index++) { 1026 for (index = 0; index < ihost->logical_port_entries; index++) {
1045 struct isci_port *iport = &ihost->ports[index]; 1027 struct isci_port *iport = &ihost->ports[index];
1046 1028
1047 result = scic_sds_port_start(iport); 1029 result = sci_port_start(iport);
1048 if (result) 1030 if (result)
1049 return result; 1031 return result;
1050 } 1032 }
1051 1033
1052 scic_sds_controller_start_next_phy(ihost); 1034 sci_controller_start_next_phy(ihost);
1053 1035
1054 sci_mod_timer(&ihost->timer, timeout); 1036 sci_mod_timer(&ihost->timer, timeout);
1055 1037
@@ -1061,29 +1043,29 @@ static enum sci_status scic_controller_start(struct isci_host *ihost,
1061void isci_host_scan_start(struct Scsi_Host *shost) 1043void isci_host_scan_start(struct Scsi_Host *shost)
1062{ 1044{
1063 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; 1045 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1064 unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost); 1046 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1065 1047
1066 set_bit(IHOST_START_PENDING, &ihost->flags); 1048 set_bit(IHOST_START_PENDING, &ihost->flags);
1067 1049
1068 spin_lock_irq(&ihost->scic_lock); 1050 spin_lock_irq(&ihost->scic_lock);
1069 scic_controller_start(ihost, tmo); 1051 sci_controller_start(ihost, tmo);
1070 scic_controller_enable_interrupts(ihost); 1052 sci_controller_enable_interrupts(ihost);
1071 spin_unlock_irq(&ihost->scic_lock); 1053 spin_unlock_irq(&ihost->scic_lock);
1072} 1054}
1073 1055
1074static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) 1056static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
1075{ 1057{
1076 isci_host_change_state(ihost, isci_stopped); 1058 isci_host_change_state(ihost, isci_stopped);
1077 scic_controller_disable_interrupts(ihost); 1059 sci_controller_disable_interrupts(ihost);
1078 clear_bit(IHOST_STOP_PENDING, &ihost->flags); 1060 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1079 wake_up(&ihost->eventq); 1061 wake_up(&ihost->eventq);
1080} 1062}
1081 1063
1082static void scic_sds_controller_completion_handler(struct isci_host *ihost) 1064static void sci_controller_completion_handler(struct isci_host *ihost)
1083{ 1065{
1084 /* Empty out the completion queue */ 1066 /* Empty out the completion queue */
1085 if (scic_sds_controller_completion_queue_has_entries(ihost)) 1067 if (sci_controller_completion_queue_has_entries(ihost))
1086 scic_sds_controller_process_completions(ihost); 1068 sci_controller_process_completions(ihost);
1087 1069
1088 /* Clear the interrupt and enable all interrupts again */ 1070 /* Clear the interrupt and enable all interrupts again */
1089 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); 1071 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
@@ -1116,7 +1098,7 @@ static void isci_host_completion_routine(unsigned long data)
1116 1098
1117 spin_lock_irq(&ihost->scic_lock); 1099 spin_lock_irq(&ihost->scic_lock);
1118 1100
1119 scic_sds_controller_completion_handler(ihost); 1101 sci_controller_completion_handler(ihost);
1120 1102
1121 /* Take the lists of completed I/Os from the host. */ 1103 /* Take the lists of completed I/Os from the host. */
1122 1104
@@ -1203,7 +1185,7 @@ static void isci_host_completion_routine(unsigned long data)
1203} 1185}
1204 1186
1205/** 1187/**
1206 * scic_controller_stop() - This method will stop an individual controller 1188 * sci_controller_stop() - This method will stop an individual controller
1207 * object.This method will invoke the associated user callback upon 1189 * object.This method will invoke the associated user callback upon
1208 * completion. The completion callback is called when the following 1190 * completion. The completion callback is called when the following
1209 * conditions are met: -# the method return status is SCI_SUCCESS. -# the 1191 * conditions are met: -# the method return status is SCI_SUCCESS. -# the
@@ -1220,8 +1202,7 @@ static void isci_host_completion_routine(unsigned long data)
1220 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the 1202 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1221 * controller is not either in the STARTED or STOPPED states. 1203 * controller is not either in the STARTED or STOPPED states.
1222 */ 1204 */
1223static enum sci_status scic_controller_stop(struct isci_host *ihost, 1205static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1224 u32 timeout)
1225{ 1206{
1226 if (ihost->sm.current_state_id != SCIC_READY) { 1207 if (ihost->sm.current_state_id != SCIC_READY) {
1227 dev_warn(&ihost->pdev->dev, 1208 dev_warn(&ihost->pdev->dev,
@@ -1236,7 +1217,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost,
1236} 1217}
1237 1218
1238/** 1219/**
1239 * scic_controller_reset() - This method will reset the supplied core 1220 * sci_controller_reset() - This method will reset the supplied core
1240 * controller regardless of the state of said controller. This operation is 1221 * controller regardless of the state of said controller. This operation is
1241 * considered destructive. In other words, all current operations are wiped 1222 * considered destructive. In other words, all current operations are wiped
1242 * out. No IO completions for outstanding devices occur. Outstanding IO 1223 * out. No IO completions for outstanding devices occur. Outstanding IO
@@ -1247,7 +1228,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost,
1247 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if 1228 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1248 * the controller reset operation is unable to complete. 1229 * the controller reset operation is unable to complete.
1249 */ 1230 */
1250static enum sci_status scic_controller_reset(struct isci_host *ihost) 1231static enum sci_status sci_controller_reset(struct isci_host *ihost)
1251{ 1232{
1252 switch (ihost->sm.current_state_id) { 1233 switch (ihost->sm.current_state_id) {
1253 case SCIC_RESET: 1234 case SCIC_RESET:
@@ -1286,11 +1267,11 @@ void isci_host_deinit(struct isci_host *ihost)
1286 set_bit(IHOST_STOP_PENDING, &ihost->flags); 1267 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1287 1268
1288 spin_lock_irq(&ihost->scic_lock); 1269 spin_lock_irq(&ihost->scic_lock);
1289 scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); 1270 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1290 spin_unlock_irq(&ihost->scic_lock); 1271 spin_unlock_irq(&ihost->scic_lock);
1291 1272
1292 wait_for_stop(ihost); 1273 wait_for_stop(ihost);
1293 scic_controller_reset(ihost); 1274 sci_controller_reset(ihost);
1294 1275
1295 /* Cancel any/all outstanding port timers */ 1276 /* Cancel any/all outstanding port timers */
1296 for (i = 0; i < ihost->logical_port_entries; i++) { 1277 for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1329,11 +1310,8 @@ static void __iomem *smu_base(struct isci_host *isci_host)
1329 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; 1310 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1330} 1311}
1331 1312
1332static void isci_user_parameters_get( 1313static void isci_user_parameters_get(struct sci_user_parameters *u)
1333 struct isci_host *isci_host,
1334 union scic_user_parameters *scic_user_params)
1335{ 1314{
1336 struct scic_sds_user_parameters *u = &scic_user_params->sds1;
1337 int i; 1315 int i;
1338 1316
1339 for (i = 0; i < SCI_MAX_PHYS; i++) { 1317 for (i = 0; i < SCI_MAX_PHYS; i++) {
@@ -1355,14 +1333,14 @@ static void isci_user_parameters_get(
1355 u->max_number_concurrent_device_spin_up = max_concurr_spinup; 1333 u->max_number_concurrent_device_spin_up = max_concurr_spinup;
1356} 1334}
1357 1335
1358static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) 1336static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1359{ 1337{
1360 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1338 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1361 1339
1362 sci_change_state(&ihost->sm, SCIC_RESET); 1340 sci_change_state(&ihost->sm, SCIC_RESET);
1363} 1341}
1364 1342
1365static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) 1343static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1366{ 1344{
1367 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1345 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1368 1346
@@ -1377,7 +1355,7 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state
1377#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 1355#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1378 1356
1379/** 1357/**
1380 * scic_controller_set_interrupt_coalescence() - This method allows the user to 1358 * sci_controller_set_interrupt_coalescence() - This method allows the user to
1381 * configure the interrupt coalescence. 1359 * configure the interrupt coalescence.
1382 * @controller: This parameter represents the handle to the controller object 1360 * @controller: This parameter represents the handle to the controller object
1383 * for which its interrupt coalesce register is overridden. 1361 * for which its interrupt coalesce register is overridden.
@@ -1394,9 +1372,9 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state
1394 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 1372 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1395 */ 1373 */
1396static enum sci_status 1374static enum sci_status
1397scic_controller_set_interrupt_coalescence(struct isci_host *ihost, 1375sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1398 u32 coalesce_number, 1376 u32 coalesce_number,
1399 u32 coalesce_timeout) 1377 u32 coalesce_timeout)
1400{ 1378{
1401 u8 timeout_encode = 0; 1379 u8 timeout_encode = 0;
1402 u32 min = 0; 1380 u32 min = 0;
@@ -1489,23 +1467,23 @@ scic_controller_set_interrupt_coalescence(struct isci_host *ihost,
1489} 1467}
1490 1468
1491 1469
1492static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) 1470static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1493{ 1471{
1494 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1472 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1495 1473
1496 /* set the default interrupt coalescence number and timeout value. */ 1474 /* set the default interrupt coalescence number and timeout value. */
1497 scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1475 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
1498} 1476}
1499 1477
1500static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) 1478static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1501{ 1479{
1502 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1480 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1503 1481
1504 /* disable interrupt coalescence. */ 1482 /* disable interrupt coalescence. */
1505 scic_controller_set_interrupt_coalescence(ihost, 0, 0); 1483 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1506} 1484}
1507 1485
1508static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) 1486static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1509{ 1487{
1510 u32 index; 1488 u32 index;
1511 enum sci_status status; 1489 enum sci_status status;
@@ -1514,7 +1492,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost)
1514 status = SCI_SUCCESS; 1492 status = SCI_SUCCESS;
1515 1493
1516 for (index = 0; index < SCI_MAX_PHYS; index++) { 1494 for (index = 0; index < SCI_MAX_PHYS; index++) {
1517 phy_status = scic_sds_phy_stop(&ihost->phys[index]); 1495 phy_status = sci_phy_stop(&ihost->phys[index]);
1518 1496
1519 if (phy_status != SCI_SUCCESS && 1497 if (phy_status != SCI_SUCCESS &&
1520 phy_status != SCI_FAILURE_INVALID_STATE) { 1498 phy_status != SCI_FAILURE_INVALID_STATE) {
@@ -1531,7 +1509,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost)
1531 return status; 1509 return status;
1532} 1510}
1533 1511
1534static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) 1512static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1535{ 1513{
1536 u32 index; 1514 u32 index;
1537 enum sci_status port_status; 1515 enum sci_status port_status;
@@ -1540,7 +1518,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost)
1540 for (index = 0; index < ihost->logical_port_entries; index++) { 1518 for (index = 0; index < ihost->logical_port_entries; index++) {
1541 struct isci_port *iport = &ihost->ports[index]; 1519 struct isci_port *iport = &ihost->ports[index];
1542 1520
1543 port_status = scic_sds_port_stop(iport); 1521 port_status = sci_port_stop(iport);
1544 1522
1545 if ((port_status != SCI_SUCCESS) && 1523 if ((port_status != SCI_SUCCESS) &&
1546 (port_status != SCI_FAILURE_INVALID_STATE)) { 1524 (port_status != SCI_FAILURE_INVALID_STATE)) {
@@ -1558,7 +1536,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost)
1558 return status; 1536 return status;
1559} 1537}
1560 1538
1561static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) 1539static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1562{ 1540{
1563 u32 index; 1541 u32 index;
1564 enum sci_status status; 1542 enum sci_status status;
@@ -1569,7 +1547,7 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost)
1569 for (index = 0; index < ihost->remote_node_entries; index++) { 1547 for (index = 0; index < ihost->remote_node_entries; index++) {
1570 if (ihost->device_table[index] != NULL) { 1548 if (ihost->device_table[index] != NULL) {
1571 /* / @todo What timeout value do we want to provide to this request? */ 1549 /* / @todo What timeout value do we want to provide to this request? */
1572 device_status = scic_remote_device_stop(ihost->device_table[index], 0); 1550 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1573 1551
1574 if ((device_status != SCI_SUCCESS) && 1552 if ((device_status != SCI_SUCCESS) &&
1575 (device_status != SCI_FAILURE_INVALID_STATE)) { 1553 (device_status != SCI_FAILURE_INVALID_STATE)) {
@@ -1586,33 +1564,27 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost)
1586 return status; 1564 return status;
1587} 1565}
1588 1566
1589static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) 1567static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1590{ 1568{
1591 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1569 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1592 1570
1593 /* Stop all of the components for this controller */ 1571 /* Stop all of the components for this controller */
1594 scic_sds_controller_stop_phys(ihost); 1572 sci_controller_stop_phys(ihost);
1595 scic_sds_controller_stop_ports(ihost); 1573 sci_controller_stop_ports(ihost);
1596 scic_sds_controller_stop_devices(ihost); 1574 sci_controller_stop_devices(ihost);
1597} 1575}
1598 1576
1599static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) 1577static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1600{ 1578{
1601 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1579 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1602 1580
1603 sci_del_timer(&ihost->timer); 1581 sci_del_timer(&ihost->timer);
1604} 1582}
1605 1583
1606 1584static void sci_controller_reset_hardware(struct isci_host *ihost)
1607/**
1608 * scic_sds_controller_reset_hardware() -
1609 *
1610 * This method will reset the controller hardware.
1611 */
1612static void scic_sds_controller_reset_hardware(struct isci_host *ihost)
1613{ 1585{
1614 /* Disable interrupts so we dont take any spurious interrupts */ 1586 /* Disable interrupts so we dont take any spurious interrupts */
1615 scic_controller_disable_interrupts(ihost); 1587 sci_controller_disable_interrupts(ihost);
1616 1588
1617 /* Reset the SCU */ 1589 /* Reset the SCU */
1618 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); 1590 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
@@ -1627,82 +1599,82 @@ static void scic_sds_controller_reset_hardware(struct isci_host *ihost)
1627 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 1599 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1628} 1600}
1629 1601
1630static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) 1602static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1631{ 1603{
1632 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1604 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1633 1605
1634 scic_sds_controller_reset_hardware(ihost); 1606 sci_controller_reset_hardware(ihost);
1635 sci_change_state(&ihost->sm, SCIC_RESET); 1607 sci_change_state(&ihost->sm, SCIC_RESET);
1636} 1608}
1637 1609
1638static const struct sci_base_state scic_sds_controller_state_table[] = { 1610static const struct sci_base_state sci_controller_state_table[] = {
1639 [SCIC_INITIAL] = { 1611 [SCIC_INITIAL] = {
1640 .enter_state = scic_sds_controller_initial_state_enter, 1612 .enter_state = sci_controller_initial_state_enter,
1641 }, 1613 },
1642 [SCIC_RESET] = {}, 1614 [SCIC_RESET] = {},
1643 [SCIC_INITIALIZING] = {}, 1615 [SCIC_INITIALIZING] = {},
1644 [SCIC_INITIALIZED] = {}, 1616 [SCIC_INITIALIZED] = {},
1645 [SCIC_STARTING] = { 1617 [SCIC_STARTING] = {
1646 .exit_state = scic_sds_controller_starting_state_exit, 1618 .exit_state = sci_controller_starting_state_exit,
1647 }, 1619 },
1648 [SCIC_READY] = { 1620 [SCIC_READY] = {
1649 .enter_state = scic_sds_controller_ready_state_enter, 1621 .enter_state = sci_controller_ready_state_enter,
1650 .exit_state = scic_sds_controller_ready_state_exit, 1622 .exit_state = sci_controller_ready_state_exit,
1651 }, 1623 },
1652 [SCIC_RESETTING] = { 1624 [SCIC_RESETTING] = {
1653 .enter_state = scic_sds_controller_resetting_state_enter, 1625 .enter_state = sci_controller_resetting_state_enter,
1654 }, 1626 },
1655 [SCIC_STOPPING] = { 1627 [SCIC_STOPPING] = {
1656 .enter_state = scic_sds_controller_stopping_state_enter, 1628 .enter_state = sci_controller_stopping_state_enter,
1657 .exit_state = scic_sds_controller_stopping_state_exit, 1629 .exit_state = sci_controller_stopping_state_exit,
1658 }, 1630 },
1659 [SCIC_STOPPED] = {}, 1631 [SCIC_STOPPED] = {},
1660 [SCIC_FAILED] = {} 1632 [SCIC_FAILED] = {}
1661}; 1633};
1662 1634
1663static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) 1635static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1664{ 1636{
1665 /* these defaults are overridden by the platform / firmware */ 1637 /* these defaults are overridden by the platform / firmware */
1666 u16 index; 1638 u16 index;
1667 1639
1668 /* Default to APC mode. */ 1640 /* Default to APC mode. */
1669 ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; 1641 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1670 1642
1671 /* Default to APC mode. */ 1643 /* Default to APC mode. */
1672 ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; 1644 ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
1673 1645
1674 /* Default to no SSC operation. */ 1646 /* Default to no SSC operation. */
1675 ihost->oem_parameters.sds1.controller.do_enable_ssc = false; 1647 ihost->oem_parameters.controller.do_enable_ssc = false;
1676 1648
1677 /* Initialize all of the port parameter information to narrow ports. */ 1649 /* Initialize all of the port parameter information to narrow ports. */
1678 for (index = 0; index < SCI_MAX_PORTS; index++) { 1650 for (index = 0; index < SCI_MAX_PORTS; index++) {
1679 ihost->oem_parameters.sds1.ports[index].phy_mask = 0; 1651 ihost->oem_parameters.ports[index].phy_mask = 0;
1680 } 1652 }
1681 1653
1682 /* Initialize all of the phy parameter information. */ 1654 /* Initialize all of the phy parameter information. */
1683 for (index = 0; index < SCI_MAX_PHYS; index++) { 1655 for (index = 0; index < SCI_MAX_PHYS; index++) {
1684 /* Default to 6G (i.e. Gen 3) for now. */ 1656 /* Default to 6G (i.e. Gen 3) for now. */
1685 ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; 1657 ihost->user_parameters.phys[index].max_speed_generation = 3;
1686 1658
1687 /* the frequencies cannot be 0 */ 1659 /* the frequencies cannot be 0 */
1688 ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; 1660 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1689 ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; 1661 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1690 ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; 1662 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1691 1663
1692 /* 1664 /*
1693 * Previous Vitesse based expanders had a arbitration issue that 1665 * Previous Vitesse based expanders had a arbitration issue that
1694 * is worked around by having the upper 32-bits of SAS address 1666 * is worked around by having the upper 32-bits of SAS address
1695 * with a value greater then the Vitesse company identifier. 1667 * with a value greater then the Vitesse company identifier.
1696 * Hence, usage of 0x5FCFFFFF. */ 1668 * Hence, usage of 0x5FCFFFFF. */
1697 ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; 1669 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1698 ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; 1670 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1699 } 1671 }
1700 1672
1701 ihost->user_parameters.sds1.stp_inactivity_timeout = 5; 1673 ihost->user_parameters.stp_inactivity_timeout = 5;
1702 ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; 1674 ihost->user_parameters.ssp_inactivity_timeout = 5;
1703 ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; 1675 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1704 ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; 1676 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1705 ihost->user_parameters.sds1.no_outbound_task_timeout = 20; 1677 ihost->user_parameters.no_outbound_task_timeout = 20;
1706} 1678}
1707 1679
1708static void controller_timeout(unsigned long data) 1680static void controller_timeout(unsigned long data)
@@ -1718,7 +1690,7 @@ static void controller_timeout(unsigned long data)
1718 goto done; 1690 goto done;
1719 1691
1720 if (sm->current_state_id == SCIC_STARTING) 1692 if (sm->current_state_id == SCIC_STARTING)
1721 scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); 1693 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1722 else if (sm->current_state_id == SCIC_STOPPING) { 1694 else if (sm->current_state_id == SCIC_STOPPING) {
1723 sci_change_state(sm, SCIC_FAILED); 1695 sci_change_state(sm, SCIC_FAILED);
1724 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); 1696 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
@@ -1732,45 +1704,29 @@ done:
1732 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1704 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1733} 1705}
1734 1706
1735/** 1707static enum sci_status sci_controller_construct(struct isci_host *ihost,
1736 * scic_controller_construct() - This method will attempt to construct a 1708 void __iomem *scu_base,
1737 * controller object utilizing the supplied parameter information. 1709 void __iomem *smu_base)
1738 * @c: This parameter specifies the controller to be constructed.
1739 * @scu_base: mapped base address of the scu registers
1740 * @smu_base: mapped base address of the smu registers
1741 *
1742 * Indicate if the controller was successfully constructed or if it failed in
1743 * some way. SCI_SUCCESS This value is returned if the controller was
1744 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
1745 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
1746 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
1747 * This value is returned if the controller does not support the supplied type.
1748 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
1749 * controller does not support the supplied initialization data version.
1750 */
1751static enum sci_status scic_controller_construct(struct isci_host *ihost,
1752 void __iomem *scu_base,
1753 void __iomem *smu_base)
1754{ 1710{
1755 u8 i; 1711 u8 i;
1756 1712
1757 sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); 1713 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1758 1714
1759 ihost->scu_registers = scu_base; 1715 ihost->scu_registers = scu_base;
1760 ihost->smu_registers = smu_base; 1716 ihost->smu_registers = smu_base;
1761 1717
1762 scic_sds_port_configuration_agent_construct(&ihost->port_agent); 1718 sci_port_configuration_agent_construct(&ihost->port_agent);
1763 1719
1764 /* Construct the ports for this controller */ 1720 /* Construct the ports for this controller */
1765 for (i = 0; i < SCI_MAX_PORTS; i++) 1721 for (i = 0; i < SCI_MAX_PORTS; i++)
1766 scic_sds_port_construct(&ihost->ports[i], i, ihost); 1722 sci_port_construct(&ihost->ports[i], i, ihost);
1767 scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); 1723 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1768 1724
1769 /* Construct the phys for this controller */ 1725 /* Construct the phys for this controller */
1770 for (i = 0; i < SCI_MAX_PHYS; i++) { 1726 for (i = 0; i < SCI_MAX_PHYS; i++) {
1771 /* Add all the PHYs to the dummy port */ 1727 /* Add all the PHYs to the dummy port */
1772 scic_sds_phy_construct(&ihost->phys[i], 1728 sci_phy_construct(&ihost->phys[i],
1773 &ihost->ports[SCI_MAX_PORTS], i); 1729 &ihost->ports[SCI_MAX_PORTS], i);
1774 } 1730 }
1775 1731
1776 ihost->invalid_phy_mask = 0; 1732 ihost->invalid_phy_mask = 0;
@@ -1778,12 +1734,12 @@ static enum sci_status scic_controller_construct(struct isci_host *ihost,
1778 sci_init_timer(&ihost->timer, controller_timeout); 1734 sci_init_timer(&ihost->timer, controller_timeout);
1779 1735
1780 /* Initialize the User and OEM parameters to default values. */ 1736 /* Initialize the User and OEM parameters to default values. */
1781 scic_sds_controller_set_default_config_parameters(ihost); 1737 sci_controller_set_default_config_parameters(ihost);
1782 1738
1783 return scic_controller_reset(ihost); 1739 return sci_controller_reset(ihost);
1784} 1740}
1785 1741
1786int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) 1742int sci_oem_parameters_validate(struct sci_oem_params *oem)
1787{ 1743{
1788 int i; 1744 int i;
1789 1745
@@ -1817,8 +1773,7 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
1817 return 0; 1773 return 0;
1818} 1774}
1819 1775
1820static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, 1776static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1821 union scic_oem_parameters *scic_parms)
1822{ 1777{
1823 u32 state = ihost->sm.current_state_id; 1778 u32 state = ihost->sm.current_state_id;
1824 1779
@@ -1826,9 +1781,8 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost,
1826 state == SCIC_INITIALIZING || 1781 state == SCIC_INITIALIZING ||
1827 state == SCIC_INITIALIZED) { 1782 state == SCIC_INITIALIZED) {
1828 1783
1829 if (scic_oem_parameters_validate(&scic_parms->sds1)) 1784 if (sci_oem_parameters_validate(&ihost->oem_parameters))
1830 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1785 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1831 ihost->oem_parameters.sds1 = scic_parms->sds1;
1832 1786
1833 return SCI_SUCCESS; 1787 return SCI_SUCCESS;
1834 } 1788 }
@@ -1836,13 +1790,6 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost,
1836 return SCI_FAILURE_INVALID_STATE; 1790 return SCI_FAILURE_INVALID_STATE;
1837} 1791}
1838 1792
1839void scic_oem_parameters_get(
1840 struct isci_host *ihost,
1841 union scic_oem_parameters *scic_parms)
1842{
1843 memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms));
1844}
1845
1846static void power_control_timeout(unsigned long data) 1793static void power_control_timeout(unsigned long data)
1847{ 1794{
1848 struct sci_timer *tmr = (struct sci_timer *)data; 1795 struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1873,13 +1820,13 @@ static void power_control_timeout(unsigned long data)
1873 continue; 1820 continue;
1874 1821
1875 if (ihost->power_control.phys_granted_power >= 1822 if (ihost->power_control.phys_granted_power >=
1876 ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) 1823 ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
1877 break; 1824 break;
1878 1825
1879 ihost->power_control.requesters[i] = NULL; 1826 ihost->power_control.requesters[i] = NULL;
1880 ihost->power_control.phys_waiting--; 1827 ihost->power_control.phys_waiting--;
1881 ihost->power_control.phys_granted_power++; 1828 ihost->power_control.phys_granted_power++;
1882 scic_sds_phy_consume_power_handler(iphy); 1829 sci_phy_consume_power_handler(iphy);
1883 } 1830 }
1884 1831
1885 /* 1832 /*
@@ -1893,22 +1840,15 @@ done:
1893 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1840 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1894} 1841}
1895 1842
1896/** 1843void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1897 * This method inserts the phy in the stagger spinup control queue. 1844 struct isci_phy *iphy)
1898 * @scic:
1899 *
1900 *
1901 */
1902void scic_sds_controller_power_control_queue_insert(
1903 struct isci_host *ihost,
1904 struct isci_phy *iphy)
1905{ 1845{
1906 BUG_ON(iphy == NULL); 1846 BUG_ON(iphy == NULL);
1907 1847
1908 if (ihost->power_control.phys_granted_power < 1848 if (ihost->power_control.phys_granted_power <
1909 ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { 1849 ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
1910 ihost->power_control.phys_granted_power++; 1850 ihost->power_control.phys_granted_power++;
1911 scic_sds_phy_consume_power_handler(iphy); 1851 sci_phy_consume_power_handler(iphy);
1912 1852
1913 /* 1853 /*
1914 * stop and start the power_control timer. When the timer fires, the 1854 * stop and start the power_control timer. When the timer fires, the
@@ -1928,21 +1868,13 @@ void scic_sds_controller_power_control_queue_insert(
1928 } 1868 }
1929} 1869}
1930 1870
1931/** 1871void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1932 * This method removes the phy from the stagger spinup control queue. 1872 struct isci_phy *iphy)
1933 * @scic:
1934 *
1935 *
1936 */
1937void scic_sds_controller_power_control_queue_remove(
1938 struct isci_host *ihost,
1939 struct isci_phy *iphy)
1940{ 1873{
1941 BUG_ON(iphy == NULL); 1874 BUG_ON(iphy == NULL);
1942 1875
1943 if (ihost->power_control.requesters[iphy->phy_index] != NULL) { 1876 if (ihost->power_control.requesters[iphy->phy_index])
1944 ihost->power_control.phys_waiting--; 1877 ihost->power_control.phys_waiting--;
1945 }
1946 1878
1947 ihost->power_control.requesters[iphy->phy_index] = NULL; 1879 ihost->power_control.requesters[iphy->phy_index] = NULL;
1948} 1880}
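The two queue routines above implement staggered spin-up: a phy is granted power immediately while the controller is under the OEM max_concurrent_dev_spin_up limit, otherwise it is parked in requesters[] until the power-control timer grants it later. A minimal self-contained sketch of that decision follows; the simplified types and names are assumptions for illustration, not the driver's own.

#include <stdbool.h>

#define MAX_PHYS 4

struct example_phy;                     /* opaque in this sketch */

struct power_control {
        unsigned int phys_granted_power;
        unsigned int phys_waiting;
        unsigned int max_concurrent_spin_up;    /* from the OEM parameters */
        struct example_phy *requesters[MAX_PHYS];
};

/* Grant spin-up power right away while under the OEM limit; otherwise park
 * the phy in its requesters[] slot so the power-control timer can grant it
 * on a later tick. Returns true if the caller may spin the phy up now. */
static bool power_control_request(struct power_control *pc,
                                  struct example_phy *phy,
                                  unsigned int phy_index)
{
        if (pc->phys_granted_power < pc->max_concurrent_spin_up) {
                pc->phys_granted_power++;
                return true;
        }

        if (!pc->requesters[phy_index]) {
                pc->requesters[phy_index] = phy;
                pc->phys_waiting++;
        }
        return false;
}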
@@ -1952,9 +1884,9 @@ void scic_sds_controller_power_control_queue_remove(
1952/* Initialize the AFE for this phy index. We need to read the AFE setup from 1884/* Initialize the AFE for this phy index. We need to read the AFE setup from
1953 * the OEM parameters 1885 * the OEM parameters
1954 */ 1886 */
1955static void scic_sds_controller_afe_initialization(struct isci_host *ihost) 1887static void sci_controller_afe_initialization(struct isci_host *ihost)
1956{ 1888{
1957 const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; 1889 const struct sci_oem_params *oem = &ihost->oem_parameters;
1958 u32 afe_status; 1890 u32 afe_status;
1959 u32 phy_id; 1891 u32 phy_id;
1960 1892
@@ -2111,7 +2043,7 @@ static void scic_sds_controller_afe_initialization(struct isci_host *ihost)
2111 udelay(AFE_REGISTER_WRITE_DELAY); 2043 udelay(AFE_REGISTER_WRITE_DELAY);
2112} 2044}
2113 2045
2114static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) 2046static void sci_controller_initialize_power_control(struct isci_host *ihost)
2115{ 2047{
2116 sci_init_timer(&ihost->power_control.timer, power_control_timeout); 2048 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2117 2049
@@ -2122,7 +2054,7 @@ static void scic_sds_controller_initialize_power_control(struct isci_host *ihost
2122 ihost->power_control.phys_granted_power = 0; 2054 ihost->power_control.phys_granted_power = 0;
2123} 2055}
2124 2056
2125static enum sci_status scic_controller_initialize(struct isci_host *ihost) 2057static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2126{ 2058{
2127 struct sci_base_state_machine *sm = &ihost->sm; 2059 struct sci_base_state_machine *sm = &ihost->sm;
2128 enum sci_status result = SCI_FAILURE; 2060 enum sci_status result = SCI_FAILURE;
@@ -2142,14 +2074,14 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost)
2142 ihost->next_phy_to_start = 0; 2074 ihost->next_phy_to_start = 0;
2143 ihost->phy_startup_timer_pending = false; 2075 ihost->phy_startup_timer_pending = false;
2144 2076
2145 scic_sds_controller_initialize_power_control(ihost); 2077 sci_controller_initialize_power_control(ihost);
2146 2078
2147 /* 2079 /*
2148 * There is nothing to do here for B0 since we do not have to 2080 * There is nothing to do here for B0 since we do not have to
2149 * program the AFE registers. 2081 * program the AFE registers.
2150	 * @todo The AFE settings are supposed to be correct for the B0 but 2082	 * @todo The AFE settings are supposed to be correct for the B0 but
2151	 * presently they seem to be wrong. */ 2083	 * presently they seem to be wrong. */
2152 scic_sds_controller_afe_initialization(ihost); 2084 sci_controller_afe_initialization(ihost);
2153 2085
2154 2086
2155 /* Take the hardware out of reset */ 2087 /* Take the hardware out of reset */
@@ -2206,24 +2138,22 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost)
2206 * are accessed during the port initialization. 2138 * are accessed during the port initialization.
2207 */ 2139 */
2208 for (i = 0; i < SCI_MAX_PHYS; i++) { 2140 for (i = 0; i < SCI_MAX_PHYS; i++) {
2209 result = scic_sds_phy_initialize(&ihost->phys[i], 2141 result = sci_phy_initialize(&ihost->phys[i],
2210 &ihost->scu_registers->peg0.pe[i].tl, 2142 &ihost->scu_registers->peg0.pe[i].tl,
2211 &ihost->scu_registers->peg0.pe[i].ll); 2143 &ihost->scu_registers->peg0.pe[i].ll);
2212 if (result != SCI_SUCCESS) 2144 if (result != SCI_SUCCESS)
2213 goto out; 2145 goto out;
2214 } 2146 }
2215 2147
2216 for (i = 0; i < ihost->logical_port_entries; i++) { 2148 for (i = 0; i < ihost->logical_port_entries; i++) {
2217 result = scic_sds_port_initialize(&ihost->ports[i], 2149 struct isci_port *iport = &ihost->ports[i];
2218 &ihost->scu_registers->peg0.ptsg.port[i],
2219 &ihost->scu_registers->peg0.ptsg.protocol_engine,
2220 &ihost->scu_registers->peg0.viit[i]);
2221 2150
2222 if (result != SCI_SUCCESS) 2151 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2223 goto out; 2152 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2153 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2224 } 2154 }
2225 2155
2226 result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); 2156 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2227 2157
2228 out: 2158 out:
2229 /* Advance the controller state machine */ 2159 /* Advance the controller state machine */
@@ -2236,9 +2166,8 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost)
2236 return result; 2166 return result;
2237} 2167}
2238 2168
2239static enum sci_status scic_user_parameters_set( 2169static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
2240 struct isci_host *ihost, 2170 struct sci_user_parameters *sci_parms)
2241 union scic_user_parameters *scic_parms)
2242{ 2171{
2243 u32 state = ihost->sm.current_state_id; 2172 u32 state = ihost->sm.current_state_id;
2244 2173
@@ -2254,7 +2183,7 @@ static enum sci_status scic_user_parameters_set(
2254 for (index = 0; index < SCI_MAX_PHYS; index++) { 2183 for (index = 0; index < SCI_MAX_PHYS; index++) {
2255 struct sci_phy_user_params *user_phy; 2184 struct sci_phy_user_params *user_phy;
2256 2185
2257 user_phy = &scic_parms->sds1.phys[index]; 2186 user_phy = &sci_parms->phys[index];
2258 2187
2259 if (!((user_phy->max_speed_generation <= 2188 if (!((user_phy->max_speed_generation <=
2260 SCIC_SDS_PARM_MAX_SPEED) && 2189 SCIC_SDS_PARM_MAX_SPEED) &&
@@ -2275,14 +2204,14 @@ static enum sci_status scic_user_parameters_set(
2275 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2204 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2276 } 2205 }
2277 2206
2278 if ((scic_parms->sds1.stp_inactivity_timeout == 0) || 2207 if ((sci_parms->stp_inactivity_timeout == 0) ||
2279 (scic_parms->sds1.ssp_inactivity_timeout == 0) || 2208 (sci_parms->ssp_inactivity_timeout == 0) ||
2280 (scic_parms->sds1.stp_max_occupancy_timeout == 0) || 2209 (sci_parms->stp_max_occupancy_timeout == 0) ||
2281 (scic_parms->sds1.ssp_max_occupancy_timeout == 0) || 2210 (sci_parms->ssp_max_occupancy_timeout == 0) ||
2282 (scic_parms->sds1.no_outbound_task_timeout == 0)) 2211 (sci_parms->no_outbound_task_timeout == 0))
2283 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2212 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2284 2213
2285 memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); 2214 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
2286 2215
2287 return SCI_SUCCESS; 2216 return SCI_SUCCESS;
2288 } 2217 }
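As a hedged usage sketch of the validation above (hypothetical caller, not driver code): fetch the module defaults the same way isci_host_init() does later in this file, adjust a timeout, and apply the result while the controller is still pre-start. Zero timeouts and out-of-range phy speeds are rejected with SCI_FAILURE_INVALID_PARAMETER_VALUE.

/* Hypothetical caller sketch (illustration only). */
static enum sci_status example_tune_user_params(struct isci_host *ihost)
{
        struct sci_user_parameters parms;

        isci_user_parameters_get(&parms);
        parms.ssp_max_occupancy_timeout = 20;   /* must remain non-zero */

        return sci_user_parameters_set(ihost, &parms);
}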
@@ -2290,7 +2219,7 @@ static enum sci_status scic_user_parameters_set(
2290 return SCI_FAILURE_INVALID_STATE; 2219 return SCI_FAILURE_INVALID_STATE;
2291} 2220}
2292 2221
2293static int scic_controller_mem_init(struct isci_host *ihost) 2222static int sci_controller_mem_init(struct isci_host *ihost)
2294{ 2223{
2295 struct device *dev = &ihost->pdev->dev; 2224 struct device *dev = &ihost->pdev->dev;
2296 dma_addr_t dma; 2225 dma_addr_t dma;
@@ -2307,7 +2236,7 @@ static int scic_controller_mem_init(struct isci_host *ihost)
2307 2236
2308 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); 2237 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2309 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, 2238 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
2310 GFP_KERNEL); 2239 GFP_KERNEL);
2311 if (!ihost->remote_node_context_table) 2240 if (!ihost->remote_node_context_table)
2312 return -ENOMEM; 2241 return -ENOMEM;
2313 2242
@@ -2323,7 +2252,7 @@ static int scic_controller_mem_init(struct isci_host *ihost)
2323 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); 2252 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
2324 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); 2253 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
2325 2254
2326 err = scic_sds_unsolicited_frame_control_construct(ihost); 2255 err = sci_unsolicited_frame_control_construct(ihost);
2327 if (err) 2256 if (err)
2328 return err; 2257 return err;
2329 2258
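Each DMA table set up in sci_controller_mem_init() follows the same pattern: allocate a coherent buffer, then program its bus address into a lower/upper pair of 32-bit registers, as the host task table writes above show. A small self-contained illustration of just the address split; the helper names are illustrative, not the driver's register map.

#include <stdint.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Program a 64-bit DMA table address into a lower/upper register pair, the
 * same shape as the host task table writes above. */
static void program_table_base(volatile uint32_t *lo_reg,
                               volatile uint32_t *hi_reg, uint64_t dma)
{
        *lo_reg = lower_32(dma);
        *hi_reg = upper_32(dma);
}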
@@ -2348,8 +2277,7 @@ int isci_host_init(struct isci_host *ihost)
2348{ 2277{
2349 int err = 0, i; 2278 int err = 0, i;
2350 enum sci_status status; 2279 enum sci_status status;
2351 union scic_oem_parameters oem; 2280 struct sci_user_parameters sci_user_params;
2352 union scic_user_parameters scic_user_params;
2353 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); 2281 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
2354 2282
2355 spin_lock_init(&ihost->state_lock); 2283 spin_lock_init(&ihost->state_lock);
@@ -2358,12 +2286,12 @@ int isci_host_init(struct isci_host *ihost)
2358 2286
2359 isci_host_change_state(ihost, isci_starting); 2287 isci_host_change_state(ihost, isci_starting);
2360 2288
2361 status = scic_controller_construct(ihost, scu_base(ihost), 2289 status = sci_controller_construct(ihost, scu_base(ihost),
2362 smu_base(ihost)); 2290 smu_base(ihost));
2363 2291
2364 if (status != SCI_SUCCESS) { 2292 if (status != SCI_SUCCESS) {
2365 dev_err(&ihost->pdev->dev, 2293 dev_err(&ihost->pdev->dev,
2366 "%s: scic_controller_construct failed - status = %x\n", 2294 "%s: sci_controller_construct failed - status = %x\n",
2367 __func__, 2295 __func__,
2368 status); 2296 status);
2369 return -ENODEV; 2297 return -ENODEV;
@@ -2376,21 +2304,18 @@ int isci_host_init(struct isci_host *ihost)
2376 * grab initial values stored in the controller object for OEM and USER 2304 * grab initial values stored in the controller object for OEM and USER
2377 * parameters 2305 * parameters
2378 */ 2306 */
2379 isci_user_parameters_get(ihost, &scic_user_params); 2307 isci_user_parameters_get(&sci_user_params);
2380 status = scic_user_parameters_set(ihost, 2308 status = sci_user_parameters_set(ihost, &sci_user_params);
2381 &scic_user_params);
2382 if (status != SCI_SUCCESS) { 2309 if (status != SCI_SUCCESS) {
2383 dev_warn(&ihost->pdev->dev, 2310 dev_warn(&ihost->pdev->dev,
2384 "%s: scic_user_parameters_set failed\n", 2311 "%s: sci_user_parameters_set failed\n",
2385 __func__); 2312 __func__);
2386 return -ENODEV; 2313 return -ENODEV;
2387 } 2314 }
2388 2315
2389 scic_oem_parameters_get(ihost, &oem);
2390
2391 /* grab any OEM parameters specified in orom */ 2316 /* grab any OEM parameters specified in orom */
2392 if (pci_info->orom) { 2317 if (pci_info->orom) {
2393 status = isci_parse_oem_parameters(&oem, 2318 status = isci_parse_oem_parameters(&ihost->oem_parameters,
2394 pci_info->orom, 2319 pci_info->orom,
2395 ihost->id); 2320 ihost->id);
2396 if (status != SCI_SUCCESS) { 2321 if (status != SCI_SUCCESS) {
@@ -2400,10 +2325,10 @@ int isci_host_init(struct isci_host *ihost)
2400 } 2325 }
2401 } 2326 }
2402 2327
2403 status = scic_oem_parameters_set(ihost, &oem); 2328 status = sci_oem_parameters_set(ihost);
2404 if (status != SCI_SUCCESS) { 2329 if (status != SCI_SUCCESS) {
2405 dev_warn(&ihost->pdev->dev, 2330 dev_warn(&ihost->pdev->dev,
2406 "%s: scic_oem_parameters_set failed\n", 2331 "%s: sci_oem_parameters_set failed\n",
2407 __func__); 2332 __func__);
2408 return -ENODEV; 2333 return -ENODEV;
2409 } 2334 }
@@ -2415,17 +2340,17 @@ int isci_host_init(struct isci_host *ihost)
2415 INIT_LIST_HEAD(&ihost->requests_to_errorback); 2340 INIT_LIST_HEAD(&ihost->requests_to_errorback);
2416 2341
2417 spin_lock_irq(&ihost->scic_lock); 2342 spin_lock_irq(&ihost->scic_lock);
2418 status = scic_controller_initialize(ihost); 2343 status = sci_controller_initialize(ihost);
2419 spin_unlock_irq(&ihost->scic_lock); 2344 spin_unlock_irq(&ihost->scic_lock);
2420 if (status != SCI_SUCCESS) { 2345 if (status != SCI_SUCCESS) {
2421 dev_warn(&ihost->pdev->dev, 2346 dev_warn(&ihost->pdev->dev,
2422 "%s: scic_controller_initialize failed -" 2347 "%s: sci_controller_initialize failed -"
2423 " status = 0x%x\n", 2348 " status = 0x%x\n",
2424 __func__, status); 2349 __func__, status);
2425 return -ENODEV; 2350 return -ENODEV;
2426 } 2351 }
2427 2352
2428 err = scic_controller_mem_init(ihost); 2353 err = sci_controller_mem_init(ihost);
2429 if (err) 2354 if (err)
2430 return err; 2355 return err;
2431 2356
@@ -2463,20 +2388,20 @@ int isci_host_init(struct isci_host *ihost)
2463 return 0; 2388 return 0;
2464} 2389}
2465 2390
2466void scic_sds_controller_link_up(struct isci_host *ihost, 2391void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2467 struct isci_port *iport, struct isci_phy *iphy) 2392 struct isci_phy *iphy)
2468{ 2393{
2469 switch (ihost->sm.current_state_id) { 2394 switch (ihost->sm.current_state_id) {
2470 case SCIC_STARTING: 2395 case SCIC_STARTING:
2471 sci_del_timer(&ihost->phy_timer); 2396 sci_del_timer(&ihost->phy_timer);
2472 ihost->phy_startup_timer_pending = false; 2397 ihost->phy_startup_timer_pending = false;
2473 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2398 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2474 iport, iphy); 2399 iport, iphy);
2475 scic_sds_controller_start_next_phy(ihost); 2400 sci_controller_start_next_phy(ihost);
2476 break; 2401 break;
2477 case SCIC_READY: 2402 case SCIC_READY:
2478 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2403 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2479 iport, iphy); 2404 iport, iphy);
2480 break; 2405 break;
2481 default: 2406 default:
2482 dev_dbg(&ihost->pdev->dev, 2407 dev_dbg(&ihost->pdev->dev,
@@ -2486,8 +2411,8 @@ void scic_sds_controller_link_up(struct isci_host *ihost,
2486 } 2411 }
2487} 2412}
2488 2413
2489void scic_sds_controller_link_down(struct isci_host *ihost, 2414void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2490 struct isci_port *iport, struct isci_phy *iphy) 2415 struct isci_phy *iphy)
2491{ 2416{
2492 switch (ihost->sm.current_state_id) { 2417 switch (ihost->sm.current_state_id) {
2493 case SCIC_STARTING: 2418 case SCIC_STARTING:
@@ -2505,12 +2430,7 @@ void scic_sds_controller_link_down(struct isci_host *ihost,
2505 } 2430 }
2506} 2431}
2507 2432
2508/** 2433static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2509 * This is a helper method to determine if any remote devices on this
2510 * controller are still in the stopping state.
2511 *
2512 */
2513static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost)
2514{ 2434{
2515 u32 index; 2435 u32 index;
2516 2436
@@ -2523,12 +2443,8 @@ static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ih
2523 return false; 2443 return false;
2524} 2444}
2525 2445
2526/** 2446void sci_controller_remote_device_stopped(struct isci_host *ihost,
2527 * This method is called by the remote device to inform the controller 2447 struct isci_remote_device *idev)
2528 * object that the remote device has stopped.
2529 */
2530void scic_sds_controller_remote_device_stopped(struct isci_host *ihost,
2531 struct isci_remote_device *idev)
2532{ 2448{
2533 if (ihost->sm.current_state_id != SCIC_STOPPING) { 2449 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2534 dev_dbg(&ihost->pdev->dev, 2450 dev_dbg(&ihost->pdev->dev,
@@ -2539,32 +2455,19 @@ void scic_sds_controller_remote_device_stopped(struct isci_host *ihost,
2539 return; 2455 return;
2540 } 2456 }
2541 2457
2542 if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { 2458 if (!sci_controller_has_remote_devices_stopping(ihost))
2543 sci_change_state(&ihost->sm, SCIC_STOPPED); 2459 sci_change_state(&ihost->sm, SCIC_STOPPED);
2544 }
2545} 2460}
2546 2461
2547/** 2462void sci_controller_post_request(struct isci_host *ihost, u32 request)
2548 * This method will write to the SCU PCP register the request value. The method
2549 * is used to suspend/resume ports, devices, and phys.
2550 * @scic:
2551 *
2552 *
2553 */
2554void scic_sds_controller_post_request(
2555 struct isci_host *ihost,
2556 u32 request)
2557{ 2463{
2558 dev_dbg(&ihost->pdev->dev, 2464 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2559 "%s: SCIC Controller 0x%p post request 0x%08x\n", 2465 __func__, ihost->id, request);
2560 __func__,
2561 ihost,
2562 request);
2563 2466
2564 writel(request, &ihost->smu_registers->post_context_port); 2467 writel(request, &ihost->smu_registers->post_context_port);
2565} 2468}
2566 2469
2567struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) 2470struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2568{ 2471{
2569 u16 task_index; 2472 u16 task_index;
2570 u16 task_sequence; 2473 u16 task_sequence;
@@ -2599,15 +2502,14 @@ struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag)
2599 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no remote 2502 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no remote
2600 * node indexes available. 2503 * node indexes available.
2601 */ 2504 */
2602enum sci_status scic_sds_controller_allocate_remote_node_context( 2505enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2603 struct isci_host *ihost, 2506 struct isci_remote_device *idev,
2604 struct isci_remote_device *idev, 2507 u16 *node_id)
2605 u16 *node_id)
2606{ 2508{
2607 u16 node_index; 2509 u16 node_index;
2608 u32 remote_node_count = scic_sds_remote_device_node_count(idev); 2510 u32 remote_node_count = sci_remote_device_node_count(idev);
2609 2511
2610 node_index = scic_sds_remote_node_table_allocate_remote_node( 2512 node_index = sci_remote_node_table_allocate_remote_node(
2611 &ihost->available_remote_nodes, remote_node_count 2513 &ihost->available_remote_nodes, remote_node_count
2612 ); 2514 );
2613 2515
@@ -2622,68 +2524,26 @@ enum sci_status scic_sds_controller_allocate_remote_node_context(
2622 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 2524 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2623} 2525}
2624 2526
2625/** 2527void sci_controller_free_remote_node_context(struct isci_host *ihost,
2626 * This method frees the remote node index back to the available pool. Once 2528 struct isci_remote_device *idev,
2627 * this is done the remote node context buffer is no longer valid and can 2529 u16 node_id)
2628 * not be used.
2629 * @scic:
2630 * @sci_dev:
2631 * @node_id:
2632 *
2633 */
2634void scic_sds_controller_free_remote_node_context(
2635 struct isci_host *ihost,
2636 struct isci_remote_device *idev,
2637 u16 node_id)
2638{ 2530{
2639 u32 remote_node_count = scic_sds_remote_device_node_count(idev); 2531 u32 remote_node_count = sci_remote_device_node_count(idev);
2640 2532
2641 if (ihost->device_table[node_id] == idev) { 2533 if (ihost->device_table[node_id] == idev) {
2642 ihost->device_table[node_id] = NULL; 2534 ihost->device_table[node_id] = NULL;
2643 2535
2644 scic_sds_remote_node_table_release_remote_node_index( 2536 sci_remote_node_table_release_remote_node_index(
2645 &ihost->available_remote_nodes, remote_node_count, node_id 2537 &ihost->available_remote_nodes, remote_node_count, node_id
2646 ); 2538 );
2647 } 2539 }
2648} 2540}
2649 2541
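The allocate/free pair above manages remote node context indexes as a pool: a device borrows an index (or, per remote_node_count, a run of contiguous indexes) when it is constructed and returns it when it stops. A toy single-index pool, purely illustrative and much simpler than the driver's remote node table:

#include <stdbool.h>
#include <stdint.h>

#define RNC_ENTRIES 64
#define RNC_INVALID 0xffff

static bool rnc_used[RNC_ENTRIES];

/* Borrow one free index from the pool; the driver's table can also hand
 * out several contiguous indexes per device (remote_node_count above). */
static uint16_t rnc_alloc(void)
{
        uint16_t i;

        for (i = 0; i < RNC_ENTRIES; i++) {
                if (!rnc_used[i]) {
                        rnc_used[i] = true;
                        return i;
                }
        }
        return RNC_INVALID;     /* maps to "insufficient resources" above */
}

/* Return the index so a later device can reuse it; after this the backing
 * remote node context entry must be treated as invalid. */
static void rnc_free(uint16_t node_id)
{
        if (node_id < RNC_ENTRIES)
                rnc_used[node_id] = false;
}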
2650/** 2542void sci_controller_copy_sata_response(void *response_buffer,
2651 * This method returns the union scu_remote_node_context for the specified remote 2543 void *frame_header,
2652 * node id. 2544 void *frame_buffer)
2653 * @scic:
2654 * @node_id:
2655 *
2656 * union scu_remote_node_context*
2657 */
2658union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
2659 struct isci_host *ihost,
2660 u16 node_id
2661 ) {
2662 if (
2663 (node_id < ihost->remote_node_entries)
2664 && (ihost->device_table[node_id] != NULL)
2665 ) {
2666 return &ihost->remote_node_context_table[node_id];
2667 }
2668
2669 return NULL;
2670}
2671
2672/**
2673 *
2674 * @response_buffer: This is the buffer into which the D2H register FIS will be
2675 * constructed.
2676 * @frame_header: This is the frame header returned by the hardware.
2677 * @frame_buffer: This is the frame buffer returned by the hardware.
2678 *
2679 * This method will combine the frame header and frame buffer to create a SATA
2680 * D2H register FIS.
2681 */
2682void scic_sds_controller_copy_sata_response(
2683 void *response_buffer,
2684 void *frame_header,
2685 void *frame_buffer)
2686{ 2545{
2546 /* XXX type safety? */
2687 memcpy(response_buffer, frame_header, sizeof(u32)); 2547 memcpy(response_buffer, frame_header, sizeof(u32));
2688 2548
2689 memcpy(response_buffer + sizeof(u32), 2549 memcpy(response_buffer + sizeof(u32),
@@ -2691,21 +2551,9 @@ void scic_sds_controller_copy_sata_response(
2691 sizeof(struct dev_to_host_fis) - sizeof(u32)); 2551 sizeof(struct dev_to_host_fis) - sizeof(u32));
2692} 2552}
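sci_controller_copy_sata_response() above stitches the SATA device-to-host register FIS back together from the two pieces the hardware returns: the first dword arrives in the unsolicited frame header and the remaining dwords in the frame buffer. A self-contained sketch of that copy; the 20-byte D2H FIS size is the usual SATA value, stated here as an assumption rather than taken from the driver.

#include <stdint.h>
#include <string.h>

#define D2H_FIS_SIZE 20         /* assumed size of a SATA D2H register FIS */

/* Rebuild the D2H register FIS from its two pieces: the first dword from
 * the frame header, the rest from the frame buffer. */
static void copy_sata_response(void *fis, const void *frame_header,
                               const void *frame_buffer)
{
        memcpy(fis, frame_header, sizeof(uint32_t));
        memcpy((uint8_t *)fis + sizeof(uint32_t), frame_buffer,
               D2H_FIS_SIZE - sizeof(uint32_t));
}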
2693 2553
2694/** 2554void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2695 * This method releases the frame; once this is done, the frame is available for
2696 * re-use by the hardware. The data contained in the frame header and frame
2697 * buffer is no longer valid. The UF queue get pointer is only updated if UF
2698 * control indicates this is appropriate.
2699 * @scic:
2700 * @frame_index:
2701 *
2702 */
2703void scic_sds_controller_release_frame(
2704 struct isci_host *ihost,
2705 u32 frame_index)
2706{ 2555{
2707 if (scic_sds_unsolicited_frame_control_release_frame( 2556 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2708 &ihost->uf_control, frame_index) == true)
2709 writel(ihost->uf_control.get, 2557 writel(ihost->uf_control.get,
2710 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 2558 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2711} 2559}
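The removed comment above notes that the unsolicited-frame get pointer is only updated "if UF control indicates this is appropriate". One plausible reading, sketched below as a toy model (an assumption, not the driver's uf_control implementation): frames can be released out of order, but the hardware-visible get pointer only advances once the frame at the head of the queue has been released.

#include <stdbool.h>
#include <stdint.h>

#define UF_QUEUE_DEPTH 16

struct uf_queue {
        bool released[UF_QUEUE_DEPTH];
        uint32_t get;                   /* hardware-visible get pointer */
};

/* Mark one frame released; advance the get pointer only while the frame at
 * the head has been released. Returns true when the caller should write
 * the new get value back to the hardware. */
static bool uf_release_frame(struct uf_queue *q, uint32_t frame_index)
{
        bool advanced = false;

        q->released[frame_index % UF_QUEUE_DEPTH] = true;

        while (q->released[q->get % UF_QUEUE_DEPTH]) {
                q->released[q->get % UF_QUEUE_DEPTH] = false;
                q->get++;
                advanced = true;
        }
        return advanced;
}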
@@ -2763,21 +2611,9 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2763 return SCI_FAILURE_INVALID_IO_TAG; 2611 return SCI_FAILURE_INVALID_IO_TAG;
2764} 2612}
2765 2613
2766/** 2614enum sci_status sci_controller_start_io(struct isci_host *ihost,
2767 * scic_controller_start_io() - This method is called by the SCI user to 2615 struct isci_remote_device *idev,
2768 * send/start an IO request. If the method invocation is successful, then 2616 struct isci_request *ireq)
2769 * the IO request has been queued to the hardware for processing.
2770 * @controller: the handle to the controller object for which to start an IO
2771 * request.
2772 * @remote_device: the handle to the remote device object for which to start an
2773 * IO request.
2774 * @io_request: the handle to the io request object to start.
2775 * @io_tag: This parameter specifies a previously allocated IO tag that the
2776 * user desires to be utilized for this request.
2777 */
2778enum sci_status scic_controller_start_io(struct isci_host *ihost,
2779 struct isci_remote_device *idev,
2780 struct isci_request *ireq)
2781{ 2617{
2782 enum sci_status status; 2618 enum sci_status status;
2783 2619
@@ -2786,36 +2622,23 @@ enum sci_status scic_controller_start_io(struct isci_host *ihost,
2786 return SCI_FAILURE_INVALID_STATE; 2622 return SCI_FAILURE_INVALID_STATE;
2787 } 2623 }
2788 2624
2789 status = scic_sds_remote_device_start_io(ihost, idev, ireq); 2625 status = sci_remote_device_start_io(ihost, idev, ireq);
2790 if (status != SCI_SUCCESS) 2626 if (status != SCI_SUCCESS)
2791 return status; 2627 return status;
2792 2628
2793 set_bit(IREQ_ACTIVE, &ireq->flags); 2629 set_bit(IREQ_ACTIVE, &ireq->flags);
2794 scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); 2630 sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
2795 return SCI_SUCCESS; 2631 return SCI_SUCCESS;
2796} 2632}
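For context, a hypothetical caller of sci_controller_start_io() (an illustration, not taken from the driver): the core routine above requires the controller to be READY and the remote device to accept the request before the request is flagged active and posted, and callers in this file enter the core under scic_lock.

/* Hypothetical caller sketch (illustration only). */
static enum sci_status example_start_io(struct isci_host *ihost,
                                        struct isci_remote_device *idev,
                                        struct isci_request *ireq)
{
        enum sci_status status;

        spin_lock_irq(&ihost->scic_lock);
        status = sci_controller_start_io(ihost, idev, ireq);
        spin_unlock_irq(&ihost->scic_lock);

        return status;
}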
2797 2633
2798/** 2634enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2799 * scic_controller_terminate_request() - This method is called by the SCI Core 2635 struct isci_remote_device *idev,
2800 * user to terminate an ongoing (i.e. started) core IO request. This does 2636 struct isci_request *ireq)
2801 * not abort the IO request at the target, but rather removes the IO request
2802 * from the host controller.
2803 * @controller: the handle to the controller object for which to terminate a
2804 * request.
2805 * @remote_device: the handle to the remote device object for which to
2806 * terminate a request.
2807 * @request: the handle to the io or task management request object to
2808 * terminate.
2809 *
2810 * Indicate if the controller successfully began the terminate process for the
2811 * IO request. SCI_SUCCESS if the terminate process was successfully started
2812 * for the request. Determine the failure situations and return values.
2813 */
2814enum sci_status scic_controller_terminate_request(
2815 struct isci_host *ihost,
2816 struct isci_remote_device *idev,
2817 struct isci_request *ireq)
2818{ 2637{
2638 /* terminate an ongoing (i.e. started) core IO request. This does not
2639 * abort the IO request at the target, but rather removes the IO
2640 * request from the host controller.
2641 */
2819 enum sci_status status; 2642 enum sci_status status;
2820 2643
2821 if (ihost->sm.current_state_id != SCIC_READY) { 2644 if (ihost->sm.current_state_id != SCIC_READY) {
@@ -2824,7 +2647,7 @@ enum sci_status scic_controller_terminate_request(
2824 return SCI_FAILURE_INVALID_STATE; 2647 return SCI_FAILURE_INVALID_STATE;
2825 } 2648 }
2826 2649
2827 status = scic_sds_io_request_terminate(ireq); 2650 status = sci_io_request_terminate(ireq);
2828 if (status != SCI_SUCCESS) 2651 if (status != SCI_SUCCESS)
2829 return status; 2652 return status;
2830 2653
@@ -2832,27 +2655,25 @@ enum sci_status scic_controller_terminate_request(
2832 * Utilize the original post context command and or in the POST_TC_ABORT 2655 * Utilize the original post context command and or in the POST_TC_ABORT
2833 * request sub-type. 2656 * request sub-type.
2834 */ 2657 */
2835 scic_sds_controller_post_request(ihost, 2658 sci_controller_post_request(ihost,
2836 scic_sds_request_get_post_context(ireq) | 2659 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2837 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2838 return SCI_SUCCESS; 2660 return SCI_SUCCESS;
2839} 2661}
2840 2662
2841/** 2663/**
2842 * scic_controller_complete_io() - This method will perform core specific 2664 * sci_controller_complete_io() - This method will perform core specific
2843 * completion operations for an IO request. After this method is invoked, 2665 * completion operations for an IO request. After this method is invoked,
2844 * the user should consider the IO request as invalid until it is properly 2666 * the user should consider the IO request as invalid until it is properly
2845 * reused (i.e. re-constructed). 2667 * reused (i.e. re-constructed).
2846 * @controller: The handle to the controller object for which to complete the 2668 * @ihost: The handle to the controller object for which to complete the
2847 * IO request. 2669 * IO request.
2848 * @remote_device: The handle to the remote device object for which to complete 2670 * @idev: The handle to the remote device object for which to complete
2849 * the IO request. 2671 * the IO request.
2850 * @io_request: the handle to the io request object to complete. 2672 * @ireq: the handle to the io request object to complete.
2851 */ 2673 */
2852enum sci_status scic_controller_complete_io( 2674enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2853 struct isci_host *ihost, 2675 struct isci_remote_device *idev,
2854 struct isci_remote_device *idev, 2676 struct isci_request *ireq)
2855 struct isci_request *ireq)
2856{ 2677{
2857 enum sci_status status; 2678 enum sci_status status;
2858 u16 index; 2679 u16 index;
@@ -2862,7 +2683,7 @@ enum sci_status scic_controller_complete_io(
2862 /* XXX: Implement this function */ 2683 /* XXX: Implement this function */
2863 return SCI_FAILURE; 2684 return SCI_FAILURE;
2864 case SCIC_READY: 2685 case SCIC_READY:
2865 status = scic_sds_remote_device_complete_io(ihost, idev, ireq); 2686 status = sci_remote_device_complete_io(ihost, idev, ireq);
2866 if (status != SCI_SUCCESS) 2687 if (status != SCI_SUCCESS)
2867 return status; 2688 return status;
2868 2689
@@ -2876,7 +2697,7 @@ enum sci_status scic_controller_complete_io(
2876 2697
2877} 2698}
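A hypothetical completion-side pairing (illustration only, not driver code): once the core accepts the completion, the tag can go back to the pool via isci_free_tag(), and per the kerneldoc above the request must be re-constructed before it is used again.

/* Hypothetical completion-side sketch (illustration only). */
static void example_complete_io(struct isci_host *ihost,
                                struct isci_remote_device *idev,
                                struct isci_request *ireq, u16 io_tag)
{
        if (sci_controller_complete_io(ihost, idev, ireq) == SCI_SUCCESS)
                isci_free_tag(ihost, io_tag);
}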
2878 2699
2879enum sci_status scic_controller_continue_io(struct isci_request *ireq) 2700enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2880{ 2701{
2881 struct isci_host *ihost = ireq->owning_controller; 2702 struct isci_host *ihost = ireq->owning_controller;
2882 2703
@@ -2886,12 +2707,12 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq)
2886 } 2707 }
2887 2708
2888 set_bit(IREQ_ACTIVE, &ireq->flags); 2709 set_bit(IREQ_ACTIVE, &ireq->flags);
2889 scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); 2710 sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
2890 return SCI_SUCCESS; 2711 return SCI_SUCCESS;
2891} 2712}
2892 2713
2893/** 2714/**
2894 * scic_controller_start_task() - This method is called by the SCIC user to 2715 * sci_controller_start_task() - This method is called by the SCIC user to
2895 * send/start a framework task management request. 2716 * send/start a framework task management request.
2896 * @controller: the handle to the controller object for which to start the task 2717 * @controller: the handle to the controller object for which to start the task
2897 * management request. 2718 * management request.
@@ -2899,10 +2720,9 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq)
2899 * the task management request. 2720 * the task management request.
2900 * @task_request: the handle to the task request object to start. 2721 * @task_request: the handle to the task request object to start.
2901 */ 2722 */
2902enum sci_task_status scic_controller_start_task( 2723enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2903 struct isci_host *ihost, 2724 struct isci_remote_device *idev,
2904 struct isci_remote_device *idev, 2725 struct isci_request *ireq)
2905 struct isci_request *ireq)
2906{ 2726{
2907 enum sci_status status; 2727 enum sci_status status;
2908 2728
@@ -2914,7 +2734,7 @@ enum sci_task_status scic_controller_start_task(
2914 return SCI_TASK_FAILURE_INVALID_STATE; 2734 return SCI_TASK_FAILURE_INVALID_STATE;
2915 } 2735 }
2916 2736
2917 status = scic_sds_remote_device_start_task(ihost, idev, ireq); 2737 status = sci_remote_device_start_task(ihost, idev, ireq);
2918 switch (status) { 2738 switch (status) {
2919 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 2739 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2920 set_bit(IREQ_ACTIVE, &ireq->flags); 2740 set_bit(IREQ_ACTIVE, &ireq->flags);
@@ -2928,8 +2748,8 @@ enum sci_task_status scic_controller_start_task(
2928 case SCI_SUCCESS: 2748 case SCI_SUCCESS:
2929 set_bit(IREQ_ACTIVE, &ireq->flags); 2749 set_bit(IREQ_ACTIVE, &ireq->flags);
2930 2750
2931 scic_sds_controller_post_request(ihost, 2751 sci_controller_post_request(ihost,
2932 scic_sds_request_get_post_context(ireq)); 2752 sci_request_get_post_context(ireq));
2933 break; 2753 break;
2934 default: 2754 default:
2935 break; 2755 break;