author | Dan Williams <dan.j.williams@intel.com> | 2011-06-30 22:14:33 -0400
committer | Dan Williams <dan.j.williams@intel.com> | 2011-07-03 07:04:52 -0400
commit | 89a7301f21fb00e753089671eb9e4132aab8ea08 (patch)
tree | afa8bac0a36d0d5626997d8995f6c9194aef3a0f
parent | d9dcb4ba791de2a06b19ac47cd61601cf3d4e208 (diff)
isci: retire scic_sds_ and scic_ prefixes
The distinction between scic_sds_, scic_, and sci_ is no longer relevant,
so just unify the prefixes on sci_. The distinction between isci_ and
sci_ is historically significant, and useful for comparing the old
'core' to the current Linux driver. 'sci_' represents the former core as
well as the routines that are closer to the hardware and protocol than
their 'isci_' brethren. sci == SAS controller interface.
Also unwind the 'sds1' out of the parameter structs.
Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
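
As a rough, hypothetical sketch (the function names below are illustrative stubs, not code from this patch), the layering the message describes looks like the following: 'sci_' marks the former-core, hardware/protocol-facing routines that previously carried the scic_sds_/scic_ prefixes, while 'isci_' marks the Linux-facing code that wraps them. The same commit also flattens accesses such as ihost->oem_parameters.sds1.controller into ihost->oem_parameters.controller, as the hunks below show.

```c
/*
 * Hypothetical sketch of the post-rename layering; these are
 * illustrative stubs, not functions taken from the patch.
 */
struct isci_host;

/* core-side routine: this kind of function was scic_sds_controller_*() before */
static int sci_controller_do_work(struct isci_host *ihost)
{
	(void)ihost;	/* a real routine would program the SCU hardware here */
	return 0;
}

/* Linux-facing side keeps its isci_ prefix and delegates to the sci_ core */
int isci_host_do_work(struct isci_host *ihost)
{
	return sci_controller_do_work(ihost);
}
```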
-rw-r--r-- | drivers/scsi/isci/host.c | 742
-rw-r--r-- | drivers/scsi/isci/host.h | 93
-rw-r--r-- | drivers/scsi/isci/init.c | 4
-rw-r--r-- | drivers/scsi/isci/isci.h | 6
-rw-r--r-- | drivers/scsi/isci/phy.c | 385
-rw-r--r-- | drivers/scsi/isci/phy.h | 70
-rw-r--r-- | drivers/scsi/isci/port.c | 551
-rw-r--r-- | drivers/scsi/isci/port.h | 68
-rw-r--r-- | drivers/scsi/isci/port_config.c | 132
-rw-r--r-- | drivers/scsi/isci/probe_roms.c | 16
-rw-r--r-- | drivers/scsi/isci/probe_roms.h | 38
-rw-r--r-- | drivers/scsi/isci/remote_device.c | 300
-rw-r--r-- | drivers/scsi/isci/remote_device.h | 90
-rw-r--r-- | drivers/scsi/isci/remote_node_context.c | 198
-rw-r--r-- | drivers/scsi/isci/remote_node_context.h | 30
-rw-r--r-- | drivers/scsi/isci/remote_node_table.c | 114
-rw-r--r-- | drivers/scsi/isci/remote_node_table.h | 16
-rw-r--r-- | drivers/scsi/isci/request.c | 360
-rw-r--r-- | drivers/scsi/isci/request.h | 52
-rw-r--r-- | drivers/scsi/isci/sata.c | 4
-rw-r--r-- | drivers/scsi/isci/task.c | 24
-rw-r--r-- | drivers/scsi/isci/unsolicited_frame_control.c | 57
-rw-r--r-- | drivers/scsi/isci/unsolicited_frame_control.h | 42
23 files changed, 1449 insertions, 1943 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index bb298f8f609a..f31f64e4b713 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -180,8 +180,7 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) | |||
180 | handler(sm); | 180 | handler(sm); |
181 | } | 181 | } |
182 | 182 | ||
183 | static bool scic_sds_controller_completion_queue_has_entries( | 183 | static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) |
184 | struct isci_host *ihost) | ||
185 | { | 184 | { |
186 | u32 get_value = ihost->completion_queue_get; | 185 | u32 get_value = ihost->completion_queue_get; |
187 | u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; | 186 | u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; |
@@ -193,9 +192,9 @@ static bool scic_sds_controller_completion_queue_has_entries( | |||
193 | return false; | 192 | return false; |
194 | } | 193 | } |
195 | 194 | ||
196 | static bool scic_sds_controller_isr(struct isci_host *ihost) | 195 | static bool sci_controller_isr(struct isci_host *ihost) |
197 | { | 196 | { |
198 | if (scic_sds_controller_completion_queue_has_entries(ihost)) { | 197 | if (sci_controller_completion_queue_has_entries(ihost)) { |
199 | return true; | 198 | return true; |
200 | } else { | 199 | } else { |
201 | /* | 200 | /* |
@@ -219,13 +218,13 @@ irqreturn_t isci_msix_isr(int vec, void *data) | |||
219 | { | 218 | { |
220 | struct isci_host *ihost = data; | 219 | struct isci_host *ihost = data; |
221 | 220 | ||
222 | if (scic_sds_controller_isr(ihost)) | 221 | if (sci_controller_isr(ihost)) |
223 | tasklet_schedule(&ihost->completion_tasklet); | 222 | tasklet_schedule(&ihost->completion_tasklet); |
224 | 223 | ||
225 | return IRQ_HANDLED; | 224 | return IRQ_HANDLED; |
226 | } | 225 | } |
227 | 226 | ||
228 | static bool scic_sds_controller_error_isr(struct isci_host *ihost) | 227 | static bool sci_controller_error_isr(struct isci_host *ihost) |
229 | { | 228 | { |
230 | u32 interrupt_status; | 229 | u32 interrupt_status; |
231 | 230 | ||
@@ -252,35 +251,35 @@ static bool scic_sds_controller_error_isr(struct isci_host *ihost) | |||
252 | return false; | 251 | return false; |
253 | } | 252 | } |
254 | 253 | ||
255 | static void scic_sds_controller_task_completion(struct isci_host *ihost, | 254 | static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) |
256 | u32 completion_entry) | ||
257 | { | 255 | { |
258 | u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); | 256 | u32 index = SCU_GET_COMPLETION_INDEX(ent); |
259 | struct isci_request *ireq = ihost->reqs[index]; | 257 | struct isci_request *ireq = ihost->reqs[index]; |
260 | 258 | ||
261 | /* Make sure that we really want to process this IO request */ | 259 | /* Make sure that we really want to process this IO request */ |
262 | if (test_bit(IREQ_ACTIVE, &ireq->flags) && | 260 | if (test_bit(IREQ_ACTIVE, &ireq->flags) && |
263 | ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && | 261 | ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && |
264 | ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) | 262 | ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) |
265 | /* Yep this is a valid io request pass it along to the io request handler */ | 263 | /* Yep this is a valid io request pass it along to the |
266 | scic_sds_io_request_tc_completion(ireq, completion_entry); | 264 | * io request handler |
265 | */ | ||
266 | sci_io_request_tc_completion(ireq, ent); | ||
267 | } | 267 | } |
268 | 268 | ||
269 | static void scic_sds_controller_sdma_completion(struct isci_host *ihost, | 269 | static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) |
270 | u32 completion_entry) | ||
271 | { | 270 | { |
272 | u32 index; | 271 | u32 index; |
273 | struct isci_request *ireq; | 272 | struct isci_request *ireq; |
274 | struct isci_remote_device *idev; | 273 | struct isci_remote_device *idev; |
275 | 274 | ||
276 | index = SCU_GET_COMPLETION_INDEX(completion_entry); | 275 | index = SCU_GET_COMPLETION_INDEX(ent); |
277 | 276 | ||
278 | switch (scu_get_command_request_type(completion_entry)) { | 277 | switch (scu_get_command_request_type(ent)) { |
279 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: | 278 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: |
280 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: | 279 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: |
281 | ireq = ihost->reqs[index]; | 280 | ireq = ihost->reqs[index]; |
282 | dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", | 281 | dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", |
283 | __func__, completion_entry, ireq); | 282 | __func__, ent, ireq); |
284 | /* @todo For a post TC operation we need to fail the IO | 283 | /* @todo For a post TC operation we need to fail the IO |
285 | * request | 284 | * request |
286 | */ | 285 | */ |
@@ -290,20 +289,19 @@ static void scic_sds_controller_sdma_completion(struct isci_host *ihost, | |||
290 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: | 289 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: |
291 | idev = ihost->device_table[index]; | 290 | idev = ihost->device_table[index]; |
292 | dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", | 291 | dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", |
293 | __func__, completion_entry, idev); | 292 | __func__, ent, idev); |
294 | /* @todo For a port RNC operation we need to fail the | 293 | /* @todo For a port RNC operation we need to fail the |
295 | * device | 294 | * device |
296 | */ | 295 | */ |
297 | break; | 296 | break; |
298 | default: | 297 | default: |
299 | dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", | 298 | dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", |
300 | __func__, completion_entry); | 299 | __func__, ent); |
301 | break; | 300 | break; |
302 | } | 301 | } |
303 | } | 302 | } |
304 | 303 | ||
305 | static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, | 304 | static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) |
306 | u32 completion_entry) | ||
307 | { | 305 | { |
308 | u32 index; | 306 | u32 index; |
309 | u32 frame_index; | 307 | u32 frame_index; |
@@ -314,36 +312,36 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, | |||
314 | 312 | ||
315 | enum sci_status result = SCI_FAILURE; | 313 | enum sci_status result = SCI_FAILURE; |
316 | 314 | ||
317 | frame_index = SCU_GET_FRAME_INDEX(completion_entry); | 315 | frame_index = SCU_GET_FRAME_INDEX(ent); |
318 | 316 | ||
319 | frame_header = ihost->uf_control.buffers.array[frame_index].header; | 317 | frame_header = ihost->uf_control.buffers.array[frame_index].header; |
320 | ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; | 318 | ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; |
321 | 319 | ||
322 | if (SCU_GET_FRAME_ERROR(completion_entry)) { | 320 | if (SCU_GET_FRAME_ERROR(ent)) { |
323 | /* | 321 | /* |
324 | * / @todo If the IAF frame or SIGNATURE FIS frame has an error will | 322 | * / @todo If the IAF frame or SIGNATURE FIS frame has an error will |
325 | * / this cause a problem? We expect the phy initialization will | 323 | * / this cause a problem? We expect the phy initialization will |
326 | * / fail if there is an error in the frame. */ | 324 | * / fail if there is an error in the frame. */ |
327 | scic_sds_controller_release_frame(ihost, frame_index); | 325 | sci_controller_release_frame(ihost, frame_index); |
328 | return; | 326 | return; |
329 | } | 327 | } |
330 | 328 | ||
331 | if (frame_header->is_address_frame) { | 329 | if (frame_header->is_address_frame) { |
332 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); | 330 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); |
333 | iphy = &ihost->phys[index]; | 331 | iphy = &ihost->phys[index]; |
334 | result = scic_sds_phy_frame_handler(iphy, frame_index); | 332 | result = sci_phy_frame_handler(iphy, frame_index); |
335 | } else { | 333 | } else { |
336 | 334 | ||
337 | index = SCU_GET_COMPLETION_INDEX(completion_entry); | 335 | index = SCU_GET_COMPLETION_INDEX(ent); |
338 | 336 | ||
339 | if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { | 337 | if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { |
340 | /* | 338 | /* |
341 | * This is a signature fis or a frame from a direct attached SATA | 339 | * This is a signature fis or a frame from a direct attached SATA |
342 | * device that has not yet been created. In either case forwared | 340 | * device that has not yet been created. In either case forwared |
343 | * the frame to the PE and let it take care of the frame data. */ | 341 | * the frame to the PE and let it take care of the frame data. */ |
344 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); | 342 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); |
345 | iphy = &ihost->phys[index]; | 343 | iphy = &ihost->phys[index]; |
346 | result = scic_sds_phy_frame_handler(iphy, frame_index); | 344 | result = sci_phy_frame_handler(iphy, frame_index); |
347 | } else { | 345 | } else { |
348 | if (index < ihost->remote_node_entries) | 346 | if (index < ihost->remote_node_entries) |
349 | idev = ihost->device_table[index]; | 347 | idev = ihost->device_table[index]; |
@@ -351,9 +349,9 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, | |||
351 | idev = NULL; | 349 | idev = NULL; |
352 | 350 | ||
353 | if (idev != NULL) | 351 | if (idev != NULL) |
354 | result = scic_sds_remote_device_frame_handler(idev, frame_index); | 352 | result = sci_remote_device_frame_handler(idev, frame_index); |
355 | else | 353 | else |
356 | scic_sds_controller_release_frame(ihost, frame_index); | 354 | sci_controller_release_frame(ihost, frame_index); |
357 | } | 355 | } |
358 | } | 356 | } |
359 | 357 | ||
@@ -364,17 +362,16 @@ static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, | |||
364 | } | 362 | } |
365 | } | 363 | } |
366 | 364 | ||
367 | static void scic_sds_controller_event_completion(struct isci_host *ihost, | 365 | static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) |
368 | u32 completion_entry) | ||
369 | { | 366 | { |
370 | struct isci_remote_device *idev; | 367 | struct isci_remote_device *idev; |
371 | struct isci_request *ireq; | 368 | struct isci_request *ireq; |
372 | struct isci_phy *iphy; | 369 | struct isci_phy *iphy; |
373 | u32 index; | 370 | u32 index; |
374 | 371 | ||
375 | index = SCU_GET_COMPLETION_INDEX(completion_entry); | 372 | index = SCU_GET_COMPLETION_INDEX(ent); |
376 | 373 | ||
377 | switch (scu_get_event_type(completion_entry)) { | 374 | switch (scu_get_event_type(ent)) { |
378 | case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: | 375 | case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: |
379 | /* / @todo The driver did something wrong and we need to fix the condtion. */ | 376 | /* / @todo The driver did something wrong and we need to fix the condtion. */ |
380 | dev_err(&ihost->pdev->dev, | 377 | dev_err(&ihost->pdev->dev, |
@@ -382,7 +379,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
382 | "0x%x\n", | 379 | "0x%x\n", |
383 | __func__, | 380 | __func__, |
384 | ihost, | 381 | ihost, |
385 | completion_entry); | 382 | ent); |
386 | break; | 383 | break; |
387 | 384 | ||
388 | case SCU_EVENT_TYPE_SMU_PCQ_ERROR: | 385 | case SCU_EVENT_TYPE_SMU_PCQ_ERROR: |
@@ -396,21 +393,21 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
396 | "event 0x%x\n", | 393 | "event 0x%x\n", |
397 | __func__, | 394 | __func__, |
398 | ihost, | 395 | ihost, |
399 | completion_entry); | 396 | ent); |
400 | break; | 397 | break; |
401 | 398 | ||
402 | case SCU_EVENT_TYPE_TRANSPORT_ERROR: | 399 | case SCU_EVENT_TYPE_TRANSPORT_ERROR: |
403 | ireq = ihost->reqs[index]; | 400 | ireq = ihost->reqs[index]; |
404 | scic_sds_io_request_event_handler(ireq, completion_entry); | 401 | sci_io_request_event_handler(ireq, ent); |
405 | break; | 402 | break; |
406 | 403 | ||
407 | case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: | 404 | case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: |
408 | switch (scu_get_event_specifier(completion_entry)) { | 405 | switch (scu_get_event_specifier(ent)) { |
409 | case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: | 406 | case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: |
410 | case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: | 407 | case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: |
411 | ireq = ihost->reqs[index]; | 408 | ireq = ihost->reqs[index]; |
412 | if (ireq != NULL) | 409 | if (ireq != NULL) |
413 | scic_sds_io_request_event_handler(ireq, completion_entry); | 410 | sci_io_request_event_handler(ireq, ent); |
414 | else | 411 | else |
415 | dev_warn(&ihost->pdev->dev, | 412 | dev_warn(&ihost->pdev->dev, |
416 | "%s: SCIC Controller 0x%p received " | 413 | "%s: SCIC Controller 0x%p received " |
@@ -418,14 +415,14 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
418 | "that doesnt exist.\n", | 415 | "that doesnt exist.\n", |
419 | __func__, | 416 | __func__, |
420 | ihost, | 417 | ihost, |
421 | completion_entry); | 418 | ent); |
422 | 419 | ||
423 | break; | 420 | break; |
424 | 421 | ||
425 | case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: | 422 | case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: |
426 | idev = ihost->device_table[index]; | 423 | idev = ihost->device_table[index]; |
427 | if (idev != NULL) | 424 | if (idev != NULL) |
428 | scic_sds_remote_device_event_handler(idev, completion_entry); | 425 | sci_remote_device_event_handler(idev, ent); |
429 | else | 426 | else |
430 | dev_warn(&ihost->pdev->dev, | 427 | dev_warn(&ihost->pdev->dev, |
431 | "%s: SCIC Controller 0x%p received " | 428 | "%s: SCIC Controller 0x%p received " |
@@ -433,7 +430,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
433 | "that doesnt exist.\n", | 430 | "that doesnt exist.\n", |
434 | __func__, | 431 | __func__, |
435 | ihost, | 432 | ihost, |
436 | completion_entry); | 433 | ent); |
437 | 434 | ||
438 | break; | 435 | break; |
439 | } | 436 | } |
@@ -448,9 +445,9 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
448 | * direct error counter event to the phy object since that is where | 445 | * direct error counter event to the phy object since that is where |
449 | * we get the event notification. This is a type 4 event. */ | 446 | * we get the event notification. This is a type 4 event. */ |
450 | case SCU_EVENT_TYPE_OSSP_EVENT: | 447 | case SCU_EVENT_TYPE_OSSP_EVENT: |
451 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); | 448 | index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); |
452 | iphy = &ihost->phys[index]; | 449 | iphy = &ihost->phys[index]; |
453 | scic_sds_phy_event_handler(iphy, completion_entry); | 450 | sci_phy_event_handler(iphy, ent); |
454 | break; | 451 | break; |
455 | 452 | ||
456 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: | 453 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: |
@@ -460,7 +457,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
460 | idev = ihost->device_table[index]; | 457 | idev = ihost->device_table[index]; |
461 | 458 | ||
462 | if (idev != NULL) | 459 | if (idev != NULL) |
463 | scic_sds_remote_device_event_handler(idev, completion_entry); | 460 | sci_remote_device_event_handler(idev, ent); |
464 | } else | 461 | } else |
465 | dev_err(&ihost->pdev->dev, | 462 | dev_err(&ihost->pdev->dev, |
466 | "%s: SCIC Controller 0x%p received event 0x%x " | 463 | "%s: SCIC Controller 0x%p received event 0x%x " |
@@ -468,7 +465,7 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
468 | "exist.\n", | 465 | "exist.\n", |
469 | __func__, | 466 | __func__, |
470 | ihost, | 467 | ihost, |
471 | completion_entry, | 468 | ent, |
472 | index); | 469 | index); |
473 | 470 | ||
474 | break; | 471 | break; |
@@ -477,15 +474,15 @@ static void scic_sds_controller_event_completion(struct isci_host *ihost, | |||
477 | dev_warn(&ihost->pdev->dev, | 474 | dev_warn(&ihost->pdev->dev, |
478 | "%s: SCIC Controller received unknown event code %x\n", | 475 | "%s: SCIC Controller received unknown event code %x\n", |
479 | __func__, | 476 | __func__, |
480 | completion_entry); | 477 | ent); |
481 | break; | 478 | break; |
482 | } | 479 | } |
483 | } | 480 | } |
484 | 481 | ||
485 | static void scic_sds_controller_process_completions(struct isci_host *ihost) | 482 | static void sci_controller_process_completions(struct isci_host *ihost) |
486 | { | 483 | { |
487 | u32 completion_count = 0; | 484 | u32 completion_count = 0; |
488 | u32 completion_entry; | 485 | u32 ent; |
489 | u32 get_index; | 486 | u32 get_index; |
490 | u32 get_cycle; | 487 | u32 get_cycle; |
491 | u32 event_get; | 488 | u32 event_get; |
@@ -509,7 +506,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) | |||
509 | ) { | 506 | ) { |
510 | completion_count++; | 507 | completion_count++; |
511 | 508 | ||
512 | completion_entry = ihost->completion_queue[get_index]; | 509 | ent = ihost->completion_queue[get_index]; |
513 | 510 | ||
514 | /* increment the get pointer and check for rollover to toggle the cycle bit */ | 511 | /* increment the get pointer and check for rollover to toggle the cycle bit */ |
515 | get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << | 512 | get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << |
@@ -519,19 +516,19 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) | |||
519 | dev_dbg(&ihost->pdev->dev, | 516 | dev_dbg(&ihost->pdev->dev, |
520 | "%s: completion queue entry:0x%08x\n", | 517 | "%s: completion queue entry:0x%08x\n", |
521 | __func__, | 518 | __func__, |
522 | completion_entry); | 519 | ent); |
523 | 520 | ||
524 | switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { | 521 | switch (SCU_GET_COMPLETION_TYPE(ent)) { |
525 | case SCU_COMPLETION_TYPE_TASK: | 522 | case SCU_COMPLETION_TYPE_TASK: |
526 | scic_sds_controller_task_completion(ihost, completion_entry); | 523 | sci_controller_task_completion(ihost, ent); |
527 | break; | 524 | break; |
528 | 525 | ||
529 | case SCU_COMPLETION_TYPE_SDMA: | 526 | case SCU_COMPLETION_TYPE_SDMA: |
530 | scic_sds_controller_sdma_completion(ihost, completion_entry); | 527 | sci_controller_sdma_completion(ihost, ent); |
531 | break; | 528 | break; |
532 | 529 | ||
533 | case SCU_COMPLETION_TYPE_UFI: | 530 | case SCU_COMPLETION_TYPE_UFI: |
534 | scic_sds_controller_unsolicited_frame(ihost, completion_entry); | 531 | sci_controller_unsolicited_frame(ihost, ent); |
535 | break; | 532 | break; |
536 | 533 | ||
537 | case SCU_COMPLETION_TYPE_EVENT: | 534 | case SCU_COMPLETION_TYPE_EVENT: |
@@ -540,7 +537,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) | |||
540 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); | 537 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); |
541 | event_get = (event_get+1) & (SCU_MAX_EVENTS-1); | 538 | event_get = (event_get+1) & (SCU_MAX_EVENTS-1); |
542 | 539 | ||
543 | scic_sds_controller_event_completion(ihost, completion_entry); | 540 | sci_controller_event_completion(ihost, ent); |
544 | break; | 541 | break; |
545 | } | 542 | } |
546 | default: | 543 | default: |
@@ -548,7 +545,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) | |||
548 | "%s: SCIC Controller received unknown " | 545 | "%s: SCIC Controller received unknown " |
549 | "completion type %x\n", | 546 | "completion type %x\n", |
550 | __func__, | 547 | __func__, |
551 | completion_entry); | 548 | ent); |
552 | break; | 549 | break; |
553 | } | 550 | } |
554 | } | 551 | } |
@@ -575,7 +572,7 @@ static void scic_sds_controller_process_completions(struct isci_host *ihost) | |||
575 | 572 | ||
576 | } | 573 | } |
577 | 574 | ||
578 | static void scic_sds_controller_error_handler(struct isci_host *ihost) | 575 | static void sci_controller_error_handler(struct isci_host *ihost) |
579 | { | 576 | { |
580 | u32 interrupt_status; | 577 | u32 interrupt_status; |
581 | 578 | ||
@@ -583,9 +580,9 @@ static void scic_sds_controller_error_handler(struct isci_host *ihost) | |||
583 | readl(&ihost->smu_registers->interrupt_status); | 580 | readl(&ihost->smu_registers->interrupt_status); |
584 | 581 | ||
585 | if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && | 582 | if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && |
586 | scic_sds_controller_completion_queue_has_entries(ihost)) { | 583 | sci_controller_completion_queue_has_entries(ihost)) { |
587 | 584 | ||
588 | scic_sds_controller_process_completions(ihost); | 585 | sci_controller_process_completions(ihost); |
589 | writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); | 586 | writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); |
590 | } else { | 587 | } else { |
591 | dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, | 588 | dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, |
@@ -607,13 +604,13 @@ irqreturn_t isci_intx_isr(int vec, void *data) | |||
607 | irqreturn_t ret = IRQ_NONE; | 604 | irqreturn_t ret = IRQ_NONE; |
608 | struct isci_host *ihost = data; | 605 | struct isci_host *ihost = data; |
609 | 606 | ||
610 | if (scic_sds_controller_isr(ihost)) { | 607 | if (sci_controller_isr(ihost)) { |
611 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); | 608 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
612 | tasklet_schedule(&ihost->completion_tasklet); | 609 | tasklet_schedule(&ihost->completion_tasklet); |
613 | ret = IRQ_HANDLED; | 610 | ret = IRQ_HANDLED; |
614 | } else if (scic_sds_controller_error_isr(ihost)) { | 611 | } else if (sci_controller_error_isr(ihost)) { |
615 | spin_lock(&ihost->scic_lock); | 612 | spin_lock(&ihost->scic_lock); |
616 | scic_sds_controller_error_handler(ihost); | 613 | sci_controller_error_handler(ihost); |
617 | spin_unlock(&ihost->scic_lock); | 614 | spin_unlock(&ihost->scic_lock); |
618 | ret = IRQ_HANDLED; | 615 | ret = IRQ_HANDLED; |
619 | } | 616 | } |
@@ -625,8 +622,8 @@ irqreturn_t isci_error_isr(int vec, void *data) | |||
625 | { | 622 | { |
626 | struct isci_host *ihost = data; | 623 | struct isci_host *ihost = data; |
627 | 624 | ||
628 | if (scic_sds_controller_error_isr(ihost)) | 625 | if (sci_controller_error_isr(ihost)) |
629 | scic_sds_controller_error_handler(ihost); | 626 | sci_controller_error_handler(ihost); |
630 | 627 | ||
631 | return IRQ_HANDLED; | 628 | return IRQ_HANDLED; |
632 | } | 629 | } |
@@ -670,8 +667,8 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
670 | } | 667 | } |
671 | 668 | ||
672 | /** | 669 | /** |
673 | * scic_controller_get_suggested_start_timeout() - This method returns the | 670 | * sci_controller_get_suggested_start_timeout() - This method returns the |
674 | * suggested scic_controller_start() timeout amount. The user is free to | 671 | * suggested sci_controller_start() timeout amount. The user is free to |
675 | * use any timeout value, but this method provides the suggested minimum | 672 | * use any timeout value, but this method provides the suggested minimum |
676 | * start timeout value. The returned value is based upon empirical | 673 | * start timeout value. The returned value is based upon empirical |
677 | * information determined as a result of interoperability testing. | 674 | * information determined as a result of interoperability testing. |
@@ -681,7 +678,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
681 | * This method returns the number of milliseconds for the suggested start | 678 | * This method returns the number of milliseconds for the suggested start |
682 | * operation timeout. | 679 | * operation timeout. |
683 | */ | 680 | */ |
684 | static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) | 681 | static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) |
685 | { | 682 | { |
686 | /* Validate the user supplied parameters. */ | 683 | /* Validate the user supplied parameters. */ |
687 | if (!ihost) | 684 | if (!ihost) |
@@ -706,19 +703,19 @@ static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) | |||
706 | + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); | 703 | + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); |
707 | } | 704 | } |
708 | 705 | ||
709 | static void scic_controller_enable_interrupts(struct isci_host *ihost) | 706 | static void sci_controller_enable_interrupts(struct isci_host *ihost) |
710 | { | 707 | { |
711 | BUG_ON(ihost->smu_registers == NULL); | 708 | BUG_ON(ihost->smu_registers == NULL); |
712 | writel(0, &ihost->smu_registers->interrupt_mask); | 709 | writel(0, &ihost->smu_registers->interrupt_mask); |
713 | } | 710 | } |
714 | 711 | ||
715 | void scic_controller_disable_interrupts(struct isci_host *ihost) | 712 | void sci_controller_disable_interrupts(struct isci_host *ihost) |
716 | { | 713 | { |
717 | BUG_ON(ihost->smu_registers == NULL); | 714 | BUG_ON(ihost->smu_registers == NULL); |
718 | writel(0xffffffff, &ihost->smu_registers->interrupt_mask); | 715 | writel(0xffffffff, &ihost->smu_registers->interrupt_mask); |
719 | } | 716 | } |
720 | 717 | ||
721 | static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) | 718 | static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) |
722 | { | 719 | { |
723 | u32 port_task_scheduler_value; | 720 | u32 port_task_scheduler_value; |
724 | 721 | ||
@@ -731,7 +728,7 @@ static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *iho | |||
731 | &ihost->scu_registers->peg0.ptsg.control); | 728 | &ihost->scu_registers->peg0.ptsg.control); |
732 | } | 729 | } |
733 | 730 | ||
734 | static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) | 731 | static void sci_controller_assign_task_entries(struct isci_host *ihost) |
735 | { | 732 | { |
736 | u32 task_assignment; | 733 | u32 task_assignment; |
737 | 734 | ||
@@ -752,7 +749,7 @@ static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) | |||
752 | 749 | ||
753 | } | 750 | } |
754 | 751 | ||
755 | static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) | 752 | static void sci_controller_initialize_completion_queue(struct isci_host *ihost) |
756 | { | 753 | { |
757 | u32 index; | 754 | u32 index; |
758 | u32 completion_queue_control_value; | 755 | u32 completion_queue_control_value; |
@@ -799,7 +796,7 @@ static void scic_sds_controller_initialize_completion_queue(struct isci_host *ih | |||
799 | } | 796 | } |
800 | } | 797 | } |
801 | 798 | ||
802 | static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) | 799 | static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) |
803 | { | 800 | { |
804 | u32 frame_queue_control_value; | 801 | u32 frame_queue_control_value; |
805 | u32 frame_queue_get_value; | 802 | u32 frame_queue_get_value; |
@@ -826,22 +823,8 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_h | |||
826 | &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); | 823 | &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); |
827 | } | 824 | } |
828 | 825 | ||
829 | /** | 826 | static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) |
830 | * This method will attempt to transition into the ready state for the | ||
831 | * controller and indicate that the controller start operation has completed | ||
832 | * if all criteria are met. | ||
833 | * @scic: This parameter indicates the controller object for which | ||
834 | * to transition to ready. | ||
835 | * @status: This parameter indicates the status value to be pass into the call | ||
836 | * to scic_cb_controller_start_complete(). | ||
837 | * | ||
838 | * none. | ||
839 | */ | ||
840 | static void scic_sds_controller_transition_to_ready( | ||
841 | struct isci_host *ihost, | ||
842 | enum sci_status status) | ||
843 | { | 827 | { |
844 | |||
845 | if (ihost->sm.current_state_id == SCIC_STARTING) { | 828 | if (ihost->sm.current_state_id == SCIC_STARTING) { |
846 | /* | 829 | /* |
847 | * We move into the ready state, because some of the phys/ports | 830 | * We move into the ready state, because some of the phys/ports |
@@ -855,7 +838,7 @@ static void scic_sds_controller_transition_to_ready( | |||
855 | 838 | ||
856 | static bool is_phy_starting(struct isci_phy *iphy) | 839 | static bool is_phy_starting(struct isci_phy *iphy) |
857 | { | 840 | { |
858 | enum scic_sds_phy_states state; | 841 | enum sci_phy_states state; |
859 | 842 | ||
860 | state = iphy->sm.current_state_id; | 843 | state = iphy->sm.current_state_id; |
861 | switch (state) { | 844 | switch (state) { |
@@ -876,16 +859,16 @@ static bool is_phy_starting(struct isci_phy *iphy) | |||
876 | } | 859 | } |
877 | 860 | ||
878 | /** | 861 | /** |
879 | * scic_sds_controller_start_next_phy - start phy | 862 | * sci_controller_start_next_phy - start phy |
880 | * @scic: controller | 863 | * @scic: controller |
881 | * | 864 | * |
882 | * If all the phys have been started, then attempt to transition the | 865 | * If all the phys have been started, then attempt to transition the |
883 | * controller to the READY state and inform the user | 866 | * controller to the READY state and inform the user |
884 | * (scic_cb_controller_start_complete()). | 867 | * (sci_cb_controller_start_complete()). |
885 | */ | 868 | */ |
886 | static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) | 869 | static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) |
887 | { | 870 | { |
888 | struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; | 871 | struct sci_oem_params *oem = &ihost->oem_parameters; |
889 | struct isci_phy *iphy; | 872 | struct isci_phy *iphy; |
890 | enum sci_status status; | 873 | enum sci_status status; |
891 | 874 | ||
@@ -924,7 +907,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos | |||
924 | * The controller has successfully finished the start process. | 907 | * The controller has successfully finished the start process. |
925 | * Inform the SCI Core user and transition to the READY state. */ | 908 | * Inform the SCI Core user and transition to the READY state. */ |
926 | if (is_controller_start_complete == true) { | 909 | if (is_controller_start_complete == true) { |
927 | scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); | 910 | sci_controller_transition_to_ready(ihost, SCI_SUCCESS); |
928 | sci_del_timer(&ihost->phy_timer); | 911 | sci_del_timer(&ihost->phy_timer); |
929 | ihost->phy_startup_timer_pending = false; | 912 | ihost->phy_startup_timer_pending = false; |
930 | } | 913 | } |
@@ -944,11 +927,11 @@ static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihos | |||
944 | * incorrectly for the PORT or it was never | 927 | * incorrectly for the PORT or it was never |
945 | * assigned to a PORT | 928 | * assigned to a PORT |
946 | */ | 929 | */ |
947 | return scic_sds_controller_start_next_phy(ihost); | 930 | return sci_controller_start_next_phy(ihost); |
948 | } | 931 | } |
949 | } | 932 | } |
950 | 933 | ||
951 | status = scic_sds_phy_start(iphy); | 934 | status = sci_phy_start(iphy); |
952 | 935 | ||
953 | if (status == SCI_SUCCESS) { | 936 | if (status == SCI_SUCCESS) { |
954 | sci_mod_timer(&ihost->phy_timer, | 937 | sci_mod_timer(&ihost->phy_timer, |
@@ -985,7 +968,7 @@ static void phy_startup_timeout(unsigned long data) | |||
985 | ihost->phy_startup_timer_pending = false; | 968 | ihost->phy_startup_timer_pending = false; |
986 | 969 | ||
987 | do { | 970 | do { |
988 | status = scic_sds_controller_start_next_phy(ihost); | 971 | status = sci_controller_start_next_phy(ihost); |
989 | } while (status != SCI_SUCCESS); | 972 | } while (status != SCI_SUCCESS); |
990 | 973 | ||
991 | done: | 974 | done: |
@@ -997,7 +980,7 @@ static u16 isci_tci_active(struct isci_host *ihost) | |||
997 | return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); | 980 | return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); |
998 | } | 981 | } |
999 | 982 | ||
1000 | static enum sci_status scic_controller_start(struct isci_host *ihost, | 983 | static enum sci_status sci_controller_start(struct isci_host *ihost, |
1001 | u32 timeout) | 984 | u32 timeout) |
1002 | { | 985 | { |
1003 | enum sci_status result; | 986 | enum sci_status result; |
@@ -1018,38 +1001,37 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, | |||
1018 | isci_tci_free(ihost, index); | 1001 | isci_tci_free(ihost, index); |
1019 | 1002 | ||
1020 | /* Build the RNi free pool */ | 1003 | /* Build the RNi free pool */ |
1021 | scic_sds_remote_node_table_initialize( | 1004 | sci_remote_node_table_initialize(&ihost->available_remote_nodes, |
1022 | &ihost->available_remote_nodes, | 1005 | ihost->remote_node_entries); |
1023 | ihost->remote_node_entries); | ||
1024 | 1006 | ||
1025 | /* | 1007 | /* |
1026 | * Before anything else lets make sure we will not be | 1008 | * Before anything else lets make sure we will not be |
1027 | * interrupted by the hardware. | 1009 | * interrupted by the hardware. |
1028 | */ | 1010 | */ |
1029 | scic_controller_disable_interrupts(ihost); | 1011 | sci_controller_disable_interrupts(ihost); |
1030 | 1012 | ||
1031 | /* Enable the port task scheduler */ | 1013 | /* Enable the port task scheduler */ |
1032 | scic_sds_controller_enable_port_task_scheduler(ihost); | 1014 | sci_controller_enable_port_task_scheduler(ihost); |
1033 | 1015 | ||
1034 | /* Assign all the task entries to ihost physical function */ | 1016 | /* Assign all the task entries to ihost physical function */ |
1035 | scic_sds_controller_assign_task_entries(ihost); | 1017 | sci_controller_assign_task_entries(ihost); |
1036 | 1018 | ||
1037 | /* Now initialize the completion queue */ | 1019 | /* Now initialize the completion queue */ |
1038 | scic_sds_controller_initialize_completion_queue(ihost); | 1020 | sci_controller_initialize_completion_queue(ihost); |
1039 | 1021 | ||
1040 | /* Initialize the unsolicited frame queue for use */ | 1022 | /* Initialize the unsolicited frame queue for use */ |
1041 | scic_sds_controller_initialize_unsolicited_frame_queue(ihost); | 1023 | sci_controller_initialize_unsolicited_frame_queue(ihost); |
1042 | 1024 | ||
1043 | /* Start all of the ports on this controller */ | 1025 | /* Start all of the ports on this controller */ |
1044 | for (index = 0; index < ihost->logical_port_entries; index++) { | 1026 | for (index = 0; index < ihost->logical_port_entries; index++) { |
1045 | struct isci_port *iport = &ihost->ports[index]; | 1027 | struct isci_port *iport = &ihost->ports[index]; |
1046 | 1028 | ||
1047 | result = scic_sds_port_start(iport); | 1029 | result = sci_port_start(iport); |
1048 | if (result) | 1030 | if (result) |
1049 | return result; | 1031 | return result; |
1050 | } | 1032 | } |
1051 | 1033 | ||
1052 | scic_sds_controller_start_next_phy(ihost); | 1034 | sci_controller_start_next_phy(ihost); |
1053 | 1035 | ||
1054 | sci_mod_timer(&ihost->timer, timeout); | 1036 | sci_mod_timer(&ihost->timer, timeout); |
1055 | 1037 | ||
@@ -1061,29 +1043,29 @@ static enum sci_status scic_controller_start(struct isci_host *ihost, | |||
1061 | void isci_host_scan_start(struct Scsi_Host *shost) | 1043 | void isci_host_scan_start(struct Scsi_Host *shost) |
1062 | { | 1044 | { |
1063 | struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; | 1045 | struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; |
1064 | unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost); | 1046 | unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); |
1065 | 1047 | ||
1066 | set_bit(IHOST_START_PENDING, &ihost->flags); | 1048 | set_bit(IHOST_START_PENDING, &ihost->flags); |
1067 | 1049 | ||
1068 | spin_lock_irq(&ihost->scic_lock); | 1050 | spin_lock_irq(&ihost->scic_lock); |
1069 | scic_controller_start(ihost, tmo); | 1051 | sci_controller_start(ihost, tmo); |
1070 | scic_controller_enable_interrupts(ihost); | 1052 | sci_controller_enable_interrupts(ihost); |
1071 | spin_unlock_irq(&ihost->scic_lock); | 1053 | spin_unlock_irq(&ihost->scic_lock); |
1072 | } | 1054 | } |
1073 | 1055 | ||
1074 | static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) | 1056 | static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) |
1075 | { | 1057 | { |
1076 | isci_host_change_state(ihost, isci_stopped); | 1058 | isci_host_change_state(ihost, isci_stopped); |
1077 | scic_controller_disable_interrupts(ihost); | 1059 | sci_controller_disable_interrupts(ihost); |
1078 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); | 1060 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); |
1079 | wake_up(&ihost->eventq); | 1061 | wake_up(&ihost->eventq); |
1080 | } | 1062 | } |
1081 | 1063 | ||
1082 | static void scic_sds_controller_completion_handler(struct isci_host *ihost) | 1064 | static void sci_controller_completion_handler(struct isci_host *ihost) |
1083 | { | 1065 | { |
1084 | /* Empty out the completion queue */ | 1066 | /* Empty out the completion queue */ |
1085 | if (scic_sds_controller_completion_queue_has_entries(ihost)) | 1067 | if (sci_controller_completion_queue_has_entries(ihost)) |
1086 | scic_sds_controller_process_completions(ihost); | 1068 | sci_controller_process_completions(ihost); |
1087 | 1069 | ||
1088 | /* Clear the interrupt and enable all interrupts again */ | 1070 | /* Clear the interrupt and enable all interrupts again */ |
1089 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); | 1071 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
@@ -1116,7 +1098,7 @@ static void isci_host_completion_routine(unsigned long data) | |||
1116 | 1098 | ||
1117 | spin_lock_irq(&ihost->scic_lock); | 1099 | spin_lock_irq(&ihost->scic_lock); |
1118 | 1100 | ||
1119 | scic_sds_controller_completion_handler(ihost); | 1101 | sci_controller_completion_handler(ihost); |
1120 | 1102 | ||
1121 | /* Take the lists of completed I/Os from the host. */ | 1103 | /* Take the lists of completed I/Os from the host. */ |
1122 | 1104 | ||
@@ -1203,7 +1185,7 @@ static void isci_host_completion_routine(unsigned long data) | |||
1203 | } | 1185 | } |
1204 | 1186 | ||
1205 | /** | 1187 | /** |
1206 | * scic_controller_stop() - This method will stop an individual controller | 1188 | * sci_controller_stop() - This method will stop an individual controller |
1207 | * object.This method will invoke the associated user callback upon | 1189 | * object.This method will invoke the associated user callback upon |
1208 | * completion. The completion callback is called when the following | 1190 | * completion. The completion callback is called when the following |
1209 | * conditions are met: -# the method return status is SCI_SUCCESS. -# the | 1191 | * conditions are met: -# the method return status is SCI_SUCCESS. -# the |
@@ -1220,8 +1202,7 @@ static void isci_host_completion_routine(unsigned long data) | |||
1220 | * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the | 1202 | * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the |
1221 | * controller is not either in the STARTED or STOPPED states. | 1203 | * controller is not either in the STARTED or STOPPED states. |
1222 | */ | 1204 | */ |
1223 | static enum sci_status scic_controller_stop(struct isci_host *ihost, | 1205 | static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) |
1224 | u32 timeout) | ||
1225 | { | 1206 | { |
1226 | if (ihost->sm.current_state_id != SCIC_READY) { | 1207 | if (ihost->sm.current_state_id != SCIC_READY) { |
1227 | dev_warn(&ihost->pdev->dev, | 1208 | dev_warn(&ihost->pdev->dev, |
@@ -1236,7 +1217,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, | |||
1236 | } | 1217 | } |
1237 | 1218 | ||
1238 | /** | 1219 | /** |
1239 | * scic_controller_reset() - This method will reset the supplied core | 1220 | * sci_controller_reset() - This method will reset the supplied core |
1240 | * controller regardless of the state of said controller. This operation is | 1221 | * controller regardless of the state of said controller. This operation is |
1241 | * considered destructive. In other words, all current operations are wiped | 1222 | * considered destructive. In other words, all current operations are wiped |
1242 | * out. No IO completions for outstanding devices occur. Outstanding IO | 1223 | * out. No IO completions for outstanding devices occur. Outstanding IO |
@@ -1247,7 +1228,7 @@ static enum sci_status scic_controller_stop(struct isci_host *ihost, | |||
1247 | * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if | 1228 | * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if |
1248 | * the controller reset operation is unable to complete. | 1229 | * the controller reset operation is unable to complete. |
1249 | */ | 1230 | */ |
1250 | static enum sci_status scic_controller_reset(struct isci_host *ihost) | 1231 | static enum sci_status sci_controller_reset(struct isci_host *ihost) |
1251 | { | 1232 | { |
1252 | switch (ihost->sm.current_state_id) { | 1233 | switch (ihost->sm.current_state_id) { |
1253 | case SCIC_RESET: | 1234 | case SCIC_RESET: |
@@ -1286,11 +1267,11 @@ void isci_host_deinit(struct isci_host *ihost) | |||
1286 | set_bit(IHOST_STOP_PENDING, &ihost->flags); | 1267 | set_bit(IHOST_STOP_PENDING, &ihost->flags); |
1287 | 1268 | ||
1288 | spin_lock_irq(&ihost->scic_lock); | 1269 | spin_lock_irq(&ihost->scic_lock); |
1289 | scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); | 1270 | sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); |
1290 | spin_unlock_irq(&ihost->scic_lock); | 1271 | spin_unlock_irq(&ihost->scic_lock); |
1291 | 1272 | ||
1292 | wait_for_stop(ihost); | 1273 | wait_for_stop(ihost); |
1293 | scic_controller_reset(ihost); | 1274 | sci_controller_reset(ihost); |
1294 | 1275 | ||
1295 | /* Cancel any/all outstanding port timers */ | 1276 | /* Cancel any/all outstanding port timers */ |
1296 | for (i = 0; i < ihost->logical_port_entries; i++) { | 1277 | for (i = 0; i < ihost->logical_port_entries; i++) { |
@@ -1329,11 +1310,8 @@ static void __iomem *smu_base(struct isci_host *isci_host) | |||
1329 | return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; | 1310 | return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; |
1330 | } | 1311 | } |
1331 | 1312 | ||
1332 | static void isci_user_parameters_get( | 1313 | static void isci_user_parameters_get(struct sci_user_parameters *u) |
1333 | struct isci_host *isci_host, | ||
1334 | union scic_user_parameters *scic_user_params) | ||
1335 | { | 1314 | { |
1336 | struct scic_sds_user_parameters *u = &scic_user_params->sds1; | ||
1337 | int i; | 1315 | int i; |
1338 | 1316 | ||
1339 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 1317 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
@@ -1355,14 +1333,14 @@ static void isci_user_parameters_get( | |||
1355 | u->max_number_concurrent_device_spin_up = max_concurr_spinup; | 1333 | u->max_number_concurrent_device_spin_up = max_concurr_spinup; |
1356 | } | 1334 | } |
1357 | 1335 | ||
1358 | static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) | 1336 | static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) |
1359 | { | 1337 | { |
1360 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1338 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1361 | 1339 | ||
1362 | sci_change_state(&ihost->sm, SCIC_RESET); | 1340 | sci_change_state(&ihost->sm, SCIC_RESET); |
1363 | } | 1341 | } |
1364 | 1342 | ||
1365 | static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) | 1343 | static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm) |
1366 | { | 1344 | { |
1367 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1345 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1368 | 1346 | ||
@@ -1377,7 +1355,7 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state | |||
1377 | #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 | 1355 | #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 |
1378 | 1356 | ||
1379 | /** | 1357 | /** |
1380 | * scic_controller_set_interrupt_coalescence() - This method allows the user to | 1358 | * sci_controller_set_interrupt_coalescence() - This method allows the user to |
1381 | * configure the interrupt coalescence. | 1359 | * configure the interrupt coalescence. |
1382 | * @controller: This parameter represents the handle to the controller object | 1360 | * @controller: This parameter represents the handle to the controller object |
1383 | * for which its interrupt coalesce register is overridden. | 1361 | * for which its interrupt coalesce register is overridden. |
@@ -1394,9 +1372,9 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state | |||
1394 | * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. | 1372 | * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. |
1395 | */ | 1373 | */ |
1396 | static enum sci_status | 1374 | static enum sci_status |
1397 | scic_controller_set_interrupt_coalescence(struct isci_host *ihost, | 1375 | sci_controller_set_interrupt_coalescence(struct isci_host *ihost, |
1398 | u32 coalesce_number, | 1376 | u32 coalesce_number, |
1399 | u32 coalesce_timeout) | 1377 | u32 coalesce_timeout) |
1400 | { | 1378 | { |
1401 | u8 timeout_encode = 0; | 1379 | u8 timeout_encode = 0; |
1402 | u32 min = 0; | 1380 | u32 min = 0; |
@@ -1489,23 +1467,23 @@ scic_controller_set_interrupt_coalescence(struct isci_host *ihost, | |||
1489 | } | 1467 | } |
1490 | 1468 | ||
1491 | 1469 | ||
1492 | static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) | 1470 | static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) |
1493 | { | 1471 | { |
1494 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1472 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1495 | 1473 | ||
1496 | /* set the default interrupt coalescence number and timeout value. */ | 1474 | /* set the default interrupt coalescence number and timeout value. */ |
1497 | scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); | 1475 | sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); |
1498 | } | 1476 | } |
1499 | 1477 | ||
1500 | static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) | 1478 | static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) |
1501 | { | 1479 | { |
1502 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1480 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1503 | 1481 | ||
1504 | /* disable interrupt coalescence. */ | 1482 | /* disable interrupt coalescence. */ |
1505 | scic_controller_set_interrupt_coalescence(ihost, 0, 0); | 1483 | sci_controller_set_interrupt_coalescence(ihost, 0, 0); |
1506 | } | 1484 | } |
1507 | 1485 | ||
1508 | static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) | 1486 | static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) |
1509 | { | 1487 | { |
1510 | u32 index; | 1488 | u32 index; |
1511 | enum sci_status status; | 1489 | enum sci_status status; |
@@ -1514,7 +1492,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) | |||
1514 | status = SCI_SUCCESS; | 1492 | status = SCI_SUCCESS; |
1515 | 1493 | ||
1516 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 1494 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
1517 | phy_status = scic_sds_phy_stop(&ihost->phys[index]); | 1495 | phy_status = sci_phy_stop(&ihost->phys[index]); |
1518 | 1496 | ||
1519 | if (phy_status != SCI_SUCCESS && | 1497 | if (phy_status != SCI_SUCCESS && |
1520 | phy_status != SCI_FAILURE_INVALID_STATE) { | 1498 | phy_status != SCI_FAILURE_INVALID_STATE) { |
@@ -1531,7 +1509,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) | |||
1531 | return status; | 1509 | return status; |
1532 | } | 1510 | } |
1533 | 1511 | ||
1534 | static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) | 1512 | static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) |
1535 | { | 1513 | { |
1536 | u32 index; | 1514 | u32 index; |
1537 | enum sci_status port_status; | 1515 | enum sci_status port_status; |
@@ -1540,7 +1518,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) | |||
1540 | for (index = 0; index < ihost->logical_port_entries; index++) { | 1518 | for (index = 0; index < ihost->logical_port_entries; index++) { |
1541 | struct isci_port *iport = &ihost->ports[index]; | 1519 | struct isci_port *iport = &ihost->ports[index]; |
1542 | 1520 | ||
1543 | port_status = scic_sds_port_stop(iport); | 1521 | port_status = sci_port_stop(iport); |
1544 | 1522 | ||
1545 | if ((port_status != SCI_SUCCESS) && | 1523 | if ((port_status != SCI_SUCCESS) && |
1546 | (port_status != SCI_FAILURE_INVALID_STATE)) { | 1524 | (port_status != SCI_FAILURE_INVALID_STATE)) { |
@@ -1558,7 +1536,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) | |||
1558 | return status; | 1536 | return status; |
1559 | } | 1537 | } |
1560 | 1538 | ||
1561 | static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) | 1539 | static enum sci_status sci_controller_stop_devices(struct isci_host *ihost) |
1562 | { | 1540 | { |
1563 | u32 index; | 1541 | u32 index; |
1564 | enum sci_status status; | 1542 | enum sci_status status; |
@@ -1569,7 +1547,7 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) | |||
1569 | for (index = 0; index < ihost->remote_node_entries; index++) { | 1547 | for (index = 0; index < ihost->remote_node_entries; index++) { |
1570 | if (ihost->device_table[index] != NULL) { | 1548 | if (ihost->device_table[index] != NULL) { |
1571 | /* / @todo What timeout value do we want to provide to this request? */ | 1549 | /* / @todo What timeout value do we want to provide to this request? */ |
1572 | device_status = scic_remote_device_stop(ihost->device_table[index], 0); | 1550 | device_status = sci_remote_device_stop(ihost->device_table[index], 0); |
1573 | 1551 | ||
1574 | if ((device_status != SCI_SUCCESS) && | 1552 | if ((device_status != SCI_SUCCESS) && |
1575 | (device_status != SCI_FAILURE_INVALID_STATE)) { | 1553 | (device_status != SCI_FAILURE_INVALID_STATE)) { |
@@ -1586,33 +1564,27 @@ static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) | |||
1586 | return status; | 1564 | return status; |
1587 | } | 1565 | } |
1588 | 1566 | ||
1589 | static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) | 1567 | static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm) |
1590 | { | 1568 | { |
1591 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1569 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1592 | 1570 | ||
1593 | /* Stop all of the components for this controller */ | 1571 | /* Stop all of the components for this controller */ |
1594 | scic_sds_controller_stop_phys(ihost); | 1572 | sci_controller_stop_phys(ihost); |
1595 | scic_sds_controller_stop_ports(ihost); | 1573 | sci_controller_stop_ports(ihost); |
1596 | scic_sds_controller_stop_devices(ihost); | 1574 | sci_controller_stop_devices(ihost); |
1597 | } | 1575 | } |
1598 | 1576 | ||
1599 | static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) | 1577 | static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) |
1600 | { | 1578 | { |
1601 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1579 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1602 | 1580 | ||
1603 | sci_del_timer(&ihost->timer); | 1581 | sci_del_timer(&ihost->timer); |
1604 | } | 1582 | } |
1605 | 1583 | ||
1606 | 1584 | static void sci_controller_reset_hardware(struct isci_host *ihost) | |
1607 | /** | ||
1608 | * scic_sds_controller_reset_hardware() - | ||
1609 | * | ||
1610 | * This method will reset the controller hardware. | ||
1611 | */ | ||
1612 | static void scic_sds_controller_reset_hardware(struct isci_host *ihost) | ||
1613 | { | 1585 | { |
1614 | /* Disable interrupts so we dont take any spurious interrupts */ | 1586 | /* Disable interrupts so we dont take any spurious interrupts */ |
1615 | scic_controller_disable_interrupts(ihost); | 1587 | sci_controller_disable_interrupts(ihost); |
1616 | 1588 | ||
1617 | /* Reset the SCU */ | 1589 | /* Reset the SCU */ |
1618 | writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); | 1590 | writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); |
@@ -1627,82 +1599,82 @@ static void scic_sds_controller_reset_hardware(struct isci_host *ihost) | |||
1627 | writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); | 1599 | writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
1628 | } | 1600 | } |
1629 | 1601 | ||
1630 | static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) | 1602 | static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) |
1631 | { | 1603 | { |
1632 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); | 1604 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1633 | 1605 | ||
1634 | scic_sds_controller_reset_hardware(ihost); | 1606 | sci_controller_reset_hardware(ihost); |
1635 | sci_change_state(&ihost->sm, SCIC_RESET); | 1607 | sci_change_state(&ihost->sm, SCIC_RESET); |
1636 | } | 1608 | } |
1637 | 1609 | ||
1638 | static const struct sci_base_state scic_sds_controller_state_table[] = { | 1610 | static const struct sci_base_state sci_controller_state_table[] = { |
1639 | [SCIC_INITIAL] = { | 1611 | [SCIC_INITIAL] = { |
1640 | .enter_state = scic_sds_controller_initial_state_enter, | 1612 | .enter_state = sci_controller_initial_state_enter, |
1641 | }, | 1613 | }, |
1642 | [SCIC_RESET] = {}, | 1614 | [SCIC_RESET] = {}, |
1643 | [SCIC_INITIALIZING] = {}, | 1615 | [SCIC_INITIALIZING] = {}, |
1644 | [SCIC_INITIALIZED] = {}, | 1616 | [SCIC_INITIALIZED] = {}, |
1645 | [SCIC_STARTING] = { | 1617 | [SCIC_STARTING] = { |
1646 | .exit_state = scic_sds_controller_starting_state_exit, | 1618 | .exit_state = sci_controller_starting_state_exit, |
1647 | }, | 1619 | }, |
1648 | [SCIC_READY] = { | 1620 | [SCIC_READY] = { |
1649 | .enter_state = scic_sds_controller_ready_state_enter, | 1621 | .enter_state = sci_controller_ready_state_enter, |
1650 | .exit_state = scic_sds_controller_ready_state_exit, | 1622 | .exit_state = sci_controller_ready_state_exit, |
1651 | }, | 1623 | }, |
1652 | [SCIC_RESETTING] = { | 1624 | [SCIC_RESETTING] = { |
1653 | .enter_state = scic_sds_controller_resetting_state_enter, | 1625 | .enter_state = sci_controller_resetting_state_enter, |
1654 | }, | 1626 | }, |
1655 | [SCIC_STOPPING] = { | 1627 | [SCIC_STOPPING] = { |
1656 | .enter_state = scic_sds_controller_stopping_state_enter, | 1628 | .enter_state = sci_controller_stopping_state_enter, |
1657 | .exit_state = scic_sds_controller_stopping_state_exit, | 1629 | .exit_state = sci_controller_stopping_state_exit, |
1658 | }, | 1630 | }, |
1659 | [SCIC_STOPPED] = {}, | 1631 | [SCIC_STOPPED] = {}, |
1660 | [SCIC_FAILED] = {} | 1632 | [SCIC_FAILED] = {} |
1661 | }; | 1633 | }; |
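The state table above only wires up enter/exit hooks for the states that need them; the remaining entries stay empty. As a rough, hypothetical illustration of how such a table of optional handlers can drive a transition (this is not the driver's sci_base_state_machine code, and all names here are invented):

struct demo_state {
	void (*enter_state)(void *ctx);	/* optional, may be NULL */
	void (*exit_state)(void *ctx);	/* optional, may be NULL */
};

static void demo_change_state(const struct demo_state *table,
			      unsigned int *cur, unsigned int next, void *ctx)
{
	if (table[*cur].exit_state)
		table[*cur].exit_state(ctx);	/* leave the old state */
	*cur = next;
	if (table[*cur].enter_state)
		table[*cur].enter_state(ctx);	/* enter the new state */
}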
1662 | 1634 | ||
1663 | static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) | 1635 | static void sci_controller_set_default_config_parameters(struct isci_host *ihost) |
1664 | { | 1636 | { |
1665 | /* these defaults are overridden by the platform / firmware */ | 1637 | /* these defaults are overridden by the platform / firmware */ |
1666 | u16 index; | 1638 | u16 index; |
1667 | 1639 | ||
1668 | /* Default to APC mode. */ | 1640 | /* Default to APC mode. */ |
1669 | ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; | 1641 | ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; |
1670 | 1642 | ||
1671 | /* Default to APC mode. */ | 1643 | /* Default to APC mode. */ |
1672 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; | 1644 | ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1; |
1673 | 1645 | ||
1674 | /* Default to no SSC operation. */ | 1646 | /* Default to no SSC operation. */ |
1675 | ihost->oem_parameters.sds1.controller.do_enable_ssc = false; | 1647 | ihost->oem_parameters.controller.do_enable_ssc = false; |
1676 | 1648 | ||
1677 | /* Initialize all of the port parameter information to narrow ports. */ | 1649 | /* Initialize all of the port parameter information to narrow ports. */ |
1678 | for (index = 0; index < SCI_MAX_PORTS; index++) { | 1650 | for (index = 0; index < SCI_MAX_PORTS; index++) { |
1679 | ihost->oem_parameters.sds1.ports[index].phy_mask = 0; | 1651 | ihost->oem_parameters.ports[index].phy_mask = 0; |
1680 | } | 1652 | } |
1681 | 1653 | ||
1682 | /* Initialize all of the phy parameter information. */ | 1654 | /* Initialize all of the phy parameter information. */ |
1683 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 1655 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
1684 | /* Default to 6G (i.e. Gen 3) for now. */ | 1656 | /* Default to 6G (i.e. Gen 3) for now. */ |
1685 | ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; | 1657 | ihost->user_parameters.phys[index].max_speed_generation = 3; |
1686 | 1658 | ||
1687 | /* the frequencies cannot be 0 */ | 1659 | /* the frequencies cannot be 0 */ |
1688 | ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; | 1660 | ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; |
1689 | ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; | 1661 | ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; |
1690 | ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; | 1662 | ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; |
1691 | 1663 | ||
1692 | /* | 1664 | /* |
1693 | * Previous Vitesse-based expanders had an arbitration issue that | 1665 | * Previous Vitesse-based expanders had an arbitration issue that |
1694 | * is worked around by having the upper 32-bits of SAS address | 1666 | * is worked around by having the upper 32-bits of SAS address |
1695 | * with a value greater than the Vitesse company identifier. | 1667 | * with a value greater than the Vitesse company identifier. |
1696 | * Hence, usage of 0x5FCFFFFF. */ | 1668 | * Hence, usage of 0x5FCFFFFF. */ |
1697 | ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; | 1669 | ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; |
1698 | ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; | 1670 | ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; |
1699 | } | 1671 | } |
1700 | 1672 | ||
1701 | ihost->user_parameters.sds1.stp_inactivity_timeout = 5; | 1673 | ihost->user_parameters.stp_inactivity_timeout = 5; |
1702 | ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; | 1674 | ihost->user_parameters.ssp_inactivity_timeout = 5; |
1703 | ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; | 1675 | ihost->user_parameters.stp_max_occupancy_timeout = 5; |
1704 | ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; | 1676 | ihost->user_parameters.ssp_max_occupancy_timeout = 20; |
1705 | ihost->user_parameters.sds1.no_outbound_task_timeout = 20; | 1677 | ihost->user_parameters.no_outbound_task_timeout = 20; |
1706 | } | 1678 | } |
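A minimal sketch of the layout change that shortens the accessor chains above, using a hypothetical one-field parameter struct (the real structs carry many more members): the single-member union wrapper goes away and the parameter struct is embedded directly in the host.

struct demo_oem_params {
	int max_concurrent_dev_spin_up;
};

/* old shape: host.oem_parameters.sds1.max_concurrent_dev_spin_up */
struct demo_host_old {
	union {
		struct demo_oem_params sds1;
	} oem_parameters;
};

/* new shape: host.oem_parameters.max_concurrent_dev_spin_up */
struct demo_host_new {
	struct demo_oem_params oem_parameters;
};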
1707 | 1679 | ||
1708 | static void controller_timeout(unsigned long data) | 1680 | static void controller_timeout(unsigned long data) |
@@ -1718,7 +1690,7 @@ static void controller_timeout(unsigned long data) | |||
1718 | goto done; | 1690 | goto done; |
1719 | 1691 | ||
1720 | if (sm->current_state_id == SCIC_STARTING) | 1692 | if (sm->current_state_id == SCIC_STARTING) |
1721 | scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); | 1693 | sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); |
1722 | else if (sm->current_state_id == SCIC_STOPPING) { | 1694 | else if (sm->current_state_id == SCIC_STOPPING) { |
1723 | sci_change_state(sm, SCIC_FAILED); | 1695 | sci_change_state(sm, SCIC_FAILED); |
1724 | isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); | 1696 | isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); |
@@ -1732,45 +1704,29 @@ done: | |||
1732 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1704 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1733 | } | 1705 | } |
1734 | 1706 | ||
1735 | /** | 1707 | static enum sci_status sci_controller_construct(struct isci_host *ihost, |
1736 | * scic_controller_construct() - This method will attempt to construct a | 1708 | void __iomem *scu_base, |
1737 | * controller object utilizing the supplied parameter information. | 1709 | void __iomem *smu_base) |
1738 | * @c: This parameter specifies the controller to be constructed. | ||
1739 | * @scu_base: mapped base address of the scu registers | ||
1740 | * @smu_base: mapped base address of the smu registers | ||
1741 | * | ||
1742 | * Indicate if the controller was successfully constructed or if it failed in | ||
1743 | * some way. SCI_SUCCESS This value is returned if the controller was | ||
1744 | * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned | ||
1745 | * if the interrupt coalescence timer may cause SAS compliance issues for SMP | ||
1746 | * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE | ||
1747 | * This value is returned if the controller does not support the supplied type. | ||
1748 | * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the | ||
1749 | * controller does not support the supplied initialization data version. | ||
1750 | */ | ||
1751 | static enum sci_status scic_controller_construct(struct isci_host *ihost, | ||
1752 | void __iomem *scu_base, | ||
1753 | void __iomem *smu_base) | ||
1754 | { | 1710 | { |
1755 | u8 i; | 1711 | u8 i; |
1756 | 1712 | ||
1757 | sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); | 1713 | sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL); |
1758 | 1714 | ||
1759 | ihost->scu_registers = scu_base; | 1715 | ihost->scu_registers = scu_base; |
1760 | ihost->smu_registers = smu_base; | 1716 | ihost->smu_registers = smu_base; |
1761 | 1717 | ||
1762 | scic_sds_port_configuration_agent_construct(&ihost->port_agent); | 1718 | sci_port_configuration_agent_construct(&ihost->port_agent); |
1763 | 1719 | ||
1764 | /* Construct the ports for this controller */ | 1720 | /* Construct the ports for this controller */ |
1765 | for (i = 0; i < SCI_MAX_PORTS; i++) | 1721 | for (i = 0; i < SCI_MAX_PORTS; i++) |
1766 | scic_sds_port_construct(&ihost->ports[i], i, ihost); | 1722 | sci_port_construct(&ihost->ports[i], i, ihost); |
1767 | scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); | 1723 | sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); |
1768 | 1724 | ||
1769 | /* Construct the phys for this controller */ | 1725 | /* Construct the phys for this controller */ |
1770 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 1726 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
1771 | /* Add all the PHYs to the dummy port */ | 1727 | /* Add all the PHYs to the dummy port */ |
1772 | scic_sds_phy_construct(&ihost->phys[i], | 1728 | sci_phy_construct(&ihost->phys[i], |
1773 | &ihost->ports[SCI_MAX_PORTS], i); | 1729 | &ihost->ports[SCI_MAX_PORTS], i); |
1774 | } | 1730 | } |
1775 | 1731 | ||
1776 | ihost->invalid_phy_mask = 0; | 1732 | ihost->invalid_phy_mask = 0; |
@@ -1778,12 +1734,12 @@ static enum sci_status scic_controller_construct(struct isci_host *ihost, | |||
1778 | sci_init_timer(&ihost->timer, controller_timeout); | 1734 | sci_init_timer(&ihost->timer, controller_timeout); |
1779 | 1735 | ||
1780 | /* Initialize the User and OEM parameters to default values. */ | 1736 | /* Initialize the User and OEM parameters to default values. */ |
1781 | scic_sds_controller_set_default_config_parameters(ihost); | 1737 | sci_controller_set_default_config_parameters(ihost); |
1782 | 1738 | ||
1783 | return scic_controller_reset(ihost); | 1739 | return sci_controller_reset(ihost); |
1784 | } | 1740 | } |
1785 | 1741 | ||
1786 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) | 1742 | int sci_oem_parameters_validate(struct sci_oem_params *oem) |
1787 | { | 1743 | { |
1788 | int i; | 1744 | int i; |
1789 | 1745 | ||
@@ -1817,8 +1773,7 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) | |||
1817 | return 0; | 1773 | return 0; |
1818 | } | 1774 | } |
1819 | 1775 | ||
1820 | static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, | 1776 | static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) |
1821 | union scic_oem_parameters *scic_parms) | ||
1822 | { | 1777 | { |
1823 | u32 state = ihost->sm.current_state_id; | 1778 | u32 state = ihost->sm.current_state_id; |
1824 | 1779 | ||
@@ -1826,9 +1781,8 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, | |||
1826 | state == SCIC_INITIALIZING || | 1781 | state == SCIC_INITIALIZING || |
1827 | state == SCIC_INITIALIZED) { | 1782 | state == SCIC_INITIALIZED) { |
1828 | 1783 | ||
1829 | if (scic_oem_parameters_validate(&scic_parms->sds1)) | 1784 | if (sci_oem_parameters_validate(&ihost->oem_parameters)) |
1830 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 1785 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
1831 | ihost->oem_parameters.sds1 = scic_parms->sds1; | ||
1832 | 1786 | ||
1833 | return SCI_SUCCESS; | 1787 | return SCI_SUCCESS; |
1834 | } | 1788 | } |
@@ -1836,13 +1790,6 @@ static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, | |||
1836 | return SCI_FAILURE_INVALID_STATE; | 1790 | return SCI_FAILURE_INVALID_STATE; |
1837 | } | 1791 | } |
1838 | 1792 | ||
1839 | void scic_oem_parameters_get( | ||
1840 | struct isci_host *ihost, | ||
1841 | union scic_oem_parameters *scic_parms) | ||
1842 | { | ||
1843 | memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms)); | ||
1844 | } | ||
1845 | |||
1846 | static void power_control_timeout(unsigned long data) | 1793 | static void power_control_timeout(unsigned long data) |
1847 | { | 1794 | { |
1848 | struct sci_timer *tmr = (struct sci_timer *)data; | 1795 | struct sci_timer *tmr = (struct sci_timer *)data; |
@@ -1873,13 +1820,13 @@ static void power_control_timeout(unsigned long data) | |||
1873 | continue; | 1820 | continue; |
1874 | 1821 | ||
1875 | if (ihost->power_control.phys_granted_power >= | 1822 | if (ihost->power_control.phys_granted_power >= |
1876 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) | 1823 | ihost->oem_parameters.controller.max_concurrent_dev_spin_up) |
1877 | break; | 1824 | break; |
1878 | 1825 | ||
1879 | ihost->power_control.requesters[i] = NULL; | 1826 | ihost->power_control.requesters[i] = NULL; |
1880 | ihost->power_control.phys_waiting--; | 1827 | ihost->power_control.phys_waiting--; |
1881 | ihost->power_control.phys_granted_power++; | 1828 | ihost->power_control.phys_granted_power++; |
1882 | scic_sds_phy_consume_power_handler(iphy); | 1829 | sci_phy_consume_power_handler(iphy); |
1883 | } | 1830 | } |
1884 | 1831 | ||
1885 | /* | 1832 | /* |
@@ -1893,22 +1840,15 @@ done: | |||
1893 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1840 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1894 | } | 1841 | } |
1895 | 1842 | ||
1896 | /** | 1843 | void sci_controller_power_control_queue_insert(struct isci_host *ihost, |
1897 | * This method inserts the phy in the stagger spinup control queue. | 1844 | struct isci_phy *iphy) |
1898 | * @scic: | ||
1899 | * | ||
1900 | * | ||
1901 | */ | ||
1902 | void scic_sds_controller_power_control_queue_insert( | ||
1903 | struct isci_host *ihost, | ||
1904 | struct isci_phy *iphy) | ||
1905 | { | 1845 | { |
1906 | BUG_ON(iphy == NULL); | 1846 | BUG_ON(iphy == NULL); |
1907 | 1847 | ||
1908 | if (ihost->power_control.phys_granted_power < | 1848 | if (ihost->power_control.phys_granted_power < |
1909 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { | 1849 | ihost->oem_parameters.controller.max_concurrent_dev_spin_up) { |
1910 | ihost->power_control.phys_granted_power++; | 1850 | ihost->power_control.phys_granted_power++; |
1911 | scic_sds_phy_consume_power_handler(iphy); | 1851 | sci_phy_consume_power_handler(iphy); |
1912 | 1852 | ||
1913 | /* | 1853 | /* |
1914 | * stop and start the power_control timer. When the timer fires, the | 1854 | * stop and start the power_control timer. When the timer fires, the |
@@ -1928,21 +1868,13 @@ void scic_sds_controller_power_control_queue_insert( | |||
1928 | } | 1868 | } |
1929 | } | 1869 | } |
1930 | 1870 | ||
1931 | /** | 1871 | void sci_controller_power_control_queue_remove(struct isci_host *ihost, |
1932 | * This method removes the phy from the stagger spinup control queue. | 1872 | struct isci_phy *iphy) |
1933 | * @scic: | ||
1934 | * | ||
1935 | * | ||
1936 | */ | ||
1937 | void scic_sds_controller_power_control_queue_remove( | ||
1938 | struct isci_host *ihost, | ||
1939 | struct isci_phy *iphy) | ||
1940 | { | 1873 | { |
1941 | BUG_ON(iphy == NULL); | 1874 | BUG_ON(iphy == NULL); |
1942 | 1875 | ||
1943 | if (ihost->power_control.requesters[iphy->phy_index] != NULL) { | 1876 | if (ihost->power_control.requesters[iphy->phy_index]) |
1944 | ihost->power_control.phys_waiting--; | 1877 | ihost->power_control.phys_waiting--; |
1945 | } | ||
1946 | 1878 | ||
1947 | ihost->power_control.requesters[iphy->phy_index] = NULL; | 1879 | ihost->power_control.requesters[iphy->phy_index] = NULL; |
1948 | } | 1880 | } |
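The pair of functions above implements staggered spin-up: a phy is granted power immediately while the number of powered phys is under the OEM max_concurrent_dev_spin_up limit, otherwise it parks in a requesters[] slot keyed by its phy index until the power-control timer hands out the next grant. A simplified sketch of that bookkeeping, with hypothetical types and the timer handling left out:

#define DEMO_MAX_PHYS 4

struct demo_power_control {
	unsigned int phys_granted_power;
	unsigned int phys_waiting;
	void *requesters[DEMO_MAX_PHYS];	/* waiting phys, indexed by phy_index */
};

static void demo_power_queue_insert(struct demo_power_control *pc, void *phy,
				    unsigned int phy_index,
				    unsigned int max_concurrent)
{
	if (pc->phys_granted_power < max_concurrent) {
		pc->phys_granted_power++;		/* grant power right away */
	} else {
		pc->requesters[phy_index] = phy;	/* park until the timer fires */
		pc->phys_waiting++;
	}
}

static void demo_power_queue_remove(struct demo_power_control *pc,
				    unsigned int phy_index)
{
	if (pc->requesters[phy_index])
		pc->phys_waiting--;
	pc->requesters[phy_index] = NULL;
}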
@@ -1952,9 +1884,9 @@ void scic_sds_controller_power_control_queue_remove( | |||
1952 | /* Initialize the AFE for this phy index. We need to read the AFE setup from | 1884 | /* Initialize the AFE for this phy index. We need to read the AFE setup from |
1953 | * the OEM parameters | 1885 | * the OEM parameters |
1954 | */ | 1886 | */ |
1955 | static void scic_sds_controller_afe_initialization(struct isci_host *ihost) | 1887 | static void sci_controller_afe_initialization(struct isci_host *ihost) |
1956 | { | 1888 | { |
1957 | const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; | 1889 | const struct sci_oem_params *oem = &ihost->oem_parameters; |
1958 | u32 afe_status; | 1890 | u32 afe_status; |
1959 | u32 phy_id; | 1891 | u32 phy_id; |
1960 | 1892 | ||
@@ -2111,7 +2043,7 @@ static void scic_sds_controller_afe_initialization(struct isci_host *ihost) | |||
2111 | udelay(AFE_REGISTER_WRITE_DELAY); | 2043 | udelay(AFE_REGISTER_WRITE_DELAY); |
2112 | } | 2044 | } |
2113 | 2045 | ||
2114 | static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) | 2046 | static void sci_controller_initialize_power_control(struct isci_host *ihost) |
2115 | { | 2047 | { |
2116 | sci_init_timer(&ihost->power_control.timer, power_control_timeout); | 2048 | sci_init_timer(&ihost->power_control.timer, power_control_timeout); |
2117 | 2049 | ||
@@ -2122,7 +2054,7 @@ static void scic_sds_controller_initialize_power_control(struct isci_host *ihost | |||
2122 | ihost->power_control.phys_granted_power = 0; | 2054 | ihost->power_control.phys_granted_power = 0; |
2123 | } | 2055 | } |
2124 | 2056 | ||
2125 | static enum sci_status scic_controller_initialize(struct isci_host *ihost) | 2057 | static enum sci_status sci_controller_initialize(struct isci_host *ihost) |
2126 | { | 2058 | { |
2127 | struct sci_base_state_machine *sm = &ihost->sm; | 2059 | struct sci_base_state_machine *sm = &ihost->sm; |
2128 | enum sci_status result = SCI_FAILURE; | 2060 | enum sci_status result = SCI_FAILURE; |
@@ -2142,14 +2074,14 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) | |||
2142 | ihost->next_phy_to_start = 0; | 2074 | ihost->next_phy_to_start = 0; |
2143 | ihost->phy_startup_timer_pending = false; | 2075 | ihost->phy_startup_timer_pending = false; |
2144 | 2076 | ||
2145 | scic_sds_controller_initialize_power_control(ihost); | 2077 | sci_controller_initialize_power_control(ihost); |
2146 | 2078 | ||
2147 | /* | 2079 | /* |
2148 | * There is nothing to do here for B0 since we do not have to | 2080 | * There is nothing to do here for B0 since we do not have to |
2149 | * program the AFE registers. | 2081 | * program the AFE registers. |
2150 | * / @todo The AFE settings are supposed to be correct for the B0 but | 2082 | * / @todo The AFE settings are supposed to be correct for the B0 but |
2151 | * / presently they seem to be wrong. */ | 2083 | * / presently they seem to be wrong. */ |
2152 | scic_sds_controller_afe_initialization(ihost); | 2084 | sci_controller_afe_initialization(ihost); |
2153 | 2085 | ||
2154 | 2086 | ||
2155 | /* Take the hardware out of reset */ | 2087 | /* Take the hardware out of reset */ |
@@ -2206,24 +2138,22 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) | |||
2206 | * are accessed during the port initialization. | 2138 | * are accessed during the port initialization. |
2207 | */ | 2139 | */ |
2208 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 2140 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
2209 | result = scic_sds_phy_initialize(&ihost->phys[i], | 2141 | result = sci_phy_initialize(&ihost->phys[i], |
2210 | &ihost->scu_registers->peg0.pe[i].tl, | 2142 | &ihost->scu_registers->peg0.pe[i].tl, |
2211 | &ihost->scu_registers->peg0.pe[i].ll); | 2143 | &ihost->scu_registers->peg0.pe[i].ll); |
2212 | if (result != SCI_SUCCESS) | 2144 | if (result != SCI_SUCCESS) |
2213 | goto out; | 2145 | goto out; |
2214 | } | 2146 | } |
2215 | 2147 | ||
2216 | for (i = 0; i < ihost->logical_port_entries; i++) { | 2148 | for (i = 0; i < ihost->logical_port_entries; i++) { |
2217 | result = scic_sds_port_initialize(&ihost->ports[i], | 2149 | struct isci_port *iport = &ihost->ports[i]; |
2218 | &ihost->scu_registers->peg0.ptsg.port[i], | ||
2219 | &ihost->scu_registers->peg0.ptsg.protocol_engine, | ||
2220 | &ihost->scu_registers->peg0.viit[i]); | ||
2221 | 2150 | ||
2222 | if (result != SCI_SUCCESS) | 2151 | iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; |
2223 | goto out; | 2152 | iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; |
2153 | iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; | ||
2224 | } | 2154 | } |
2225 | 2155 | ||
2226 | result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); | 2156 | result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); |
2227 | 2157 | ||
2228 | out: | 2158 | out: |
2229 | /* Advance the controller state machine */ | 2159 | /* Advance the controller state machine */ |
@@ -2236,9 +2166,8 @@ static enum sci_status scic_controller_initialize(struct isci_host *ihost) | |||
2236 | return result; | 2166 | return result; |
2237 | } | 2167 | } |
2238 | 2168 | ||
2239 | static enum sci_status scic_user_parameters_set( | 2169 | static enum sci_status sci_user_parameters_set(struct isci_host *ihost, |
2240 | struct isci_host *ihost, | 2170 | struct sci_user_parameters *sci_parms) |
2241 | union scic_user_parameters *scic_parms) | ||
2242 | { | 2171 | { |
2243 | u32 state = ihost->sm.current_state_id; | 2172 | u32 state = ihost->sm.current_state_id; |
2244 | 2173 | ||
@@ -2254,7 +2183,7 @@ static enum sci_status scic_user_parameters_set( | |||
2254 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 2183 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
2255 | struct sci_phy_user_params *user_phy; | 2184 | struct sci_phy_user_params *user_phy; |
2256 | 2185 | ||
2257 | user_phy = &scic_parms->sds1.phys[index]; | 2186 | user_phy = &sci_parms->phys[index]; |
2258 | 2187 | ||
2259 | if (!((user_phy->max_speed_generation <= | 2188 | if (!((user_phy->max_speed_generation <= |
2260 | SCIC_SDS_PARM_MAX_SPEED) && | 2189 | SCIC_SDS_PARM_MAX_SPEED) && |
@@ -2275,14 +2204,14 @@ static enum sci_status scic_user_parameters_set( | |||
2275 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 2204 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
2276 | } | 2205 | } |
2277 | 2206 | ||
2278 | if ((scic_parms->sds1.stp_inactivity_timeout == 0) || | 2207 | if ((sci_parms->stp_inactivity_timeout == 0) || |
2279 | (scic_parms->sds1.ssp_inactivity_timeout == 0) || | 2208 | (sci_parms->ssp_inactivity_timeout == 0) || |
2280 | (scic_parms->sds1.stp_max_occupancy_timeout == 0) || | 2209 | (sci_parms->stp_max_occupancy_timeout == 0) || |
2281 | (scic_parms->sds1.ssp_max_occupancy_timeout == 0) || | 2210 | (sci_parms->ssp_max_occupancy_timeout == 0) || |
2282 | (scic_parms->sds1.no_outbound_task_timeout == 0)) | 2211 | (sci_parms->no_outbound_task_timeout == 0)) |
2283 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 2212 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
2284 | 2213 | ||
2285 | memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); | 2214 | memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); |
2286 | 2215 | ||
2287 | return SCI_SUCCESS; | 2216 | return SCI_SUCCESS; |
2288 | } | 2217 | } |
@@ -2290,7 +2219,7 @@ static enum sci_status scic_user_parameters_set( | |||
2290 | return SCI_FAILURE_INVALID_STATE; | 2219 | return SCI_FAILURE_INVALID_STATE; |
2291 | } | 2220 | } |
2292 | 2221 | ||
2293 | static int scic_controller_mem_init(struct isci_host *ihost) | 2222 | static int sci_controller_mem_init(struct isci_host *ihost) |
2294 | { | 2223 | { |
2295 | struct device *dev = &ihost->pdev->dev; | 2224 | struct device *dev = &ihost->pdev->dev; |
2296 | dma_addr_t dma; | 2225 | dma_addr_t dma; |
@@ -2307,7 +2236,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) | |||
2307 | 2236 | ||
2308 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); | 2237 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); |
2309 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, | 2238 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, |
2310 | GFP_KERNEL); | 2239 | GFP_KERNEL); |
2311 | if (!ihost->remote_node_context_table) | 2240 | if (!ihost->remote_node_context_table) |
2312 | return -ENOMEM; | 2241 | return -ENOMEM; |
2313 | 2242 | ||
@@ -2323,7 +2252,7 @@ static int scic_controller_mem_init(struct isci_host *ihost) | |||
2323 | writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); | 2252 | writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); |
2324 | writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); | 2253 | writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); |
2325 | 2254 | ||
2326 | err = scic_sds_unsolicited_frame_control_construct(ihost); | 2255 | err = sci_unsolicited_frame_control_construct(ihost); |
2327 | if (err) | 2256 | if (err) |
2328 | return err; | 2257 | return err; |
2329 | 2258 | ||
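The table programming in this hunk follows a common pattern: allocate a coherent DMA buffer with dmam_alloc_coherent() and split the returned bus address across a lower/upper register pair, as the writel() calls of lower_32_bits()/upper_32_bits() above do. A standalone sketch of just the address split, against a made-up register layout:

#include <stdint.h>

struct demo_table_regs {
	volatile uint32_t lower;
	volatile uint32_t upper;
};

static void demo_program_table(struct demo_table_regs *regs, uint64_t dma)
{
	regs->lower = (uint32_t)dma;		/* equivalent of lower_32_bits(dma) */
	regs->upper = (uint32_t)(dma >> 32);	/* equivalent of upper_32_bits(dma) */
}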
@@ -2348,8 +2277,7 @@ int isci_host_init(struct isci_host *ihost) | |||
2348 | { | 2277 | { |
2349 | int err = 0, i; | 2278 | int err = 0, i; |
2350 | enum sci_status status; | 2279 | enum sci_status status; |
2351 | union scic_oem_parameters oem; | 2280 | struct sci_user_parameters sci_user_params; |
2352 | union scic_user_parameters scic_user_params; | ||
2353 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); | 2281 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); |
2354 | 2282 | ||
2355 | spin_lock_init(&ihost->state_lock); | 2283 | spin_lock_init(&ihost->state_lock); |
@@ -2358,12 +2286,12 @@ int isci_host_init(struct isci_host *ihost) | |||
2358 | 2286 | ||
2359 | isci_host_change_state(ihost, isci_starting); | 2287 | isci_host_change_state(ihost, isci_starting); |
2360 | 2288 | ||
2361 | status = scic_controller_construct(ihost, scu_base(ihost), | 2289 | status = sci_controller_construct(ihost, scu_base(ihost), |
2362 | smu_base(ihost)); | 2290 | smu_base(ihost)); |
2363 | 2291 | ||
2364 | if (status != SCI_SUCCESS) { | 2292 | if (status != SCI_SUCCESS) { |
2365 | dev_err(&ihost->pdev->dev, | 2293 | dev_err(&ihost->pdev->dev, |
2366 | "%s: scic_controller_construct failed - status = %x\n", | 2294 | "%s: sci_controller_construct failed - status = %x\n", |
2367 | __func__, | 2295 | __func__, |
2368 | status); | 2296 | status); |
2369 | return -ENODEV; | 2297 | return -ENODEV; |
@@ -2376,21 +2304,18 @@ int isci_host_init(struct isci_host *ihost) | |||
2376 | * grab initial values stored in the controller object for OEM and USER | 2304 | * grab initial values stored in the controller object for OEM and USER |
2377 | * parameters | 2305 | * parameters |
2378 | */ | 2306 | */ |
2379 | isci_user_parameters_get(ihost, &scic_user_params); | 2307 | isci_user_parameters_get(&sci_user_params); |
2380 | status = scic_user_parameters_set(ihost, | 2308 | status = sci_user_parameters_set(ihost, &sci_user_params); |
2381 | &scic_user_params); | ||
2382 | if (status != SCI_SUCCESS) { | 2309 | if (status != SCI_SUCCESS) { |
2383 | dev_warn(&ihost->pdev->dev, | 2310 | dev_warn(&ihost->pdev->dev, |
2384 | "%s: scic_user_parameters_set failed\n", | 2311 | "%s: sci_user_parameters_set failed\n", |
2385 | __func__); | 2312 | __func__); |
2386 | return -ENODEV; | 2313 | return -ENODEV; |
2387 | } | 2314 | } |
2388 | 2315 | ||
2389 | scic_oem_parameters_get(ihost, &oem); | ||
2390 | |||
2391 | /* grab any OEM parameters specified in orom */ | 2316 | /* grab any OEM parameters specified in orom */ |
2392 | if (pci_info->orom) { | 2317 | if (pci_info->orom) { |
2393 | status = isci_parse_oem_parameters(&oem, | 2318 | status = isci_parse_oem_parameters(&ihost->oem_parameters, |
2394 | pci_info->orom, | 2319 | pci_info->orom, |
2395 | ihost->id); | 2320 | ihost->id); |
2396 | if (status != SCI_SUCCESS) { | 2321 | if (status != SCI_SUCCESS) { |
@@ -2400,10 +2325,10 @@ int isci_host_init(struct isci_host *ihost) | |||
2400 | } | 2325 | } |
2401 | } | 2326 | } |
2402 | 2327 | ||
2403 | status = scic_oem_parameters_set(ihost, &oem); | 2328 | status = sci_oem_parameters_set(ihost); |
2404 | if (status != SCI_SUCCESS) { | 2329 | if (status != SCI_SUCCESS) { |
2405 | dev_warn(&ihost->pdev->dev, | 2330 | dev_warn(&ihost->pdev->dev, |
2406 | "%s: scic_oem_parameters_set failed\n", | 2331 | "%s: sci_oem_parameters_set failed\n", |
2407 | __func__); | 2332 | __func__); |
2408 | return -ENODEV; | 2333 | return -ENODEV; |
2409 | } | 2334 | } |
@@ -2415,17 +2340,17 @@ int isci_host_init(struct isci_host *ihost) | |||
2415 | INIT_LIST_HEAD(&ihost->requests_to_errorback); | 2340 | INIT_LIST_HEAD(&ihost->requests_to_errorback); |
2416 | 2341 | ||
2417 | spin_lock_irq(&ihost->scic_lock); | 2342 | spin_lock_irq(&ihost->scic_lock); |
2418 | status = scic_controller_initialize(ihost); | 2343 | status = sci_controller_initialize(ihost); |
2419 | spin_unlock_irq(&ihost->scic_lock); | 2344 | spin_unlock_irq(&ihost->scic_lock); |
2420 | if (status != SCI_SUCCESS) { | 2345 | if (status != SCI_SUCCESS) { |
2421 | dev_warn(&ihost->pdev->dev, | 2346 | dev_warn(&ihost->pdev->dev, |
2422 | "%s: scic_controller_initialize failed -" | 2347 | "%s: sci_controller_initialize failed -" |
2423 | " status = 0x%x\n", | 2348 | " status = 0x%x\n", |
2424 | __func__, status); | 2349 | __func__, status); |
2425 | return -ENODEV; | 2350 | return -ENODEV; |
2426 | } | 2351 | } |
2427 | 2352 | ||
2428 | err = scic_controller_mem_init(ihost); | 2353 | err = sci_controller_mem_init(ihost); |
2429 | if (err) | 2354 | if (err) |
2430 | return err; | 2355 | return err; |
2431 | 2356 | ||
@@ -2463,20 +2388,20 @@ int isci_host_init(struct isci_host *ihost) | |||
2463 | return 0; | 2388 | return 0; |
2464 | } | 2389 | } |
2465 | 2390 | ||
2466 | void scic_sds_controller_link_up(struct isci_host *ihost, | 2391 | void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, |
2467 | struct isci_port *iport, struct isci_phy *iphy) | 2392 | struct isci_phy *iphy) |
2468 | { | 2393 | { |
2469 | switch (ihost->sm.current_state_id) { | 2394 | switch (ihost->sm.current_state_id) { |
2470 | case SCIC_STARTING: | 2395 | case SCIC_STARTING: |
2471 | sci_del_timer(&ihost->phy_timer); | 2396 | sci_del_timer(&ihost->phy_timer); |
2472 | ihost->phy_startup_timer_pending = false; | 2397 | ihost->phy_startup_timer_pending = false; |
2473 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, | 2398 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, |
2474 | iport, iphy); | 2399 | iport, iphy); |
2475 | scic_sds_controller_start_next_phy(ihost); | 2400 | sci_controller_start_next_phy(ihost); |
2476 | break; | 2401 | break; |
2477 | case SCIC_READY: | 2402 | case SCIC_READY: |
2478 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, | 2403 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, |
2479 | iport, iphy); | 2404 | iport, iphy); |
2480 | break; | 2405 | break; |
2481 | default: | 2406 | default: |
2482 | dev_dbg(&ihost->pdev->dev, | 2407 | dev_dbg(&ihost->pdev->dev, |
@@ -2486,8 +2411,8 @@ void scic_sds_controller_link_up(struct isci_host *ihost, | |||
2486 | } | 2411 | } |
2487 | } | 2412 | } |
2488 | 2413 | ||
2489 | void scic_sds_controller_link_down(struct isci_host *ihost, | 2414 | void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, |
2490 | struct isci_port *iport, struct isci_phy *iphy) | 2415 | struct isci_phy *iphy) |
2491 | { | 2416 | { |
2492 | switch (ihost->sm.current_state_id) { | 2417 | switch (ihost->sm.current_state_id) { |
2493 | case SCIC_STARTING: | 2418 | case SCIC_STARTING: |
@@ -2505,12 +2430,7 @@ void scic_sds_controller_link_down(struct isci_host *ihost, | |||
2505 | } | 2430 | } |
2506 | } | 2431 | } |
2507 | 2432 | ||
2508 | /** | 2433 | static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) |
2509 | * This is a helper method to determine if any remote devices on this | ||
2510 | * controller are still in the stopping state. | ||
2511 | * | ||
2512 | */ | ||
2513 | static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost) | ||
2514 | { | 2434 | { |
2515 | u32 index; | 2435 | u32 index; |
2516 | 2436 | ||
@@ -2523,12 +2443,8 @@ static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ih | |||
2523 | return false; | 2443 | return false; |
2524 | } | 2444 | } |
2525 | 2445 | ||
2526 | /** | 2446 | void sci_controller_remote_device_stopped(struct isci_host *ihost, |
2527 | * This method is called by the remote device to inform the controller | 2447 | struct isci_remote_device *idev) |
2528 | * object that the remote device has stopped. | ||
2529 | */ | ||
2530 | void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, | ||
2531 | struct isci_remote_device *idev) | ||
2532 | { | 2448 | { |
2533 | if (ihost->sm.current_state_id != SCIC_STOPPING) { | 2449 | if (ihost->sm.current_state_id != SCIC_STOPPING) { |
2534 | dev_dbg(&ihost->pdev->dev, | 2450 | dev_dbg(&ihost->pdev->dev, |
@@ -2539,32 +2455,19 @@ void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, | |||
2539 | return; | 2455 | return; |
2540 | } | 2456 | } |
2541 | 2457 | ||
2542 | if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { | 2458 | if (!sci_controller_has_remote_devices_stopping(ihost)) |
2543 | sci_change_state(&ihost->sm, SCIC_STOPPED); | 2459 | sci_change_state(&ihost->sm, SCIC_STOPPED); |
2544 | } | ||
2545 | } | 2460 | } |
2546 | 2461 | ||
2547 | /** | 2462 | void sci_controller_post_request(struct isci_host *ihost, u32 request) |
2548 | * This method will write to the SCU PCP register the request value. The method | ||
2549 | * is used to suspend/resume ports, devices, and phys. | ||
2550 | * @scic: | ||
2551 | * | ||
2552 | * | ||
2553 | */ | ||
2554 | void scic_sds_controller_post_request( | ||
2555 | struct isci_host *ihost, | ||
2556 | u32 request) | ||
2557 | { | 2463 | { |
2558 | dev_dbg(&ihost->pdev->dev, | 2464 | dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", |
2559 | "%s: SCIC Controller 0x%p post request 0x%08x\n", | 2465 | __func__, ihost->id, request); |
2560 | __func__, | ||
2561 | ihost, | ||
2562 | request); | ||
2563 | 2466 | ||
2564 | writel(request, &ihost->smu_registers->post_context_port); | 2467 | writel(request, &ihost->smu_registers->post_context_port); |
2565 | } | 2468 | } |
2566 | 2469 | ||
2567 | struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) | 2470 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) |
2568 | { | 2471 | { |
2569 | u16 task_index; | 2472 | u16 task_index; |
2570 | u16 task_sequence; | 2473 | u16 task_sequence; |
@@ -2599,15 +2502,14 @@ struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) | |||
2599 | * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there is no remote | 2502 | * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there is no remote |
2600 | * node index available. | 2503 | * node index available. |
2601 | */ | 2504 | */ |
2602 | enum sci_status scic_sds_controller_allocate_remote_node_context( | 2505 | enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, |
2603 | struct isci_host *ihost, | 2506 | struct isci_remote_device *idev, |
2604 | struct isci_remote_device *idev, | 2507 | u16 *node_id) |
2605 | u16 *node_id) | ||
2606 | { | 2508 | { |
2607 | u16 node_index; | 2509 | u16 node_index; |
2608 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); | 2510 | u32 remote_node_count = sci_remote_device_node_count(idev); |
2609 | 2511 | ||
2610 | node_index = scic_sds_remote_node_table_allocate_remote_node( | 2512 | node_index = sci_remote_node_table_allocate_remote_node( |
2611 | &ihost->available_remote_nodes, remote_node_count | 2513 | &ihost->available_remote_nodes, remote_node_count |
2612 | ); | 2514 | ); |
2613 | 2515 | ||
@@ -2622,68 +2524,26 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( | |||
2622 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 2524 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; |
2623 | } | 2525 | } |
2624 | 2526 | ||
2625 | /** | 2527 | void sci_controller_free_remote_node_context(struct isci_host *ihost, |
2626 | * This method frees the remote node index back to the available pool. Once | 2528 | struct isci_remote_device *idev, |
2627 | * this is done the remote node context buffer is no longer valid and can | 2529 | u16 node_id) |
2628 | * not be used. | ||
2629 | * @scic: | ||
2630 | * @sci_dev: | ||
2631 | * @node_id: | ||
2632 | * | ||
2633 | */ | ||
2634 | void scic_sds_controller_free_remote_node_context( | ||
2635 | struct isci_host *ihost, | ||
2636 | struct isci_remote_device *idev, | ||
2637 | u16 node_id) | ||
2638 | { | 2530 | { |
2639 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); | 2531 | u32 remote_node_count = sci_remote_device_node_count(idev); |
2640 | 2532 | ||
2641 | if (ihost->device_table[node_id] == idev) { | 2533 | if (ihost->device_table[node_id] == idev) { |
2642 | ihost->device_table[node_id] = NULL; | 2534 | ihost->device_table[node_id] = NULL; |
2643 | 2535 | ||
2644 | scic_sds_remote_node_table_release_remote_node_index( | 2536 | sci_remote_node_table_release_remote_node_index( |
2645 | &ihost->available_remote_nodes, remote_node_count, node_id | 2537 | &ihost->available_remote_nodes, remote_node_count, node_id |
2646 | ); | 2538 | ); |
2647 | } | 2539 | } |
2648 | } | 2540 | } |
2649 | 2541 | ||
2650 | /** | 2542 | void sci_controller_copy_sata_response(void *response_buffer, |
2651 | * This method returns the union scu_remote_node_context for the specified remote | 2543 | void *frame_header, |
2652 | * node id. | 2544 | void *frame_buffer) |
2653 | * @scic: | ||
2654 | * @node_id: | ||
2655 | * | ||
2656 | * union scu_remote_node_context* | ||
2657 | */ | ||
2658 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( | ||
2659 | struct isci_host *ihost, | ||
2660 | u16 node_id | ||
2661 | ) { | ||
2662 | if ( | ||
2663 | (node_id < ihost->remote_node_entries) | ||
2664 | && (ihost->device_table[node_id] != NULL) | ||
2665 | ) { | ||
2666 | return &ihost->remote_node_context_table[node_id]; | ||
2667 | } | ||
2668 | |||
2669 | return NULL; | ||
2670 | } | ||
2671 | |||
2672 | /** | ||
2673 | * | ||
2674 | * @response_buffer: This is the buffer into which the D2H register FIS will be | ||
2675 | * constructed. | ||
2676 | * @frame_header: This is the frame header returned by the hardware. | ||
2677 | * @frame_buffer: This is the frame buffer returned by the hardware. | ||
2678 | * | ||
2679 | * This method will combine the frame header and frame buffer to create a SATA | ||
2680 | * D2H register FIS none | ||
2681 | */ | ||
2682 | void scic_sds_controller_copy_sata_response( | ||
2683 | void *response_buffer, | ||
2684 | void *frame_header, | ||
2685 | void *frame_buffer) | ||
2686 | { | 2545 | { |
2546 | /* XXX type safety? */ | ||
2687 | memcpy(response_buffer, frame_header, sizeof(u32)); | 2547 | memcpy(response_buffer, frame_header, sizeof(u32)); |
2688 | 2548 | ||
2689 | memcpy(response_buffer + sizeof(u32), | 2549 | memcpy(response_buffer + sizeof(u32), |
@@ -2691,21 +2551,9 @@ void scic_sds_controller_copy_sata_response( | |||
2691 | sizeof(struct dev_to_host_fis) - sizeof(u32)); | 2551 | sizeof(struct dev_to_host_fis) - sizeof(u32)); |
2692 | } | 2552 | } |
2693 | 2553 | ||
2694 | /** | 2554 | void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) |
2695 | * This method releases the frame once this is done the frame is available for | ||
2696 | * re-use by the hardware. The data contained in the frame header and frame | ||
2697 | * buffer is no longer valid. The UF queue get pointer is only updated if UF | ||
2698 | * control indicates this is appropriate. | ||
2699 | * @scic: | ||
2700 | * @frame_index: | ||
2701 | * | ||
2702 | */ | ||
2703 | void scic_sds_controller_release_frame( | ||
2704 | struct isci_host *ihost, | ||
2705 | u32 frame_index) | ||
2706 | { | 2555 | { |
2707 | if (scic_sds_unsolicited_frame_control_release_frame( | 2556 | if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) |
2708 | &ihost->uf_control, frame_index) == true) | ||
2709 | writel(ihost->uf_control.get, | 2557 | writel(ihost->uf_control.get, |
2710 | &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); | 2558 | &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
2711 | } | 2559 | } |
@@ -2763,21 +2611,9 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) | |||
2763 | return SCI_FAILURE_INVALID_IO_TAG; | 2611 | return SCI_FAILURE_INVALID_IO_TAG; |
2764 | } | 2612 | } |
2765 | 2613 | ||
2766 | /** | 2614 | enum sci_status sci_controller_start_io(struct isci_host *ihost, |
2767 | * scic_controller_start_io() - This method is called by the SCI user to | 2615 | struct isci_remote_device *idev, |
2768 | * send/start an IO request. If the method invocation is successful, then | 2616 | struct isci_request *ireq) |
2769 | * the IO request has been queued to the hardware for processing. | ||
2770 | * @controller: the handle to the controller object for which to start an IO | ||
2771 | * request. | ||
2772 | * @remote_device: the handle to the remote device object for which to start an | ||
2773 | * IO request. | ||
2774 | * @io_request: the handle to the io request object to start. | ||
2775 | * @io_tag: This parameter specifies a previously allocated IO tag that the | ||
2776 | * user desires to be utilized for this request. | ||
2777 | */ | ||
2778 | enum sci_status scic_controller_start_io(struct isci_host *ihost, | ||
2779 | struct isci_remote_device *idev, | ||
2780 | struct isci_request *ireq) | ||
2781 | { | 2617 | { |
2782 | enum sci_status status; | 2618 | enum sci_status status; |
2783 | 2619 | ||
@@ -2786,36 +2622,23 @@ enum sci_status scic_controller_start_io(struct isci_host *ihost, | |||
2786 | return SCI_FAILURE_INVALID_STATE; | 2622 | return SCI_FAILURE_INVALID_STATE; |
2787 | } | 2623 | } |
2788 | 2624 | ||
2789 | status = scic_sds_remote_device_start_io(ihost, idev, ireq); | 2625 | status = sci_remote_device_start_io(ihost, idev, ireq); |
2790 | if (status != SCI_SUCCESS) | 2626 | if (status != SCI_SUCCESS) |
2791 | return status; | 2627 | return status; |
2792 | 2628 | ||
2793 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2629 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2794 | scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); | 2630 | sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); |
2795 | return SCI_SUCCESS; | 2631 | return SCI_SUCCESS; |
2796 | } | 2632 | } |
2797 | 2633 | ||
2798 | /** | 2634 | enum sci_status sci_controller_terminate_request(struct isci_host *ihost, |
2799 | * scic_controller_terminate_request() - This method is called by the SCI Core | 2635 | struct isci_remote_device *idev, |
2800 | * user to terminate an ongoing (i.e. started) core IO request. This does | 2636 | struct isci_request *ireq) |
2801 | * not abort the IO request at the target, but rather removes the IO request | ||
2802 | * from the host controller. | ||
2803 | * @controller: the handle to the controller object for which to terminate a | ||
2804 | * request. | ||
2805 | * @remote_device: the handle to the remote device object for which to | ||
2806 | * terminate a request. | ||
2807 | * @request: the handle to the io or task management request object to | ||
2808 | * terminate. | ||
2809 | * | ||
2810 | * Indicate if the controller successfully began the terminate process for the | ||
2811 | * IO request. SCI_SUCCESS if the terminate process was successfully started | ||
2812 | * for the request. Determine the failure situations and return values. | ||
2813 | */ | ||
2814 | enum sci_status scic_controller_terminate_request( | ||
2815 | struct isci_host *ihost, | ||
2816 | struct isci_remote_device *idev, | ||
2817 | struct isci_request *ireq) | ||
2818 | { | 2637 | { |
2638 | /* terminate an ongoing (i.e. started) core IO request. This does not | ||
2639 | * abort the IO request at the target, but rather removes the IO | ||
2640 | * request from the host controller. | ||
2641 | */ | ||
2819 | enum sci_status status; | 2642 | enum sci_status status; |
2820 | 2643 | ||
2821 | if (ihost->sm.current_state_id != SCIC_READY) { | 2644 | if (ihost->sm.current_state_id != SCIC_READY) { |
@@ -2824,7 +2647,7 @@ enum sci_status scic_controller_terminate_request( | |||
2824 | return SCI_FAILURE_INVALID_STATE; | 2647 | return SCI_FAILURE_INVALID_STATE; |
2825 | } | 2648 | } |
2826 | 2649 | ||
2827 | status = scic_sds_io_request_terminate(ireq); | 2650 | status = sci_io_request_terminate(ireq); |
2828 | if (status != SCI_SUCCESS) | 2651 | if (status != SCI_SUCCESS) |
2829 | return status; | 2652 | return status; |
2830 | 2653 | ||
@@ -2832,27 +2655,25 @@ enum sci_status scic_controller_terminate_request( | |||
2832 | * Utilize the original post context command and OR in the POST_TC_ABORT | 2655 | * Utilize the original post context command and OR in the POST_TC_ABORT |
2833 | * request sub-type. | 2656 | * request sub-type. |
2834 | */ | 2657 | */ |
2835 | scic_sds_controller_post_request(ihost, | 2658 | sci_controller_post_request(ihost, |
2836 | scic_sds_request_get_post_context(ireq) | | 2659 | ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); |
2837 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); | ||
2838 | return SCI_SUCCESS; | 2660 | return SCI_SUCCESS; |
2839 | } | 2661 | } |
2840 | 2662 | ||
2841 | /** | 2663 | /** |
2842 | * scic_controller_complete_io() - This method will perform core specific | 2664 | * sci_controller_complete_io() - This method will perform core specific |
2843 | * completion operations for an IO request. After this method is invoked, | 2665 | * completion operations for an IO request. After this method is invoked, |
2844 | * the user should consider the IO request as invalid until it is properly | 2666 | * the user should consider the IO request as invalid until it is properly |
2845 | * reused (i.e. re-constructed). | 2667 | * reused (i.e. re-constructed). |
2846 | * @controller: The handle to the controller object for which to complete the | 2668 | * @ihost: The handle to the controller object for which to complete the |
2847 | * IO request. | 2669 | * IO request. |
2848 | * @remote_device: The handle to the remote device object for which to complete | 2670 | * @idev: The handle to the remote device object for which to complete |
2849 | * the IO request. | 2671 | * the IO request. |
2850 | * @io_request: the handle to the io request object to complete. | 2672 | * @ireq: the handle to the io request object to complete. |
2851 | */ | 2673 | */ |
2852 | enum sci_status scic_controller_complete_io( | 2674 | enum sci_status sci_controller_complete_io(struct isci_host *ihost, |
2853 | struct isci_host *ihost, | 2675 | struct isci_remote_device *idev, |
2854 | struct isci_remote_device *idev, | 2676 | struct isci_request *ireq) |
2855 | struct isci_request *ireq) | ||
2856 | { | 2677 | { |
2857 | enum sci_status status; | 2678 | enum sci_status status; |
2858 | u16 index; | 2679 | u16 index; |
@@ -2862,7 +2683,7 @@ enum sci_status scic_controller_complete_io( | |||
2862 | /* XXX: Implement this function */ | 2683 | /* XXX: Implement this function */ |
2863 | return SCI_FAILURE; | 2684 | return SCI_FAILURE; |
2864 | case SCIC_READY: | 2685 | case SCIC_READY: |
2865 | status = scic_sds_remote_device_complete_io(ihost, idev, ireq); | 2686 | status = sci_remote_device_complete_io(ihost, idev, ireq); |
2866 | if (status != SCI_SUCCESS) | 2687 | if (status != SCI_SUCCESS) |
2867 | return status; | 2688 | return status; |
2868 | 2689 | ||
@@ -2876,7 +2697,7 @@ enum sci_status scic_controller_complete_io( | |||
2876 | 2697 | ||
2877 | } | 2698 | } |
2878 | 2699 | ||
2879 | enum sci_status scic_controller_continue_io(struct isci_request *ireq) | 2700 | enum sci_status sci_controller_continue_io(struct isci_request *ireq) |
2880 | { | 2701 | { |
2881 | struct isci_host *ihost = ireq->owning_controller; | 2702 | struct isci_host *ihost = ireq->owning_controller; |
2882 | 2703 | ||
@@ -2886,12 +2707,12 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) | |||
2886 | } | 2707 | } |
2887 | 2708 | ||
2888 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2709 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2889 | scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); | 2710 | sci_controller_post_request(ihost, sci_request_get_post_context(ireq)); |
2890 | return SCI_SUCCESS; | 2711 | return SCI_SUCCESS; |
2891 | } | 2712 | } |
2892 | 2713 | ||
2893 | /** | 2714 | /** |
2894 | * scic_controller_start_task() - This method is called by the SCIC user to | 2715 | * sci_controller_start_task() - This method is called by the SCIC user to |
2895 | * send/start a framework task management request. | 2716 | * send/start a framework task management request. |
2896 | * @controller: the handle to the controller object for which to start the task | 2717 | * @controller: the handle to the controller object for which to start the task |
2897 | * management request. | 2718 | * management request. |
@@ -2899,10 +2720,9 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) | |||
2899 | * the task management request. | 2720 | * the task management request. |
2900 | * @task_request: the handle to the task request object to start. | 2721 | * @task_request: the handle to the task request object to start. |
2901 | */ | 2722 | */ |
2902 | enum sci_task_status scic_controller_start_task( | 2723 | enum sci_task_status sci_controller_start_task(struct isci_host *ihost, |
2903 | struct isci_host *ihost, | 2724 | struct isci_remote_device *idev, |
2904 | struct isci_remote_device *idev, | 2725 | struct isci_request *ireq) |
2905 | struct isci_request *ireq) | ||
2906 | { | 2726 | { |
2907 | enum sci_status status; | 2727 | enum sci_status status; |
2908 | 2728 | ||
@@ -2914,7 +2734,7 @@ enum sci_task_status scic_controller_start_task( | |||
2914 | return SCI_TASK_FAILURE_INVALID_STATE; | 2734 | return SCI_TASK_FAILURE_INVALID_STATE; |
2915 | } | 2735 | } |
2916 | 2736 | ||
2917 | status = scic_sds_remote_device_start_task(ihost, idev, ireq); | 2737 | status = sci_remote_device_start_task(ihost, idev, ireq); |
2918 | switch (status) { | 2738 | switch (status) { |
2919 | case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: | 2739 | case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: |
2920 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2740 | set_bit(IREQ_ACTIVE, &ireq->flags); |
@@ -2928,8 +2748,8 @@ enum sci_task_status scic_controller_start_task( | |||
2928 | case SCI_SUCCESS: | 2748 | case SCI_SUCCESS: |
2929 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2749 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2930 | 2750 | ||
2931 | scic_sds_controller_post_request(ihost, | 2751 | sci_controller_post_request(ihost, |
2932 | scic_sds_request_get_post_context(ireq)); | 2752 | sci_request_get_post_context(ireq)); |
2933 | break; | 2753 | break; |
2934 | default: | 2754 | default: |
2935 | break; | 2755 | break; |
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 013f672a8fd7..d87f21de1807 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -69,12 +69,12 @@ struct scu_task_context; | |||
69 | 69 | ||
70 | 70 | ||
71 | /** | 71 | /** |
72 | * struct scic_power_control - | 72 | * struct sci_power_control - |
73 | * | 73 | * |
74 | * This structure defines the fields for managing power control for direct | 74 | * This structure defines the fields for managing power control for direct |
75 | * attached disk devices. | 75 | * attached disk devices. |
76 | */ | 76 | */ |
77 | struct scic_power_control { | 77 | struct sci_power_control { |
78 | /** | 78 | /** |
79 | * This field is set when the power control timer is running and cleared when | 79 | * This field is set when the power control timer is running and cleared when |
80 | * it is not. | 80 | * it is not. |
@@ -99,18 +99,18 @@ struct scic_power_control { | |||
99 | 99 | ||
100 | /** | 100 | /** |
101 | * This field is an array of phys that we are waiting on. The phys are direct | 101 | * This field is an array of phys that we are waiting on. The phys are direct |
102 | * mapped into requesters via struct scic_sds_phy.phy_index | 102 | * mapped into requesters via struct sci_phy.phy_index |
103 | */ | 103 | */ |
104 | struct isci_phy *requesters[SCI_MAX_PHYS]; | 104 | struct isci_phy *requesters[SCI_MAX_PHYS]; |
105 | 105 | ||
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct scic_sds_port_configuration_agent; | 108 | struct sci_port_configuration_agent; |
109 | typedef void (*port_config_fn)(struct isci_host *, | 109 | typedef void (*port_config_fn)(struct isci_host *, |
110 | struct scic_sds_port_configuration_agent *, | 110 | struct sci_port_configuration_agent *, |
111 | struct isci_port *, struct isci_phy *); | 111 | struct isci_port *, struct isci_phy *); |
112 | 112 | ||
113 | struct scic_sds_port_configuration_agent { | 113 | struct sci_port_configuration_agent { |
114 | u16 phy_configured_mask; | 114 | u16 phy_configured_mask; |
115 | u16 phy_ready_mask; | 115 | u16 phy_ready_mask; |
116 | struct { | 116 | struct { |
@@ -149,13 +149,13 @@ struct isci_host { | |||
149 | /* XXX can we time this externally */ | 149 | /* XXX can we time this externally */ |
150 | struct sci_timer timer; | 150 | struct sci_timer timer; |
151 | /* XXX drop reference module params directly */ | 151 | /* XXX drop reference module params directly */ |
152 | union scic_user_parameters user_parameters; | 152 | struct sci_user_parameters user_parameters; |
153 | /* XXX no need to be a union */ | 153 | /* XXX no need to be a union */ |
154 | union scic_oem_parameters oem_parameters; | 154 | struct sci_oem_params oem_parameters; |
155 | struct scic_sds_port_configuration_agent port_agent; | 155 | struct sci_port_configuration_agent port_agent; |
156 | struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; | 156 | struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; |
157 | struct scic_remote_node_table available_remote_nodes; | 157 | struct sci_remote_node_table available_remote_nodes; |
158 | struct scic_power_control power_control; | 158 | struct sci_power_control power_control; |
159 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; | 159 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; |
160 | struct scu_task_context *task_context_table; | 160 | struct scu_task_context *task_context_table; |
161 | dma_addr_t task_context_dma; | 161 | dma_addr_t task_context_dma; |
@@ -165,7 +165,7 @@ struct isci_host { | |||
165 | u32 logical_port_entries; | 165 | u32 logical_port_entries; |
166 | u32 remote_node_entries; | 166 | u32 remote_node_entries; |
167 | u32 task_context_entries; | 167 | u32 task_context_entries; |
168 | struct scic_sds_unsolicited_frame_control uf_control; | 168 | struct sci_unsolicited_frame_control uf_control; |
169 | 169 | ||
170 | /* phy startup */ | 170 | /* phy startup */ |
171 | struct sci_timer phy_timer; | 171 | struct sci_timer phy_timer; |
@@ -206,10 +206,10 @@ struct isci_host { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | /** | 208 | /** |
209 | * enum scic_sds_controller_states - This enumeration depicts all the states | 209 | * enum sci_controller_states - This enumeration depicts all the states |
210 | * for the common controller state machine. | 210 | * for the common controller state machine. |
211 | */ | 211 | */ |
212 | enum scic_sds_controller_states { | 212 | enum sci_controller_states { |
213 | /** | 213 | /** |
214 | * Simply the initial state for the base controller state machine. | 214 | * Simply the initial state for the base controller state machine. |
215 | */ | 215 | */ |
@@ -360,14 +360,14 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * scic_sds_controller_get_protocol_engine_group() - | 363 | * sci_controller_get_protocol_engine_group() - |
364 | * | 364 | * |
365 | * This macro returns the protocol engine group for this controller object. | 365 | * This macro returns the protocol engine group for this controller object. |
366 | * Presently we only support protocol engine group 0 so just return that | 366 | * Presently we only support protocol engine group 0 so just return that |
367 | */ | 367 | */ |
368 | #define scic_sds_controller_get_protocol_engine_group(controller) 0 | 368 | #define sci_controller_get_protocol_engine_group(controller) 0 |
369 | 369 | ||
370 | /* see scic_controller_io_tag_allocate|free for how seq and tci are built */ | 370 | /* see sci_controller_io_tag_allocate|free for how seq and tci are built */ |
371 | #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) | 371 | #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci) |
372 | 372 | ||
373 | /* these are returned by the hardware, so sanitize them */ | 373 | /* these are returned by the hardware, so sanitize them */ |
@@ -375,7 +375,7 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
375 | #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) | 375 | #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) |
376 | 376 | ||
377 | /* expander attached sata devices require 3 rnc slots */ | 377 | /* expander attached sata devices require 3 rnc slots */ |
378 | static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev) | 378 | static inline int sci_remote_device_node_count(struct isci_remote_device *idev) |
379 | { | 379 | { |
380 | struct domain_device *dev = idev->domain_dev; | 380 | struct domain_device *dev = idev->domain_dev; |
381 | 381 | ||
@@ -386,23 +386,23 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i | |||
386 | } | 386 | } |
387 | 387 | ||
388 | /** | 388 | /** |
389 | * scic_sds_controller_set_invalid_phy() - | 389 | * sci_controller_set_invalid_phy() - |
390 | * | 390 | * |
391 | * This macro will set the bit in the invalid phy mask for this controller | 391 | * This macro will set the bit in the invalid phy mask for this controller |
392 | * object. This is used to control messages reported for invalid link up | 392 | * object. This is used to control messages reported for invalid link up |
393 | * notifications. | 393 | * notifications. |
394 | */ | 394 | */ |
395 | #define scic_sds_controller_set_invalid_phy(controller, phy) \ | 395 | #define sci_controller_set_invalid_phy(controller, phy) \ |
396 | ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) | 396 | ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) |
397 | 397 | ||
398 | /** | 398 | /** |
399 | * scic_sds_controller_clear_invalid_phy() - | 399 | * sci_controller_clear_invalid_phy() - |
400 | * | 400 | * |
401 | * This macro will clear the bit in the invalid phy mask for this controller | 401 | * This macro will clear the bit in the invalid phy mask for this controller |
402 | * object. This is used to control messages reported for invalid link up | 402 | * object. This is used to control messages reported for invalid link up |
403 | * notifications. | 403 | * notifications. |
404 | */ | 404 | */ |
405 | #define scic_sds_controller_clear_invalid_phy(controller, phy) \ | 405 | #define sci_controller_clear_invalid_phy(controller, phy) \ |
406 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) | 406 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) |
407 | 407 | ||
408 | static inline struct device *sciphy_to_dev(struct isci_phy *iphy) | 408 | static inline struct device *sciphy_to_dev(struct isci_phy *iphy) |
@@ -460,56 +460,53 @@ static inline bool is_c0(void) | |||
460 | return isci_si_rev > ISCI_SI_REVB0; | 460 | return isci_si_rev > ISCI_SI_REVB0; |
461 | } | 461 | } |
462 | 462 | ||
463 | void scic_sds_controller_post_request(struct isci_host *ihost, | 463 | void sci_controller_post_request(struct isci_host *ihost, |
464 | u32 request); | 464 | u32 request); |
465 | void scic_sds_controller_release_frame(struct isci_host *ihost, | 465 | void sci_controller_release_frame(struct isci_host *ihost, |
466 | u32 frame_index); | 466 | u32 frame_index); |
467 | void scic_sds_controller_copy_sata_response(void *response_buffer, | 467 | void sci_controller_copy_sata_response(void *response_buffer, |
468 | void *frame_header, | 468 | void *frame_header, |
469 | void *frame_buffer); | 469 | void *frame_buffer); |
470 | enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost, | 470 | enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, |
471 | struct isci_remote_device *idev, | 471 | struct isci_remote_device *idev, |
472 | u16 *node_id); | 472 | u16 *node_id); |
473 | void scic_sds_controller_free_remote_node_context( | 473 | void sci_controller_free_remote_node_context( |
474 | struct isci_host *ihost, | 474 | struct isci_host *ihost, |
475 | struct isci_remote_device *idev, | 475 | struct isci_remote_device *idev, |
476 | u16 node_id); | 476 | u16 node_id); |
477 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( | ||
478 | struct isci_host *ihost, | ||
479 | u16 node_id); | ||
480 | 477 | ||
481 | struct isci_request *scic_request_by_tag(struct isci_host *ihost, | 478 | struct isci_request *sci_request_by_tag(struct isci_host *ihost, |
482 | u16 io_tag); | 479 | u16 io_tag); |
483 | 480 | ||
484 | void scic_sds_controller_power_control_queue_insert( | 481 | void sci_controller_power_control_queue_insert( |
485 | struct isci_host *ihost, | 482 | struct isci_host *ihost, |
486 | struct isci_phy *iphy); | 483 | struct isci_phy *iphy); |
487 | 484 | ||
488 | void scic_sds_controller_power_control_queue_remove( | 485 | void sci_controller_power_control_queue_remove( |
489 | struct isci_host *ihost, | 486 | struct isci_host *ihost, |
490 | struct isci_phy *iphy); | 487 | struct isci_phy *iphy); |
491 | 488 | ||
492 | void scic_sds_controller_link_up( | 489 | void sci_controller_link_up( |
493 | struct isci_host *ihost, | 490 | struct isci_host *ihost, |
494 | struct isci_port *iport, | 491 | struct isci_port *iport, |
495 | struct isci_phy *iphy); | 492 | struct isci_phy *iphy); |
496 | 493 | ||
497 | void scic_sds_controller_link_down( | 494 | void sci_controller_link_down( |
498 | struct isci_host *ihost, | 495 | struct isci_host *ihost, |
499 | struct isci_port *iport, | 496 | struct isci_port *iport, |
500 | struct isci_phy *iphy); | 497 | struct isci_phy *iphy); |
501 | 498 | ||
502 | void scic_sds_controller_remote_device_stopped( | 499 | void sci_controller_remote_device_stopped( |
503 | struct isci_host *ihost, | 500 | struct isci_host *ihost, |
504 | struct isci_remote_device *idev); | 501 | struct isci_remote_device *idev); |
505 | 502 | ||
506 | void scic_sds_controller_copy_task_context( | 503 | void sci_controller_copy_task_context( |
507 | struct isci_host *ihost, | 504 | struct isci_host *ihost, |
508 | struct isci_request *ireq); | 505 | struct isci_request *ireq); |
509 | 506 | ||
510 | void scic_sds_controller_register_setup(struct isci_host *ihost); | 507 | void sci_controller_register_setup(struct isci_host *ihost); |
511 | 508 | ||
512 | enum sci_status scic_controller_continue_io(struct isci_request *ireq); | 509 | enum sci_status sci_controller_continue_io(struct isci_request *ireq); |
513 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); | 510 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); |
514 | void isci_host_scan_start(struct Scsi_Host *); | 511 | void isci_host_scan_start(struct Scsi_Host *); |
515 | u16 isci_alloc_tag(struct isci_host *ihost); | 512 | u16 isci_alloc_tag(struct isci_host *ihost); |
@@ -536,33 +533,33 @@ void isci_host_remote_device_start_complete( | |||
536 | struct isci_remote_device *, | 533 | struct isci_remote_device *, |
537 | enum sci_status); | 534 | enum sci_status); |
538 | 535 | ||
539 | void scic_controller_disable_interrupts( | 536 | void sci_controller_disable_interrupts( |
540 | struct isci_host *ihost); | 537 | struct isci_host *ihost); |
541 | 538 | ||
542 | enum sci_status scic_controller_start_io( | 539 | enum sci_status sci_controller_start_io( |
543 | struct isci_host *ihost, | 540 | struct isci_host *ihost, |
544 | struct isci_remote_device *idev, | 541 | struct isci_remote_device *idev, |
545 | struct isci_request *ireq); | 542 | struct isci_request *ireq); |
546 | 543 | ||
547 | enum sci_task_status scic_controller_start_task( | 544 | enum sci_task_status sci_controller_start_task( |
548 | struct isci_host *ihost, | 545 | struct isci_host *ihost, |
549 | struct isci_remote_device *idev, | 546 | struct isci_remote_device *idev, |
550 | struct isci_request *ireq); | 547 | struct isci_request *ireq); |
551 | 548 | ||
552 | enum sci_status scic_controller_terminate_request( | 549 | enum sci_status sci_controller_terminate_request( |
553 | struct isci_host *ihost, | 550 | struct isci_host *ihost, |
554 | struct isci_remote_device *idev, | 551 | struct isci_remote_device *idev, |
555 | struct isci_request *ireq); | 552 | struct isci_request *ireq); |
556 | 553 | ||
557 | enum sci_status scic_controller_complete_io( | 554 | enum sci_status sci_controller_complete_io( |
558 | struct isci_host *ihost, | 555 | struct isci_host *ihost, |
559 | struct isci_remote_device *idev, | 556 | struct isci_remote_device *idev, |
560 | struct isci_request *ireq); | 557 | struct isci_request *ireq); |
561 | 558 | ||
562 | void scic_sds_port_configuration_agent_construct( | 559 | void sci_port_configuration_agent_construct( |
563 | struct scic_sds_port_configuration_agent *port_agent); | 560 | struct sci_port_configuration_agent *port_agent); |
564 | 561 | ||
565 | enum sci_status scic_sds_port_configuration_agent_initialize( | 562 | enum sci_status sci_port_configuration_agent_initialize( |
566 | struct isci_host *ihost, | 563 | struct isci_host *ihost, |
567 | struct scic_sds_port_configuration_agent *port_agent); | 564 | struct sci_port_configuration_agent *port_agent); |
568 | #endif | 565 | #endif |
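A brief aside on the io tag macros in the host.h hunk above: ISCI_TAG() packs a 4-bit sequence count and a task context index (tci) into a single u16, and ISCI_TAG_TCI() recovers the tci by masking against SCI_MAX_IO_REQUESTS - 1, since tags come back from the hardware and have to be sanitized. The following is a minimal standalone sketch of that packing only; SCI_MAX_IO_REQUESTS is assumed to be 256 here, and uint16_t stands in for the kernel's u16.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_IO_REQUESTS	256	/* assumed value, for illustration only */
#define ISCI_TAG(seq, tci)	(((uint16_t)(seq)) << 12 | (tci))
#define ISCI_TAG_TCI(tag)	((tag) & (SCI_MAX_IO_REQUESTS - 1))

int main(void)
{
	uint16_t tag = ISCI_TAG(0x3, 42);	/* sequence 3, task context index 42 */

	assert(ISCI_TAG_TCI(tag) == 42);	/* low bits sanitize back to the tci */
	assert((tag >> 12) == 0x3);		/* high nibble carries the sequence */
	printf("tag=0x%04x tci=%d seq=%d\n", (unsigned int)tag,
	       ISCI_TAG_TCI(tag), tag >> 12);
	return 0;
}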
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68ca1a4f30af..8d9a8bfff4d5 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -484,7 +484,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic | |||
484 | orom = isci_request_oprom(pdev); | 484 | orom = isci_request_oprom(pdev); |
485 | 485 | ||
486 | for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { | 486 | for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { |
487 | if (scic_oem_parameters_validate(&orom->ctrl[i])) { | 487 | if (sci_oem_parameters_validate(&orom->ctrl[i])) { |
488 | dev_warn(&pdev->dev, | 488 | dev_warn(&pdev->dev, |
489 | "[%d]: invalid oem parameters detected, falling back to firmware\n", i); | 489 | "[%d]: invalid oem parameters detected, falling back to firmware\n", i); |
490 | devm_kfree(&pdev->dev, orom); | 490 | devm_kfree(&pdev->dev, orom); |
@@ -554,7 +554,7 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev) | |||
554 | for_each_isci_host(i, ihost, pdev) { | 554 | for_each_isci_host(i, ihost, pdev) { |
555 | isci_unregister(ihost); | 555 | isci_unregister(ihost); |
556 | isci_host_deinit(ihost); | 556 | isci_host_deinit(ihost); |
557 | scic_controller_disable_interrupts(ihost); | 557 | sci_controller_disable_interrupts(ihost); |
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 207328369edd..3afccfcb94e1 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h | |||
@@ -304,7 +304,7 @@ enum sci_status { | |||
304 | * This member indicates that the operation failed, the failure is | 304 | * This member indicates that the operation failed, the failure is |
305 | * controller implementation specific, and the response data associated | 305 | * controller implementation specific, and the response data associated |
306 | * with the request is not valid. You can query for the controller | 306 | * with the request is not valid. You can query for the controller |
307 | * specific error information via scic_controller_get_request_status() | 307 | * specific error information via sci_controller_get_request_status() |
308 | */ | 308 | */ |
309 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, | 309 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR, |
310 | 310 | ||
@@ -395,7 +395,7 @@ enum sci_status { | |||
395 | /** | 395 | /** |
396 | * This value indicates that an unsupported PCI device ID has been | 396 | * This value indicates that an unsupported PCI device ID has been |
397 | * specified. This indicates that attempts to invoke | 397 | * specified. This indicates that attempts to invoke |
398 | * scic_library_allocate_controller() will fail. | 398 | * sci_library_allocate_controller() will fail. |
399 | */ | 399 | */ |
400 | SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID | 400 | SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID |
401 | 401 | ||
@@ -493,7 +493,7 @@ irqreturn_t isci_error_isr(int vec, void *data); | |||
493 | /* | 493 | /* |
494 | * Each timer is associated with a cancellation flag that is set when | 494 | * Each timer is associated with a cancellation flag that is set when |
495 | * del_timer() is called and checked in the timer callback function. This | 495 | * del_timer() is called and checked in the timer callback function. This |
496 | * is needed since del_timer_sync() cannot be called with scic_lock held. | 496 | * is needed since del_timer_sync() cannot be called with sci_lock held. |
497 | * For deinit however, del_timer_sync() is used without holding the lock. | 497 | * For deinit however, del_timer_sync() is used without holding the lock. |
498 | */ | 498 | */ |
499 | struct sci_timer { | 499 | struct sci_timer { |
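The sci_timer comment above describes a cancellation pattern worth spelling out: because del_timer_sync() cannot be called while sci_lock is held, the cancel path only sets a per-timer flag under the lock (del_timer()), and the timer callback re-checks that flag before doing any work. Below is a minimal userspace sketch of the same idea using pthreads in place of kernel timers; every name in it is illustrative and not part of the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for struct sci_timer: just a lock and a cancellation flag */
struct demo_timer {
	pthread_mutex_t lock;
	bool cancel;
};

/* the "timer callback": bail out if the timer was cancelled under the lock */
static void *demo_timeout(void *arg)
{
	struct demo_timer *t = arg;

	sleep(1);			/* pretend the timeout just fired */
	pthread_mutex_lock(&t->lock);
	if (t->cancel) {
		pthread_mutex_unlock(&t->lock);
		return NULL;		/* cancelled: skip the timeout work */
	}
	printf("timeout work runs\n");
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

int main(void)
{
	struct demo_timer t = { .lock = PTHREAD_MUTEX_INITIALIZER, .cancel = false };
	pthread_t thr;

	pthread_create(&thr, NULL, demo_timeout, &t);

	/* cancel while "holding the lock": only set the flag, never block */
	pthread_mutex_lock(&t.lock);
	t.cancel = true;
	pthread_mutex_unlock(&t.lock);

	pthread_join(thr, NULL);
	return 0;
}

For teardown (deinit), where the lock is not held, a blocking cancel (del_timer_sync() in the driver, pthread_join() in this sketch) is safe and guarantees the callback has finished.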
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index ca96b5ad0d52..0df9f713f487 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c | |||
@@ -67,25 +67,13 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy) | |||
67 | return iphy->max_negotiated_speed; | 67 | return iphy->max_negotiated_speed; |
68 | } | 68 | } |
69 | 69 | ||
70 | /* | 70 | static enum sci_status |
71 | * ***************************************************************************** | 71 | sci_phy_transport_layer_initialization(struct isci_phy *iphy, |
72 | * * SCIC SDS PHY Internal Methods | 72 | struct scu_transport_layer_registers __iomem *reg) |
73 | * ***************************************************************************** */ | ||
74 | |||
75 | /** | ||
76 | * This method will initialize the phy transport layer registers | ||
77 | * @sci_phy: | ||
78 | * @transport_layer_registers | ||
79 | * | ||
80 | * enum sci_status | ||
81 | */ | ||
82 | static enum sci_status scic_sds_phy_transport_layer_initialization( | ||
83 | struct isci_phy *iphy, | ||
84 | struct scu_transport_layer_registers __iomem *transport_layer_registers) | ||
85 | { | 73 | { |
86 | u32 tl_control; | 74 | u32 tl_control; |
87 | 75 | ||
88 | iphy->transport_layer_registers = transport_layer_registers; | 76 | iphy->transport_layer_registers = reg; |
89 | 77 | ||
90 | writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, | 78 | writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX, |
91 | &iphy->transport_layer_registers->stp_rni); | 79 | &iphy->transport_layer_registers->stp_rni); |
@@ -101,32 +89,23 @@ static enum sci_status scic_sds_phy_transport_layer_initialization( | |||
101 | return SCI_SUCCESS; | 89 | return SCI_SUCCESS; |
102 | } | 90 | } |
103 | 91 | ||
104 | /** | ||
105 | * This method will initialize the phy link layer registers | ||
106 | * @sci_phy: | ||
107 | * @link_layer_registers: | ||
108 | * | ||
109 | * enum sci_status | ||
110 | */ | ||
111 | static enum sci_status | 92 | static enum sci_status |
112 | scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | 93 | sci_phy_link_layer_initialization(struct isci_phy *iphy, |
113 | struct scu_link_layer_registers __iomem *link_layer_registers) | 94 | struct scu_link_layer_registers __iomem *reg) |
114 | { | 95 | { |
115 | struct isci_host *ihost = | 96 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
116 | iphy->owning_port->owning_controller; | ||
117 | int phy_idx = iphy->phy_index; | 97 | int phy_idx = iphy->phy_index; |
118 | struct sci_phy_user_params *phy_user = | 98 | struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; |
119 | &ihost->user_parameters.sds1.phys[phy_idx]; | ||
120 | struct sci_phy_oem_params *phy_oem = | 99 | struct sci_phy_oem_params *phy_oem = |
121 | &ihost->oem_parameters.sds1.phys[phy_idx]; | 100 | &ihost->oem_parameters.phys[phy_idx]; |
122 | u32 phy_configuration; | 101 | u32 phy_configuration; |
123 | struct scic_phy_cap phy_cap; | 102 | struct sci_phy_cap phy_cap; |
124 | u32 parity_check = 0; | 103 | u32 parity_check = 0; |
125 | u32 parity_count = 0; | 104 | u32 parity_count = 0; |
126 | u32 llctl, link_rate; | 105 | u32 llctl, link_rate; |
127 | u32 clksm_value = 0; | 106 | u32 clksm_value = 0; |
128 | 107 | ||
129 | iphy->link_layer_registers = link_layer_registers; | 108 | iphy->link_layer_registers = reg; |
130 | 109 | ||
131 | /* Set our IDENTIFY frame data */ | 110 | /* Set our IDENTIFY frame data */ |
132 | #define SCI_END_DEVICE 0x01 | 111 | #define SCI_END_DEVICE 0x01 |
@@ -169,7 +148,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | |||
169 | phy_cap.gen3_no_ssc = 1; | 148 | phy_cap.gen3_no_ssc = 1; |
170 | phy_cap.gen2_no_ssc = 1; | 149 | phy_cap.gen2_no_ssc = 1; |
171 | phy_cap.gen1_no_ssc = 1; | 150 | phy_cap.gen1_no_ssc = 1; |
172 | if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) { | 151 | if (ihost->oem_parameters.controller.do_enable_ssc == true) { |
173 | phy_cap.gen3_ssc = 1; | 152 | phy_cap.gen3_ssc = 1; |
174 | phy_cap.gen2_ssc = 1; | 153 | phy_cap.gen2_ssc = 1; |
175 | phy_cap.gen1_ssc = 1; | 154 | phy_cap.gen1_ssc = 1; |
@@ -216,7 +195,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | |||
216 | &iphy->link_layer_registers->afe_lookup_table_control); | 195 | &iphy->link_layer_registers->afe_lookup_table_control); |
217 | 196 | ||
218 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, | 197 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, |
219 | (u8)ihost->user_parameters.sds1.no_outbound_task_timeout); | 198 | (u8)ihost->user_parameters.no_outbound_task_timeout); |
220 | 199 | ||
221 | switch(phy_user->max_speed_generation) { | 200 | switch(phy_user->max_speed_generation) { |
222 | case SCIC_SDS_PARM_GEN3_SPEED: | 201 | case SCIC_SDS_PARM_GEN3_SPEED: |
@@ -289,7 +268,7 @@ done: | |||
289 | struct isci_port *phy_get_non_dummy_port( | 268 | struct isci_port *phy_get_non_dummy_port( |
290 | struct isci_phy *iphy) | 269 | struct isci_phy *iphy) |
291 | { | 270 | { |
292 | if (scic_sds_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) | 271 | if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT) |
293 | return NULL; | 272 | return NULL; |
294 | 273 | ||
295 | return iphy->owning_port; | 274 | return iphy->owning_port; |
@@ -302,7 +281,7 @@ struct isci_port *phy_get_non_dummy_port( | |||
302 | * | 281 | * |
303 | * | 282 | * |
304 | */ | 283 | */ |
305 | void scic_sds_phy_set_port( | 284 | void sci_phy_set_port( |
306 | struct isci_phy *iphy, | 285 | struct isci_phy *iphy, |
307 | struct isci_port *iport) | 286 | struct isci_port *iport) |
308 | { | 287 | { |
@@ -310,33 +289,23 @@ void scic_sds_phy_set_port( | |||
310 | 289 | ||
311 | if (iphy->bcn_received_while_port_unassigned) { | 290 | if (iphy->bcn_received_while_port_unassigned) { |
312 | iphy->bcn_received_while_port_unassigned = false; | 291 | iphy->bcn_received_while_port_unassigned = false; |
313 | scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); | 292 | sci_port_broadcast_change_received(iphy->owning_port, iphy); |
314 | } | 293 | } |
315 | } | 294 | } |
316 | 295 | ||
317 | /** | 296 | enum sci_status sci_phy_initialize(struct isci_phy *iphy, |
318 | * This method will initialize the constructed phy | 297 | struct scu_transport_layer_registers __iomem *tl, |
319 | * @sci_phy: | 298 | struct scu_link_layer_registers __iomem *ll) |
320 | * @link_layer_registers: | ||
321 | * | ||
322 | * enum sci_status | ||
323 | */ | ||
324 | enum sci_status scic_sds_phy_initialize( | ||
325 | struct isci_phy *iphy, | ||
326 | struct scu_transport_layer_registers __iomem *transport_layer_registers, | ||
327 | struct scu_link_layer_registers __iomem *link_layer_registers) | ||
328 | { | 299 | { |
329 | /* Perform the initialization of the TL hardware */ | 300 | /* Perform the initialization of the TL hardware */ |
330 | scic_sds_phy_transport_layer_initialization( | 301 | sci_phy_transport_layer_initialization(iphy, tl); |
331 | iphy, | ||
332 | transport_layer_registers); | ||
333 | 302 | ||
334 | /* Perform the initialization of the PE hardware */ | 303 | /* Perform the initialization of the PE hardware */ |
335 | scic_sds_phy_link_layer_initialization(iphy, link_layer_registers); | 304 | sci_phy_link_layer_initialization(iphy, ll); |
336 | 305 | ||
337 | /* | 306 | /* There is nothing that needs to be done in this state; just |
338 | * There is nothing that needs to be done in this state; just | 307 | * transition to the stopped state. |
339 | * transition to the stopped state. */ | 308 | */ |
340 | sci_change_state(&iphy->sm, SCI_PHY_STOPPED); | 309 | sci_change_state(&iphy->sm, SCI_PHY_STOPPED); |
341 | 310 | ||
342 | return SCI_SUCCESS; | 311 | return SCI_SUCCESS; |
@@ -351,9 +320,7 @@ enum sci_status scic_sds_phy_initialize( | |||
351 | * This will either be the RNi for the device or an invalid RNi if there | 320 | * This will either be the RNi for the device or an invalid RNi if there |
352 | * is no current device assigned to the phy. | 321 | * is no current device assigned to the phy. |
353 | */ | 322 | */ |
354 | void scic_sds_phy_setup_transport( | 323 | void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id) |
355 | struct isci_phy *iphy, | ||
356 | u32 device_id) | ||
357 | { | 324 | { |
358 | u32 tl_control; | 325 | u32 tl_control; |
359 | 326 | ||
@@ -368,15 +335,7 @@ void scic_sds_phy_setup_transport( | |||
368 | writel(tl_control, &iphy->transport_layer_registers->control); | 335 | writel(tl_control, &iphy->transport_layer_registers->control); |
369 | } | 336 | } |
370 | 337 | ||
371 | /** | 338 | static void sci_phy_suspend(struct isci_phy *iphy) |
372 | * | ||
373 | * @sci_phy: The phy object to be suspended. | ||
374 | * | ||
375 | * This function will perform the register reads/writes to suspend the SCU | ||
376 | * hardware protocol engine. none | ||
377 | */ | ||
378 | static void scic_sds_phy_suspend( | ||
379 | struct isci_phy *iphy) | ||
380 | { | 339 | { |
381 | u32 scu_sas_pcfg_value; | 340 | u32 scu_sas_pcfg_value; |
382 | 341 | ||
@@ -386,12 +345,10 @@ static void scic_sds_phy_suspend( | |||
386 | writel(scu_sas_pcfg_value, | 345 | writel(scu_sas_pcfg_value, |
387 | &iphy->link_layer_registers->phy_configuration); | 346 | &iphy->link_layer_registers->phy_configuration); |
388 | 347 | ||
389 | scic_sds_phy_setup_transport( | 348 | sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); |
390 | iphy, | ||
391 | SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); | ||
392 | } | 349 | } |
393 | 350 | ||
394 | void scic_sds_phy_resume(struct isci_phy *iphy) | 351 | void sci_phy_resume(struct isci_phy *iphy) |
395 | { | 352 | { |
396 | u32 scu_sas_pcfg_value; | 353 | u32 scu_sas_pcfg_value; |
397 | 354 | ||
@@ -402,34 +359,28 @@ void scic_sds_phy_resume(struct isci_phy *iphy) | |||
402 | &iphy->link_layer_registers->phy_configuration); | 359 | &iphy->link_layer_registers->phy_configuration); |
403 | } | 360 | } |
404 | 361 | ||
405 | void scic_sds_phy_get_sas_address(struct isci_phy *iphy, | 362 | void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) |
406 | struct sci_sas_address *sas_address) | ||
407 | { | 363 | { |
408 | sas_address->high = readl(&iphy->link_layer_registers->source_sas_address_high); | 364 | sas->high = readl(&iphy->link_layer_registers->source_sas_address_high); |
409 | sas_address->low = readl(&iphy->link_layer_registers->source_sas_address_low); | 365 | sas->low = readl(&iphy->link_layer_registers->source_sas_address_low); |
410 | } | 366 | } |
411 | 367 | ||
412 | void scic_sds_phy_get_attached_sas_address(struct isci_phy *iphy, | 368 | void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas) |
413 | struct sci_sas_address *sas_address) | ||
414 | { | 369 | { |
415 | struct sas_identify_frame *iaf; | 370 | struct sas_identify_frame *iaf; |
416 | 371 | ||
417 | iaf = &iphy->frame_rcvd.iaf; | 372 | iaf = &iphy->frame_rcvd.iaf; |
418 | memcpy(sas_address, iaf->sas_addr, SAS_ADDR_SIZE); | 373 | memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE); |
419 | } | 374 | } |
420 | 375 | ||
421 | void scic_sds_phy_get_protocols(struct isci_phy *iphy, | 376 | void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto) |
422 | struct scic_phy_proto *protocols) | ||
423 | { | 377 | { |
424 | protocols->all = | 378 | proto->all = readl(&iphy->link_layer_registers->transmit_identification); |
425 | (u16)(readl(&iphy-> | ||
426 | link_layer_registers->transmit_identification) & | ||
427 | 0x0000FFFF); | ||
428 | } | 379 | } |
429 | 380 | ||
430 | enum sci_status scic_sds_phy_start(struct isci_phy *iphy) | 381 | enum sci_status sci_phy_start(struct isci_phy *iphy) |
431 | { | 382 | { |
432 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 383 | enum sci_phy_states state = iphy->sm.current_state_id; |
433 | 384 | ||
434 | if (state != SCI_PHY_STOPPED) { | 385 | if (state != SCI_PHY_STOPPED) { |
435 | dev_dbg(sciphy_to_dev(iphy), | 386 | dev_dbg(sciphy_to_dev(iphy), |
@@ -441,9 +392,9 @@ enum sci_status scic_sds_phy_start(struct isci_phy *iphy) | |||
441 | return SCI_SUCCESS; | 392 | return SCI_SUCCESS; |
442 | } | 393 | } |
443 | 394 | ||
444 | enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) | 395 | enum sci_status sci_phy_stop(struct isci_phy *iphy) |
445 | { | 396 | { |
446 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 397 | enum sci_phy_states state = iphy->sm.current_state_id; |
447 | 398 | ||
448 | switch (state) { | 399 | switch (state) { |
449 | case SCI_PHY_SUB_INITIAL: | 400 | case SCI_PHY_SUB_INITIAL: |
@@ -467,9 +418,9 @@ enum sci_status scic_sds_phy_stop(struct isci_phy *iphy) | |||
467 | return SCI_SUCCESS; | 418 | return SCI_SUCCESS; |
468 | } | 419 | } |
469 | 420 | ||
470 | enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) | 421 | enum sci_status sci_phy_reset(struct isci_phy *iphy) |
471 | { | 422 | { |
472 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 423 | enum sci_phy_states state = iphy->sm.current_state_id; |
473 | 424 | ||
474 | if (state != SCI_PHY_READY) { | 425 | if (state != SCI_PHY_READY) { |
475 | dev_dbg(sciphy_to_dev(iphy), | 426 | dev_dbg(sciphy_to_dev(iphy), |
@@ -481,9 +432,9 @@ enum sci_status scic_sds_phy_reset(struct isci_phy *iphy) | |||
481 | return SCI_SUCCESS; | 432 | return SCI_SUCCESS; |
482 | } | 433 | } |
483 | 434 | ||
484 | enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) | 435 | enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy) |
485 | { | 436 | { |
486 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 437 | enum sci_phy_states state = iphy->sm.current_state_id; |
487 | 438 | ||
488 | switch (state) { | 439 | switch (state) { |
489 | case SCI_PHY_SUB_AWAIT_SAS_POWER: { | 440 | case SCI_PHY_SUB_AWAIT_SAS_POWER: { |
@@ -528,55 +479,37 @@ enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy) | |||
528 | } | 479 | } |
529 | } | 480 | } |
530 | 481 | ||
531 | /* | 482 | static void sci_phy_start_sas_link_training(struct isci_phy *iphy) |
532 | * ***************************************************************************** | ||
533 | * * SCIC SDS PHY HELPER FUNCTIONS | ||
534 | * ***************************************************************************** */ | ||
535 | |||
536 | |||
537 | /** | ||
538 | * | ||
539 | * @sci_phy: The phy object that received SAS PHY DETECTED. | ||
540 | * | ||
541 | * This method continues the link training for the phy as if it were a SAS PHY | ||
542 | * instead of a SATA PHY. This is done because the completion queue had a SAS | ||
543 | * PHY DETECTED event when the state machine was expecting a SATA PHY event. | ||
544 | * none | ||
545 | */ | ||
546 | static void scic_sds_phy_start_sas_link_training( | ||
547 | struct isci_phy *iphy) | ||
548 | { | 483 | { |
484 | /* continue the link training for the phy as if it were a SAS PHY | ||
485 | * instead of a SATA PHY. This is done because the completion queue had a SAS | ||
486 | * PHY DETECTED event when the state machine was expecting a SATA PHY event. | ||
487 | */ | ||
549 | u32 phy_control; | 488 | u32 phy_control; |
550 | 489 | ||
551 | phy_control = | 490 | phy_control = readl(&iphy->link_layer_registers->phy_configuration); |
552 | readl(&iphy->link_layer_registers->phy_configuration); | ||
553 | phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); | 491 | phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); |
554 | writel(phy_control, | 492 | writel(phy_control, |
555 | &iphy->link_layer_registers->phy_configuration); | 493 | &iphy->link_layer_registers->phy_configuration); |
556 | 494 | ||
557 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); | 495 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); |
558 | 496 | ||
559 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; | 497 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; |
560 | } | 498 | } |
561 | 499 | ||
562 | /** | 500 | static void sci_phy_start_sata_link_training(struct isci_phy *iphy) |
563 | * | ||
564 | * @sci_phy: The phy object that received a SATA SPINUP HOLD event | ||
565 | * | ||
566 | * This method continues the link training for the phy as if it were a SATA PHY | ||
567 | * instead of a SAS PHY. This is done because the completion queue had a SATA | ||
568 | * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none | ||
569 | */ | ||
570 | static void scic_sds_phy_start_sata_link_training( | ||
571 | struct isci_phy *iphy) | ||
572 | { | 501 | { |
502 | /* This method continues the link training for the phy as if it were a SATA PHY | ||
503 | * instead of a SAS PHY. This is done because the completion queue had a SATA | ||
504 | * SPINUP HOLD event when the state machine was expecting a SAS PHY event. | ||
505 | */ | ||
573 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); | 506 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); |
574 | 507 | ||
575 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; | 508 | iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; |
576 | } | 509 | } |
577 | 510 | ||
578 | /** | 511 | /** |
579 | * scic_sds_phy_complete_link_training - perform processing common to | 512 | * sci_phy_complete_link_training - perform processing common to |
580 | * all protocols upon completion of link training. | 513 | * all protocols upon completion of link training. |
581 | * @sci_phy: This parameter specifies the phy object for which link training | 514 | * @sci_phy: This parameter specifies the phy object for which link training |
582 | * has completed. | 515 | * has completed. |
@@ -586,30 +519,28 @@ static void scic_sds_phy_start_sata_link_training( | |||
586 | * sub-state machine. | 519 | * sub-state machine. |
587 | * | 520 | * |
588 | */ | 521 | */ |
589 | static void scic_sds_phy_complete_link_training( | 522 | static void sci_phy_complete_link_training(struct isci_phy *iphy, |
590 | struct isci_phy *iphy, | 523 | enum sas_linkrate max_link_rate, |
591 | enum sas_linkrate max_link_rate, | 524 | u32 next_state) |
592 | u32 next_state) | ||
593 | { | 525 | { |
594 | iphy->max_negotiated_speed = max_link_rate; | 526 | iphy->max_negotiated_speed = max_link_rate; |
595 | 527 | ||
596 | sci_change_state(&iphy->sm, next_state); | 528 | sci_change_state(&iphy->sm, next_state); |
597 | } | 529 | } |
598 | 530 | ||
599 | enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | 531 | enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) |
600 | u32 event_code) | ||
601 | { | 532 | { |
602 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 533 | enum sci_phy_states state = iphy->sm.current_state_id; |
603 | 534 | ||
604 | switch (state) { | 535 | switch (state) { |
605 | case SCI_PHY_SUB_AWAIT_OSSP_EN: | 536 | case SCI_PHY_SUB_AWAIT_OSSP_EN: |
606 | switch (scu_get_event_code(event_code)) { | 537 | switch (scu_get_event_code(event_code)) { |
607 | case SCU_EVENT_SAS_PHY_DETECTED: | 538 | case SCU_EVENT_SAS_PHY_DETECTED: |
608 | scic_sds_phy_start_sas_link_training(iphy); | 539 | sci_phy_start_sas_link_training(iphy); |
609 | iphy->is_in_link_training = true; | 540 | iphy->is_in_link_training = true; |
610 | break; | 541 | break; |
611 | case SCU_EVENT_SATA_SPINUP_HOLD: | 542 | case SCU_EVENT_SATA_SPINUP_HOLD: |
612 | scic_sds_phy_start_sata_link_training(iphy); | 543 | sci_phy_start_sata_link_training(iphy); |
613 | iphy->is_in_link_training = true; | 544 | iphy->is_in_link_training = true; |
614 | break; | 545 | break; |
615 | default: | 546 | default: |
@@ -630,30 +561,24 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
630 | break; | 561 | break; |
631 | case SCU_EVENT_SAS_15: | 562 | case SCU_EVENT_SAS_15: |
632 | case SCU_EVENT_SAS_15_SSC: | 563 | case SCU_EVENT_SAS_15_SSC: |
633 | scic_sds_phy_complete_link_training( | 564 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, |
634 | iphy, | 565 | SCI_PHY_SUB_AWAIT_IAF_UF); |
635 | SAS_LINK_RATE_1_5_GBPS, | ||
636 | SCI_PHY_SUB_AWAIT_IAF_UF); | ||
637 | break; | 566 | break; |
638 | case SCU_EVENT_SAS_30: | 567 | case SCU_EVENT_SAS_30: |
639 | case SCU_EVENT_SAS_30_SSC: | 568 | case SCU_EVENT_SAS_30_SSC: |
640 | scic_sds_phy_complete_link_training( | 569 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, |
641 | iphy, | 570 | SCI_PHY_SUB_AWAIT_IAF_UF); |
642 | SAS_LINK_RATE_3_0_GBPS, | ||
643 | SCI_PHY_SUB_AWAIT_IAF_UF); | ||
644 | break; | 571 | break; |
645 | case SCU_EVENT_SAS_60: | 572 | case SCU_EVENT_SAS_60: |
646 | case SCU_EVENT_SAS_60_SSC: | 573 | case SCU_EVENT_SAS_60_SSC: |
647 | scic_sds_phy_complete_link_training( | 574 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, |
648 | iphy, | 575 | SCI_PHY_SUB_AWAIT_IAF_UF); |
649 | SAS_LINK_RATE_6_0_GBPS, | ||
650 | SCI_PHY_SUB_AWAIT_IAF_UF); | ||
651 | break; | 576 | break; |
652 | case SCU_EVENT_SATA_SPINUP_HOLD: | 577 | case SCU_EVENT_SATA_SPINUP_HOLD: |
653 | /* | 578 | /* |
654 | * We were doing SAS PHY link training and received a SATA PHY event; | 579 | * We were doing SAS PHY link training and received a SATA PHY event; |
655 | * continue OOB/SN as if this were a SATA PHY */ | 580 | * continue OOB/SN as if this were a SATA PHY */ |
656 | scic_sds_phy_start_sata_link_training(iphy); | 581 | sci_phy_start_sata_link_training(iphy); |
657 | break; | 582 | break; |
658 | case SCU_EVENT_LINK_FAILURE: | 583 | case SCU_EVENT_LINK_FAILURE: |
659 | /* Link failure change state back to the starting state */ | 584 | /* Link failure change state back to the starting state */ |
@@ -673,14 +598,14 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
673 | switch (scu_get_event_code(event_code)) { | 598 | switch (scu_get_event_code(event_code)) { |
674 | case SCU_EVENT_SAS_PHY_DETECTED: | 599 | case SCU_EVENT_SAS_PHY_DETECTED: |
675 | /* Backup the state machine */ | 600 | /* Backup the state machine */ |
676 | scic_sds_phy_start_sas_link_training(iphy); | 601 | sci_phy_start_sas_link_training(iphy); |
677 | break; | 602 | break; |
678 | case SCU_EVENT_SATA_SPINUP_HOLD: | 603 | case SCU_EVENT_SATA_SPINUP_HOLD: |
679 | /* We were doing SAS PHY link training and received a | 604 | /* We were doing SAS PHY link training and received a |
680 | * SATA PHY event; continue OOB/SN as if this were a | 605 | * SATA PHY event; continue OOB/SN as if this were a |
681 | * SATA PHY | 606 | * SATA PHY |
682 | */ | 607 | */ |
683 | scic_sds_phy_start_sata_link_training(iphy); | 608 | sci_phy_start_sata_link_training(iphy); |
684 | break; | 609 | break; |
685 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: | 610 | case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: |
686 | case SCU_EVENT_LINK_FAILURE: | 611 | case SCU_EVENT_LINK_FAILURE: |
@@ -727,7 +652,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
727 | /* There has been a change in the phy type before OOB/SN for the | 652 | /* There has been a change in the phy type before OOB/SN for the |
728 | * SATA finished; start down the SAS link training path. | 653 | * SATA finished; start down the SAS link training path. |
729 | */ | 654 | */ |
730 | scic_sds_phy_start_sas_link_training(iphy); | 655 | sci_phy_start_sas_link_training(iphy); |
731 | break; | 656 | break; |
732 | 657 | ||
733 | default: | 658 | default: |
@@ -760,7 +685,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
760 | /* There has been a change in the phy type before OOB/SN for the | 685 | /* There has been a change in the phy type before OOB/SN for the |
761 | * SATA finished; start down the SAS link training path. | 686 | * SATA finished; start down the SAS link training path. |
762 | */ | 687 | */ |
763 | scic_sds_phy_start_sas_link_training(iphy); | 688 | sci_phy_start_sas_link_training(iphy); |
764 | break; | 689 | break; |
765 | default: | 690 | default: |
766 | dev_warn(sciphy_to_dev(iphy), | 691 | dev_warn(sciphy_to_dev(iphy), |
@@ -781,24 +706,18 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
781 | break; | 706 | break; |
782 | case SCU_EVENT_SATA_15: | 707 | case SCU_EVENT_SATA_15: |
783 | case SCU_EVENT_SATA_15_SSC: | 708 | case SCU_EVENT_SATA_15_SSC: |
784 | scic_sds_phy_complete_link_training( | 709 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS, |
785 | iphy, | 710 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); |
786 | SAS_LINK_RATE_1_5_GBPS, | ||
787 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); | ||
788 | break; | 711 | break; |
789 | case SCU_EVENT_SATA_30: | 712 | case SCU_EVENT_SATA_30: |
790 | case SCU_EVENT_SATA_30_SSC: | 713 | case SCU_EVENT_SATA_30_SSC: |
791 | scic_sds_phy_complete_link_training( | 714 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS, |
792 | iphy, | 715 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); |
793 | SAS_LINK_RATE_3_0_GBPS, | ||
794 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); | ||
795 | break; | 716 | break; |
796 | case SCU_EVENT_SATA_60: | 717 | case SCU_EVENT_SATA_60: |
797 | case SCU_EVENT_SATA_60_SSC: | 718 | case SCU_EVENT_SATA_60_SSC: |
798 | scic_sds_phy_complete_link_training( | 719 | sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS, |
799 | iphy, | 720 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); |
800 | SAS_LINK_RATE_6_0_GBPS, | ||
801 | SCI_PHY_SUB_AWAIT_SIG_FIS_UF); | ||
802 | break; | 721 | break; |
803 | case SCU_EVENT_LINK_FAILURE: | 722 | case SCU_EVENT_LINK_FAILURE: |
804 | /* Link failure change state back to the starting state */ | 723 | /* Link failure change state back to the starting state */ |
@@ -808,7 +727,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
808 | /* | 727 | /* |
809 | * There has been a change in the phy type before OOB/SN for the | 728 | * There has been a change in the phy type before OOB/SN for the |
810 | * SATA finished; start down the SAS link training path. */ | 729 | * SATA finished; start down the SAS link training path. */ |
811 | scic_sds_phy_start_sas_link_training(iphy); | 730 | sci_phy_start_sas_link_training(iphy); |
812 | break; | 731 | break; |
813 | default: | 732 | default: |
814 | dev_warn(sciphy_to_dev(iphy), | 733 | dev_warn(sciphy_to_dev(iphy), |
@@ -851,7 +770,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
851 | case SCU_EVENT_BROADCAST_CHANGE: | 770 | case SCU_EVENT_BROADCAST_CHANGE: |
852 | /* Broadcast change received. Notify the port. */ | 771 | /* Broadcast change received. Notify the port. */ |
853 | if (phy_get_non_dummy_port(iphy) != NULL) | 772 | if (phy_get_non_dummy_port(iphy) != NULL) |
854 | scic_sds_port_broadcast_change_received(iphy->owning_port, iphy); | 773 | sci_port_broadcast_change_received(iphy->owning_port, iphy); |
855 | else | 774 | else |
856 | iphy->bcn_received_while_port_unassigned = true; | 775 | iphy->bcn_received_while_port_unassigned = true; |
857 | break; | 776 | break; |
@@ -886,10 +805,9 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy, | |||
886 | } | 805 | } |
887 | } | 806 | } |
888 | 807 | ||
889 | enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | 808 | enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index) |
890 | u32 frame_index) | ||
891 | { | 809 | { |
892 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 810 | enum sci_phy_states state = iphy->sm.current_state_id; |
893 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 811 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
894 | enum sci_status result; | 812 | enum sci_status result; |
895 | unsigned long flags; | 813 | unsigned long flags; |
@@ -899,9 +817,9 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
899 | u32 *frame_words; | 817 | u32 *frame_words; |
900 | struct sas_identify_frame iaf; | 818 | struct sas_identify_frame iaf; |
901 | 819 | ||
902 | result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 820 | result = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
903 | frame_index, | 821 | frame_index, |
904 | (void **)&frame_words); | 822 | (void **)&frame_words); |
905 | 823 | ||
906 | if (result != SCI_SUCCESS) | 824 | if (result != SCI_SUCCESS) |
907 | return result; | 825 | return result; |
@@ -933,15 +851,15 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
933 | "unexpected frame id %x\n", | 851 | "unexpected frame id %x\n", |
934 | __func__, frame_index); | 852 | __func__, frame_index); |
935 | 853 | ||
936 | scic_sds_controller_release_frame(ihost, frame_index); | 854 | sci_controller_release_frame(ihost, frame_index); |
937 | return result; | 855 | return result; |
938 | } | 856 | } |
939 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { | 857 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { |
940 | struct dev_to_host_fis *frame_header; | 858 | struct dev_to_host_fis *frame_header; |
941 | u32 *fis_frame_data; | 859 | u32 *fis_frame_data; |
942 | 860 | ||
943 | result = scic_sds_unsolicited_frame_control_get_header( | 861 | result = sci_unsolicited_frame_control_get_header( |
944 | &(scic_sds_phy_get_controller(iphy)->uf_control), | 862 | &(sci_phy_get_controller(iphy)->uf_control), |
945 | frame_index, | 863 | frame_index, |
946 | (void **)&frame_header); | 864 | (void **)&frame_header); |
947 | 865 | ||
@@ -950,14 +868,14 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
950 | 868 | ||
951 | if ((frame_header->fis_type == FIS_REGD2H) && | 869 | if ((frame_header->fis_type == FIS_REGD2H) && |
952 | !(frame_header->status & ATA_BUSY)) { | 870 | !(frame_header->status & ATA_BUSY)) { |
953 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 871 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
954 | frame_index, | 872 | frame_index, |
955 | (void **)&fis_frame_data); | 873 | (void **)&fis_frame_data); |
956 | 874 | ||
957 | spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); | 875 | spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); |
958 | scic_sds_controller_copy_sata_response(&iphy->frame_rcvd.fis, | 876 | sci_controller_copy_sata_response(&iphy->frame_rcvd.fis, |
959 | frame_header, | 877 | frame_header, |
960 | fis_frame_data); | 878 | fis_frame_data); |
961 | spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); | 879 | spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); |
962 | 880 | ||
963 | /* got IAF; we can now go to the await spinup semaphore state */ | 881 | /* got IAF; we can now go to the await spinup semaphore state */ |
@@ -971,7 +889,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
971 | __func__, frame_index); | 889 | __func__, frame_index); |
972 | 890 | ||
973 | /* Regardless of the result, we are done with this frame */ | 891 | /* Regardless of the result, we are done with this frame */ |
974 | scic_sds_controller_release_frame(ihost, frame_index); | 892 | sci_controller_release_frame(ihost, frame_index); |
975 | 893 | ||
976 | return result; | 894 | return result; |
977 | } | 895 | } |
@@ -983,7 +901,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
983 | 901 | ||
984 | } | 902 | } |
985 | 903 | ||
986 | static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) | 904 | static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm) |
987 | { | 905 | { |
988 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 906 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
989 | 907 | ||
@@ -991,71 +909,71 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m | |||
991 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); | 909 | sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN); |
992 | } | 910 | } |
993 | 911 | ||
994 | static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) | 912 | static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) |
995 | { | 913 | { |
996 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 914 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
997 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 915 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
998 | 916 | ||
999 | scic_sds_controller_power_control_queue_insert(ihost, iphy); | 917 | sci_controller_power_control_queue_insert(ihost, iphy); |
1000 | } | 918 | } |
1001 | 919 | ||
1002 | static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) | 920 | static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) |
1003 | { | 921 | { |
1004 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 922 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1005 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 923 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1006 | 924 | ||
1007 | scic_sds_controller_power_control_queue_remove(ihost, iphy); | 925 | sci_controller_power_control_queue_remove(ihost, iphy); |
1008 | } | 926 | } |
1009 | 927 | ||
1010 | static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) | 928 | static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) |
1011 | { | 929 | { |
1012 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 930 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1013 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 931 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1014 | 932 | ||
1015 | scic_sds_controller_power_control_queue_insert(ihost, iphy); | 933 | sci_controller_power_control_queue_insert(ihost, iphy); |
1016 | } | 934 | } |
1017 | 935 | ||
1018 | static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) | 936 | static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) |
1019 | { | 937 | { |
1020 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 938 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1021 | struct isci_host *ihost = iphy->owning_port->owning_controller; | 939 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1022 | 940 | ||
1023 | scic_sds_controller_power_control_queue_remove(ihost, iphy); | 941 | sci_controller_power_control_queue_remove(ihost, iphy); |
1024 | } | 942 | } |
1025 | 943 | ||
1026 | static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) | 944 | static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) |
1027 | { | 945 | { |
1028 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 946 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1029 | 947 | ||
1030 | sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); | 948 | sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); |
1031 | } | 949 | } |
1032 | 950 | ||
1033 | static void scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) | 951 | static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm) |
1034 | { | 952 | { |
1035 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 953 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1036 | 954 | ||
1037 | sci_del_timer(&iphy->sata_timer); | 955 | sci_del_timer(&iphy->sata_timer); |
1038 | } | 956 | } |
1039 | 957 | ||
1040 | static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) | 958 | static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm) |
1041 | { | 959 | { |
1042 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 960 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1043 | 961 | ||
1044 | sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); | 962 | sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT); |
1045 | } | 963 | } |
1046 | 964 | ||
1047 | static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) | 965 | static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm) |
1048 | { | 966 | { |
1049 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 967 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1050 | 968 | ||
1051 | sci_del_timer(&iphy->sata_timer); | 969 | sci_del_timer(&iphy->sata_timer); |
1052 | } | 970 | } |
1053 | 971 | ||
1054 | static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) | 972 | static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm) |
1055 | { | 973 | { |
1056 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 974 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1057 | 975 | ||
1058 | if (scic_sds_port_link_detected(iphy->owning_port, iphy)) { | 976 | if (sci_port_link_detected(iphy->owning_port, iphy)) { |
1059 | 977 | ||
1060 | /* | 978 | /* |
1061 | * Clear the PE suspend condition so we can actually | 979 | * Clear the PE suspend condition so we can actually |
@@ -1063,7 +981,7 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas | |||
1063 | * The hardware will not respond to the XRDY until the PE | 981 | * The hardware will not respond to the XRDY until the PE |
1064 | * suspend condition is cleared. | 982 | * suspend condition is cleared. |
1065 | */ | 983 | */ |
1066 | scic_sds_phy_resume(iphy); | 984 | sci_phy_resume(iphy); |
1067 | 985 | ||
1068 | sci_mod_timer(&iphy->sata_timer, | 986 | sci_mod_timer(&iphy->sata_timer, |
1069 | SCIC_SDS_SIGNATURE_FIS_TIMEOUT); | 987 | SCIC_SDS_SIGNATURE_FIS_TIMEOUT); |
@@ -1071,14 +989,14 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas | |||
1071 | iphy->is_in_link_training = false; | 989 | iphy->is_in_link_training = false; |
1072 | } | 990 | } |
1073 | 991 | ||
1074 | static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) | 992 | static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm) |
1075 | { | 993 | { |
1076 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 994 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1077 | 995 | ||
1078 | sci_del_timer(&iphy->sata_timer); | 996 | sci_del_timer(&iphy->sata_timer); |
1079 | } | 997 | } |
1080 | 998 | ||
1081 | static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) | 999 | static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm) |
1082 | { | 1000 | { |
1083 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1001 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1084 | 1002 | ||
@@ -1169,7 +1087,7 @@ static void scu_link_layer_tx_hard_reset( | |||
1169 | &iphy->link_layer_registers->phy_configuration); | 1087 | &iphy->link_layer_registers->phy_configuration); |
1170 | } | 1088 | } |
1171 | 1089 | ||
1172 | static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) | 1090 | static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm) |
1173 | { | 1091 | { |
1174 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1092 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1175 | 1093 | ||
@@ -1182,12 +1100,12 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm) | |||
1182 | scu_link_layer_stop_protocol_engine(iphy); | 1100 | scu_link_layer_stop_protocol_engine(iphy); |
1183 | 1101 | ||
1184 | if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) | 1102 | if (iphy->sm.previous_state_id != SCI_PHY_INITIAL) |
1185 | scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), | 1103 | sci_controller_link_down(sci_phy_get_controller(iphy), |
1186 | phy_get_non_dummy_port(iphy), | 1104 | phy_get_non_dummy_port(iphy), |
1187 | iphy); | 1105 | iphy); |
1188 | } | 1106 | } |
1189 | 1107 | ||
1190 | static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) | 1108 | static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) |
1191 | { | 1109 | { |
1192 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1110 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1193 | 1111 | ||
@@ -1199,31 +1117,31 @@ static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm) | |||
1199 | iphy->bcn_received_while_port_unassigned = false; | 1117 | iphy->bcn_received_while_port_unassigned = false; |
1200 | 1118 | ||
1201 | if (iphy->sm.previous_state_id == SCI_PHY_READY) | 1119 | if (iphy->sm.previous_state_id == SCI_PHY_READY) |
1202 | scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy), | 1120 | sci_controller_link_down(sci_phy_get_controller(iphy), |
1203 | phy_get_non_dummy_port(iphy), | 1121 | phy_get_non_dummy_port(iphy), |
1204 | iphy); | 1122 | iphy); |
1205 | 1123 | ||
1206 | sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); | 1124 | sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL); |
1207 | } | 1125 | } |
1208 | 1126 | ||
1209 | static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm) | 1127 | static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm) |
1210 | { | 1128 | { |
1211 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1129 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1212 | 1130 | ||
1213 | scic_sds_controller_link_up(scic_sds_phy_get_controller(iphy), | 1131 | sci_controller_link_up(sci_phy_get_controller(iphy), |
1214 | phy_get_non_dummy_port(iphy), | 1132 | phy_get_non_dummy_port(iphy), |
1215 | iphy); | 1133 | iphy); |
1216 | 1134 | ||
1217 | } | 1135 | } |
1218 | 1136 | ||
1219 | static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm) | 1137 | static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm) |
1220 | { | 1138 | { |
1221 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1139 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1222 | 1140 | ||
1223 | scic_sds_phy_suspend(iphy); | 1141 | sci_phy_suspend(iphy); |
1224 | } | 1142 | } |
1225 | 1143 | ||
1226 | static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm) | 1144 | static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) |
1227 | { | 1145 | { |
1228 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1146 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1229 | 1147 | ||
@@ -1231,7 +1149,7 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm | |||
1231 | * the resetting state we don't notify the user regarding link up and | 1149 | * the resetting state we don't notify the user regarding link up and |
1232 | * link down notifications | 1150 | * link down notifications |
1233 | */ | 1151 | */ |
1234 | scic_sds_port_deactivate_phy(iphy->owning_port, iphy, false); | 1152 | sci_port_deactivate_phy(iphy->owning_port, iphy, false); |
1235 | 1153 | ||
1236 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { | 1154 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { |
1237 | scu_link_layer_tx_hard_reset(iphy); | 1155 | scu_link_layer_tx_hard_reset(iphy); |
@@ -1243,57 +1161,57 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm | |||
1243 | } | 1161 | } |
1244 | } | 1162 | } |
1245 | 1163 | ||
1246 | static const struct sci_base_state scic_sds_phy_state_table[] = { | 1164 | static const struct sci_base_state sci_phy_state_table[] = { |
1247 | [SCI_PHY_INITIAL] = { }, | 1165 | [SCI_PHY_INITIAL] = { }, |
1248 | [SCI_PHY_STOPPED] = { | 1166 | [SCI_PHY_STOPPED] = { |
1249 | .enter_state = scic_sds_phy_stopped_state_enter, | 1167 | .enter_state = sci_phy_stopped_state_enter, |
1250 | }, | 1168 | }, |
1251 | [SCI_PHY_STARTING] = { | 1169 | [SCI_PHY_STARTING] = { |
1252 | .enter_state = scic_sds_phy_starting_state_enter, | 1170 | .enter_state = sci_phy_starting_state_enter, |
1253 | }, | 1171 | }, |
1254 | [SCI_PHY_SUB_INITIAL] = { | 1172 | [SCI_PHY_SUB_INITIAL] = { |
1255 | .enter_state = scic_sds_phy_starting_initial_substate_enter, | 1173 | .enter_state = sci_phy_starting_initial_substate_enter, |
1256 | }, | 1174 | }, |
1257 | [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, | 1175 | [SCI_PHY_SUB_AWAIT_OSSP_EN] = { }, |
1258 | [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, | 1176 | [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { }, |
1259 | [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, | 1177 | [SCI_PHY_SUB_AWAIT_IAF_UF] = { }, |
1260 | [SCI_PHY_SUB_AWAIT_SAS_POWER] = { | 1178 | [SCI_PHY_SUB_AWAIT_SAS_POWER] = { |
1261 | .enter_state = scic_sds_phy_starting_await_sas_power_substate_enter, | 1179 | .enter_state = sci_phy_starting_await_sas_power_substate_enter, |
1262 | .exit_state = scic_sds_phy_starting_await_sas_power_substate_exit, | 1180 | .exit_state = sci_phy_starting_await_sas_power_substate_exit, |
1263 | }, | 1181 | }, |
1264 | [SCI_PHY_SUB_AWAIT_SATA_POWER] = { | 1182 | [SCI_PHY_SUB_AWAIT_SATA_POWER] = { |
1265 | .enter_state = scic_sds_phy_starting_await_sata_power_substate_enter, | 1183 | .enter_state = sci_phy_starting_await_sata_power_substate_enter, |
1266 | .exit_state = scic_sds_phy_starting_await_sata_power_substate_exit | 1184 | .exit_state = sci_phy_starting_await_sata_power_substate_exit |
1267 | }, | 1185 | }, |
1268 | [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { | 1186 | [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = { |
1269 | .enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter, | 1187 | .enter_state = sci_phy_starting_await_sata_phy_substate_enter, |
1270 | .exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit | 1188 | .exit_state = sci_phy_starting_await_sata_phy_substate_exit |
1271 | }, | 1189 | }, |
1272 | [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { | 1190 | [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = { |
1273 | .enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter, | 1191 | .enter_state = sci_phy_starting_await_sata_speed_substate_enter, |
1274 | .exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit | 1192 | .exit_state = sci_phy_starting_await_sata_speed_substate_exit |
1275 | }, | 1193 | }, |
1276 | [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { | 1194 | [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = { |
1277 | .enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter, | 1195 | .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter, |
1278 | .exit_state = scic_sds_phy_starting_await_sig_fis_uf_substate_exit | 1196 | .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit |
1279 | }, | 1197 | }, |
1280 | [SCI_PHY_SUB_FINAL] = { | 1198 | [SCI_PHY_SUB_FINAL] = { |
1281 | .enter_state = scic_sds_phy_starting_final_substate_enter, | 1199 | .enter_state = sci_phy_starting_final_substate_enter, |
1282 | }, | 1200 | }, |
1283 | [SCI_PHY_READY] = { | 1201 | [SCI_PHY_READY] = { |
1284 | .enter_state = scic_sds_phy_ready_state_enter, | 1202 | .enter_state = sci_phy_ready_state_enter, |
1285 | .exit_state = scic_sds_phy_ready_state_exit, | 1203 | .exit_state = sci_phy_ready_state_exit, |
1286 | }, | 1204 | }, |
1287 | [SCI_PHY_RESETTING] = { | 1205 | [SCI_PHY_RESETTING] = { |
1288 | .enter_state = scic_sds_phy_resetting_state_enter, | 1206 | .enter_state = sci_phy_resetting_state_enter, |
1289 | }, | 1207 | }, |
1290 | [SCI_PHY_FINAL] = { }, | 1208 | [SCI_PHY_FINAL] = { }, |
1291 | }; | 1209 | }; |
1292 | 1210 | ||
1293 | void scic_sds_phy_construct(struct isci_phy *iphy, | 1211 | void sci_phy_construct(struct isci_phy *iphy, |
1294 | struct isci_port *iport, u8 phy_index) | 1212 | struct isci_port *iport, u8 phy_index) |
1295 | { | 1213 | { |
1296 | sci_init_sm(&iphy->sm, scic_sds_phy_state_table, SCI_PHY_INITIAL); | 1214 | sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL); |
1297 | 1215 | ||
1298 | /* Copy the rest of the input data to our locals */ | 1216 | /* Copy the rest of the input data to our locals */ |
1299 | iphy->owning_port = iport; | 1217 | iphy->owning_port = iport; |
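The table just above is the driver's recurring state-machine pattern: an array indexed by state id whose optional enter_state/exit_state callbacks run on transitions, seeded by sci_init_sm() with SCI_PHY_INITIAL. The following standalone C sketch illustrates only that dispatch pattern; the demo_* names and the exit-then-enter transition helper are invented for the example and are not the driver's sci_base_state_machine API.

#include <stdio.h>

enum demo_state { DEMO_INITIAL, DEMO_STOPPED, DEMO_READY, DEMO_STATE_COUNT };

struct demo_sm;
typedef void (*demo_state_handler)(struct demo_sm *sm);

struct demo_state_entry {
        demo_state_handler enter_state;   /* optional, may be NULL */
        demo_state_handler exit_state;    /* optional, may be NULL */
};

struct demo_sm {
        enum demo_state current;
        const struct demo_state_entry *table;
};

static void demo_ready_enter(struct demo_sm *sm)
{
        printf("entered READY\n");
}

/* sparse table: states with no callbacks simply get empty entries */
static const struct demo_state_entry demo_table[DEMO_STATE_COUNT] = {
        [DEMO_INITIAL] = { },
        [DEMO_STOPPED] = { },
        [DEMO_READY]   = { .enter_state = demo_ready_enter },
};

/* a typical exit-then-enter transition; the real helper is sci_change_state() */
static void demo_change_state(struct demo_sm *sm, enum demo_state next)
{
        if (sm->table[sm->current].exit_state)
                sm->table[sm->current].exit_state(sm);
        sm->current = next;
        if (sm->table[next].enter_state)
                sm->table[next].enter_state(sm);
}

int main(void)
{
        struct demo_sm sm = { .current = DEMO_INITIAL, .table = demo_table };

        demo_change_state(&sm, DEMO_READY);
        return 0;
}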
@@ -1309,14 +1227,13 @@ void scic_sds_phy_construct(struct isci_phy *iphy, | |||
1309 | 1227 | ||
1310 | void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) | 1228 | void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) |
1311 | { | 1229 | { |
1312 | union scic_oem_parameters oem; | 1230 | struct sci_oem_params *oem = &ihost->oem_parameters; |
1313 | u64 sci_sas_addr; | 1231 | u64 sci_sas_addr; |
1314 | __be64 sas_addr; | 1232 | __be64 sas_addr; |
1315 | 1233 | ||
1316 | scic_oem_parameters_get(ihost, &oem); | 1234 | sci_sas_addr = oem->phys[index].sas_address.high; |
1317 | sci_sas_addr = oem.sds1.phys[index].sas_address.high; | ||
1318 | sci_sas_addr <<= 32; | 1235 | sci_sas_addr <<= 32; |
1319 | sci_sas_addr |= oem.sds1.phys[index].sas_address.low; | 1236 | sci_sas_addr |= oem->phys[index].sas_address.low; |
1320 | sas_addr = cpu_to_be64(sci_sas_addr); | 1237 | sas_addr = cpu_to_be64(sci_sas_addr); |
1321 | memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); | 1238 | memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); |
1322 | 1239 | ||
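isci_phy_init() above builds the 64-bit SAS address from the OEM table's 32-bit high/low halves, converts it to big-endian wire order, and copies the eight bytes into iphy->sas_addr. A minimal standalone sketch of that arithmetic, with a hand-rolled big-endian store in place of the kernel's cpu_to_be64() and made-up OEM values:

#include <stdint.h>
#include <stdio.h>

/* store a 64-bit value most-significant byte first, like cpu_to_be64() + memcpy */
static void store_be64(uint8_t out[8], uint64_t v)
{
        int i;

        for (i = 0; i < 8; i++)
                out[i] = (uint8_t)(v >> (56 - 8 * i));
}

int main(void)
{
        /* hypothetical values standing in for oem->phys[index].sas_address */
        uint32_t high = 0x5000B000, low = 0x12345678;
        uint64_t sas = ((uint64_t)high << 32) | low;
        uint8_t sas_addr[8];

        store_be64(sas_addr, sas);
        printf("sas address bytes: ");
        for (int i = 0; i < 8; i++)
                printf("%02x", (unsigned)sas_addr[i]);
        printf("\n");
        return 0;
}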
@@ -1365,14 +1282,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, | |||
1365 | switch (func) { | 1282 | switch (func) { |
1366 | case PHY_FUNC_DISABLE: | 1283 | case PHY_FUNC_DISABLE: |
1367 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1284 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1368 | scic_sds_phy_stop(iphy); | 1285 | sci_phy_stop(iphy); |
1369 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1286 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1370 | break; | 1287 | break; |
1371 | 1288 | ||
1372 | case PHY_FUNC_LINK_RESET: | 1289 | case PHY_FUNC_LINK_RESET: |
1373 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1290 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1374 | scic_sds_phy_stop(iphy); | 1291 | sci_phy_stop(iphy); |
1375 | scic_sds_phy_start(iphy); | 1292 | sci_phy_start(iphy); |
1376 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1293 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1377 | break; | 1294 | break; |
1378 | 1295 | ||
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 19aa444517b4..5d2c1b4906a3 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h | |||
@@ -76,7 +76,7 @@ | |||
76 | */ | 76 | */ |
77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 | 77 | #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 |
78 | 78 | ||
79 | enum scic_sds_phy_protocol { | 79 | enum sci_phy_protocol { |
80 | SCIC_SDS_PHY_PROTOCOL_UNKNOWN, | 80 | SCIC_SDS_PHY_PROTOCOL_UNKNOWN, |
81 | SCIC_SDS_PHY_PROTOCOL_SAS, | 81 | SCIC_SDS_PHY_PROTOCOL_SAS, |
82 | SCIC_SDS_PHY_PROTOCOL_SATA, | 82 | SCIC_SDS_PHY_PROTOCOL_SATA, |
@@ -95,7 +95,7 @@ struct isci_phy { | |||
95 | struct sci_base_state_machine sm; | 95 | struct sci_base_state_machine sm; |
96 | struct isci_port *owning_port; | 96 | struct isci_port *owning_port; |
97 | enum sas_linkrate max_negotiated_speed; | 97 | enum sas_linkrate max_negotiated_speed; |
98 | enum scic_sds_phy_protocol protocol; | 98 | enum sci_phy_protocol protocol; |
99 | u8 phy_index; | 99 | u8 phy_index; |
100 | bool bcn_received_while_port_unassigned; | 100 | bool bcn_received_while_port_unassigned; |
101 | bool is_in_link_training; | 101 | bool is_in_link_training; |
@@ -118,7 +118,7 @@ static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy) | |||
118 | return iphy; | 118 | return iphy; |
119 | } | 119 | } |
120 | 120 | ||
121 | struct scic_phy_cap { | 121 | struct sci_phy_cap { |
122 | union { | 122 | union { |
123 | struct { | 123 | struct { |
124 | /* | 124 | /* |
@@ -147,7 +147,7 @@ struct scic_phy_cap { | |||
147 | } __packed; | 147 | } __packed; |
148 | 148 | ||
149 | /* this data structure reflects the link layer transmit identification reg */ | 149 | /* this data structure reflects the link layer transmit identification reg */ |
150 | struct scic_phy_proto { | 150 | struct sci_phy_proto { |
151 | union { | 151 | union { |
152 | struct { | 152 | struct { |
153 | u16 _r_a:1; | 153 | u16 _r_a:1; |
@@ -167,12 +167,12 @@ struct scic_phy_proto { | |||
167 | 167 | ||
168 | 168 | ||
169 | /** | 169 | /** |
170 | * struct scic_phy_properties - This structure defines the properties common to | 170 | * struct sci_phy_properties - This structure defines the properties common to |
171 | * all phys that can be retrieved. | 171 | * all phys that can be retrieved. |
172 | * | 172 | * |
173 | * | 173 | * |
174 | */ | 174 | */ |
175 | struct scic_phy_properties { | 175 | struct sci_phy_properties { |
176 | /** | 176 | /** |
177 | * This field specifies the port that currently contains the | 177 | * This field specifies the port that currently contains the |
178 | * supplied phy. This field may be set to NULL | 178 | * supplied phy. This field may be set to NULL |
@@ -194,12 +194,12 @@ struct scic_phy_properties { | |||
194 | }; | 194 | }; |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * struct scic_sas_phy_properties - This structure defines the properties, | 197 | * struct sci_sas_phy_properties - This structure defines the properties, |
198 | * specific to a SAS phy, that can be retrieved. | 198 | * specific to a SAS phy, that can be retrieved. |
199 | * | 199 | * |
200 | * | 200 | * |
201 | */ | 201 | */ |
202 | struct scic_sas_phy_properties { | 202 | struct sci_sas_phy_properties { |
203 | /** | 203 | /** |
204 | * This field delineates the Identify Address Frame received | 204 | * This field delineates the Identify Address Frame received |
205 | * from the remote end point. | 205 | * from the remote end point. |
@@ -210,17 +210,17 @@ struct scic_sas_phy_properties { | |||
210 | * This field delineates the Phy capabilities structure received | 210 | * This field delineates the Phy capabilities structure received |
211 | * from the remote end point. | 211 | * from the remote end point. |
212 | */ | 212 | */ |
213 | struct scic_phy_cap rcvd_cap; | 213 | struct sci_phy_cap rcvd_cap; |
214 | 214 | ||
215 | }; | 215 | }; |
216 | 216 | ||
217 | /** | 217 | /** |
218 | * struct scic_sata_phy_properties - This structure defines the properties, | 218 | * struct sci_sata_phy_properties - This structure defines the properties, |
219 | * specific to a SATA phy, that can be retrieved. | 219 | * specific to a SATA phy, that can be retrieved. |
220 | * | 220 | * |
221 | * | 221 | * |
222 | */ | 222 | */ |
223 | struct scic_sata_phy_properties { | 223 | struct sci_sata_phy_properties { |
224 | /** | 224 | /** |
225 | * This field delineates the signature FIS received from the | 225 | * This field delineates the signature FIS received from the |
226 | * attached target. | 226 | * attached target. |
@@ -236,12 +236,12 @@ struct scic_sata_phy_properties { | |||
236 | }; | 236 | }; |
237 | 237 | ||
238 | /** | 238 | /** |
239 | * enum scic_phy_counter_id - This enumeration depicts the various pieces of | 239 | * enum sci_phy_counter_id - This enumeration depicts the various pieces of |
240 | * optional information that can be retrieved for a specific phy. | 240 | * optional information that can be retrieved for a specific phy. |
241 | * | 241 | * |
242 | * | 242 | * |
243 | */ | 243 | */ |
244 | enum scic_phy_counter_id { | 244 | enum sci_phy_counter_id { |
245 | /** | 245 | /** |
246 | * This PHY information field tracks the number of frames received. | 246 | * This PHY information field tracks the number of frames received. |
247 | */ | 247 | */ |
@@ -344,7 +344,7 @@ enum scic_phy_counter_id { | |||
344 | SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR | 344 | SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR |
345 | }; | 345 | }; |
346 | 346 | ||
347 | enum scic_sds_phy_states { | 347 | enum sci_phy_states { |
348 | /** | 348 | /** |
349 | * Simply the initial state for the base domain state machine. | 349 | * Simply the initial state for the base domain state machine. |
350 | */ | 350 | */ |
@@ -441,77 +441,77 @@ enum scic_sds_phy_states { | |||
441 | }; | 441 | }; |
442 | 442 | ||
443 | /** | 443 | /** |
444 | * scic_sds_phy_get_index() - | 444 | * sci_phy_get_index() - |
445 | * | 445 | * |
446 | * This macro returns the phy index for the specified phy | 446 | * This macro returns the phy index for the specified phy |
447 | */ | 447 | */ |
448 | #define scic_sds_phy_get_index(phy) \ | 448 | #define sci_phy_get_index(phy) \ |
449 | ((phy)->phy_index) | 449 | ((phy)->phy_index) |
450 | 450 | ||
451 | /** | 451 | /** |
452 | * scic_sds_phy_get_controller() - This macro returns the controller for this | 452 | * sci_phy_get_controller() - This macro returns the controller for this |
453 | * phy | 453 | * phy |
454 | * | 454 | * |
455 | * | 455 | * |
456 | */ | 456 | */ |
457 | #define scic_sds_phy_get_controller(phy) \ | 457 | #define sci_phy_get_controller(phy) \ |
458 | (scic_sds_port_get_controller((phy)->owning_port)) | 458 | (sci_port_get_controller((phy)->owning_port)) |
459 | 459 | ||
460 | void scic_sds_phy_construct( | 460 | void sci_phy_construct( |
461 | struct isci_phy *iphy, | 461 | struct isci_phy *iphy, |
462 | struct isci_port *iport, | 462 | struct isci_port *iport, |
463 | u8 phy_index); | 463 | u8 phy_index); |
464 | 464 | ||
465 | struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); | 465 | struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy); |
466 | 466 | ||
467 | void scic_sds_phy_set_port( | 467 | void sci_phy_set_port( |
468 | struct isci_phy *iphy, | 468 | struct isci_phy *iphy, |
469 | struct isci_port *iport); | 469 | struct isci_port *iport); |
470 | 470 | ||
471 | enum sci_status scic_sds_phy_initialize( | 471 | enum sci_status sci_phy_initialize( |
472 | struct isci_phy *iphy, | 472 | struct isci_phy *iphy, |
473 | struct scu_transport_layer_registers __iomem *transport_layer_registers, | 473 | struct scu_transport_layer_registers __iomem *transport_layer_registers, |
474 | struct scu_link_layer_registers __iomem *link_layer_registers); | 474 | struct scu_link_layer_registers __iomem *link_layer_registers); |
475 | 475 | ||
476 | enum sci_status scic_sds_phy_start( | 476 | enum sci_status sci_phy_start( |
477 | struct isci_phy *iphy); | 477 | struct isci_phy *iphy); |
478 | 478 | ||
479 | enum sci_status scic_sds_phy_stop( | 479 | enum sci_status sci_phy_stop( |
480 | struct isci_phy *iphy); | 480 | struct isci_phy *iphy); |
481 | 481 | ||
482 | enum sci_status scic_sds_phy_reset( | 482 | enum sci_status sci_phy_reset( |
483 | struct isci_phy *iphy); | 483 | struct isci_phy *iphy); |
484 | 484 | ||
485 | void scic_sds_phy_resume( | 485 | void sci_phy_resume( |
486 | struct isci_phy *iphy); | 486 | struct isci_phy *iphy); |
487 | 487 | ||
488 | void scic_sds_phy_setup_transport( | 488 | void sci_phy_setup_transport( |
489 | struct isci_phy *iphy, | 489 | struct isci_phy *iphy, |
490 | u32 device_id); | 490 | u32 device_id); |
491 | 491 | ||
492 | enum sci_status scic_sds_phy_event_handler( | 492 | enum sci_status sci_phy_event_handler( |
493 | struct isci_phy *iphy, | 493 | struct isci_phy *iphy, |
494 | u32 event_code); | 494 | u32 event_code); |
495 | 495 | ||
496 | enum sci_status scic_sds_phy_frame_handler( | 496 | enum sci_status sci_phy_frame_handler( |
497 | struct isci_phy *iphy, | 497 | struct isci_phy *iphy, |
498 | u32 frame_index); | 498 | u32 frame_index); |
499 | 499 | ||
500 | enum sci_status scic_sds_phy_consume_power_handler( | 500 | enum sci_status sci_phy_consume_power_handler( |
501 | struct isci_phy *iphy); | 501 | struct isci_phy *iphy); |
502 | 502 | ||
503 | void scic_sds_phy_get_sas_address( | 503 | void sci_phy_get_sas_address( |
504 | struct isci_phy *iphy, | 504 | struct isci_phy *iphy, |
505 | struct sci_sas_address *sas_address); | 505 | struct sci_sas_address *sas_address); |
506 | 506 | ||
507 | void scic_sds_phy_get_attached_sas_address( | 507 | void sci_phy_get_attached_sas_address( |
508 | struct isci_phy *iphy, | 508 | struct isci_phy *iphy, |
509 | struct sci_sas_address *sas_address); | 509 | struct sci_sas_address *sas_address); |
510 | 510 | ||
511 | struct scic_phy_proto; | 511 | struct sci_phy_proto; |
512 | void scic_sds_phy_get_protocols( | 512 | void sci_phy_get_protocols( |
513 | struct isci_phy *iphy, | 513 | struct isci_phy *iphy, |
514 | struct scic_phy_proto *protocols); | 514 | struct sci_phy_proto *protocols); |
515 | enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); | 515 | enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy); |
516 | 516 | ||
517 | struct isci_host; | 517 | struct isci_host; |
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index c434d5a0effa..1822ed68409e 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c | |||
@@ -74,57 +74,35 @@ static void isci_port_change_state(struct isci_port *iport, enum isci_status sta | |||
74 | spin_unlock_irqrestore(&iport->state_lock, flags); | 74 | spin_unlock_irqrestore(&iport->state_lock, flags); |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) |
78 | * This function will indicate which protocols are supported by this port. | ||
79 | * @sci_port: a handle corresponding to the SAS port for which to return the | ||
80 | * supported protocols. | ||
81 | * @protocols: This parameter specifies a pointer to a data structure | ||
82 | * which the core will copy the protocol values for the port from the | ||
83 | * transmit_identification register. | ||
84 | */ | ||
85 | static void | ||
86 | scic_sds_port_get_protocols(struct isci_port *iport, | ||
87 | struct scic_phy_proto *protocols) | ||
88 | { | 78 | { |
89 | u8 index; | 79 | u8 index; |
90 | 80 | ||
91 | protocols->all = 0; | 81 | proto->all = 0; |
92 | |||
93 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 82 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
94 | if (iport->phy_table[index] != NULL) { | 83 | struct isci_phy *iphy = iport->phy_table[index]; |
95 | scic_sds_phy_get_protocols(iport->phy_table[index], | 84 | |
96 | protocols); | 85 | if (!iphy) |
97 | } | 86 | continue; |
87 | sci_phy_get_protocols(iphy, proto); | ||
98 | } | 88 | } |
99 | } | 89 | } |
100 | 90 | ||
101 | /** | 91 | static u32 sci_port_get_phys(struct isci_port *iport) |
102 | * This method requests a list (mask) of the phys contained in the supplied SAS | ||
103 | * port. | ||
104 | * @sci_port: a handle corresponding to the SAS port for which to return the | ||
105 | * phy mask. | ||
106 | * | ||
107 | * Return a bit mask indicating which phys are a part of this port. Each bit | ||
108 | * corresponds to a phy identifier (e.g. bit 0 = phy id 0). | ||
109 | */ | ||
110 | static u32 scic_sds_port_get_phys(struct isci_port *iport) | ||
111 | { | 92 | { |
112 | u32 index; | 93 | u32 index; |
113 | u32 mask; | 94 | u32 mask; |
114 | 95 | ||
115 | mask = 0; | 96 | mask = 0; |
116 | 97 | for (index = 0; index < SCI_MAX_PHYS; index++) | |
117 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 98 | if (iport->phy_table[index]) |
118 | if (iport->phy_table[index] != NULL) { | ||
119 | mask |= (1 << index); | 99 | mask |= (1 << index); |
120 | } | ||
121 | } | ||
122 | 100 | ||
123 | return mask; | 101 | return mask; |
124 | } | 102 | } |
125 | 103 | ||
126 | /** | 104 | /** |
127 | * scic_port_get_properties() - This method simply returns the properties | 105 | * sci_port_get_properties() - This method simply returns the properties |
128 | * regarding the port, such as: physical index, protocols, sas address, etc. | 106 | * regarding the port, such as: physical index, protocols, sas address, etc. |
129 | * @port: this parameter specifies the port for which to retrieve the physical | 107 | * @port: this parameter specifies the port for which to retrieve the physical |
130 | * index. | 108 | * index. |
@@ -136,22 +114,22 @@ static u32 scic_sds_port_get_phys(struct isci_port *iport) | |||
136 | * value is returned if the specified port is not valid. When this value is | 114 | * value is returned if the specified port is not valid. When this value is |
137 | * returned, no data is copied to the properties output parameter. | 115 | * returned, no data is copied to the properties output parameter. |
138 | */ | 116 | */ |
139 | static enum sci_status scic_port_get_properties(struct isci_port *iport, | 117 | static enum sci_status sci_port_get_properties(struct isci_port *iport, |
140 | struct scic_port_properties *prop) | 118 | struct sci_port_properties *prop) |
141 | { | 119 | { |
142 | if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) | 120 | if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) |
143 | return SCI_FAILURE_INVALID_PORT; | 121 | return SCI_FAILURE_INVALID_PORT; |
144 | 122 | ||
145 | prop->index = iport->logical_port_index; | 123 | prop->index = iport->logical_port_index; |
146 | prop->phy_mask = scic_sds_port_get_phys(iport); | 124 | prop->phy_mask = sci_port_get_phys(iport); |
147 | scic_sds_port_get_sas_address(iport, &prop->local.sas_address); | 125 | sci_port_get_sas_address(iport, &prop->local.sas_address); |
148 | scic_sds_port_get_protocols(iport, &prop->local.protocols); | 126 | sci_port_get_protocols(iport, &prop->local.protocols); |
149 | scic_sds_port_get_attached_sas_address(iport, &prop->remote.sas_address); | 127 | sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); |
150 | 128 | ||
151 | return SCI_SUCCESS; | 129 | return SCI_SUCCESS; |
152 | } | 130 | } |
153 | 131 | ||
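sci_port_get_phys() above reduces the port's phy_table to a bit mask in which bit N is set when phy N belongs to the port, and sci_port_get_properties() hands that mask back along with the local and attached SAS addresses. A standalone sketch of the mask computation (the demo_* names and the SCI_MAX_PHYS value of 4 are assumptions for the example):

#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_PHYS 4  /* assumed value for the sketch */

struct demo_phy { int id; };

static uint32_t demo_port_get_phys(struct demo_phy *phy_table[SCI_MAX_PHYS])
{
        uint32_t index, mask = 0;

        for (index = 0; index < SCI_MAX_PHYS; index++)
                if (phy_table[index])
                        mask |= 1u << index;    /* bit 0 == phy id 0, etc. */
        return mask;
}

int main(void)
{
        struct demo_phy phy0 = { 0 }, phy2 = { 2 };
        struct demo_phy *table[SCI_MAX_PHYS] = { &phy0, NULL, &phy2, NULL };

        /* phys 0 and 2 are members, so this prints 0x5 */
        printf("phy mask: 0x%x\n", (unsigned)demo_port_get_phys(table));
        return 0;
}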
154 | static void scic_port_bcn_enable(struct isci_port *iport) | 132 | static void sci_port_bcn_enable(struct isci_port *iport) |
155 | { | 133 | { |
156 | struct isci_phy *iphy; | 134 | struct isci_phy *iphy; |
157 | u32 val; | 135 | u32 val; |
@@ -167,7 +145,7 @@ static void scic_port_bcn_enable(struct isci_port *iport) | |||
167 | } | 145 | } |
168 | } | 146 | } |
169 | 147 | ||
170 | /* called under scic_lock to stabilize phy:port associations */ | 148 | /* called under scic_lock to stabilize phy:port associations */ |
171 | void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) | 149 | void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport) |
172 | { | 150 | { |
173 | int i; | 151 | int i; |
@@ -209,7 +187,7 @@ static void isci_port_bc_change_received(struct isci_host *ihost, | |||
209 | ihost->sas_ha.notify_port_event(&iphy->sas_phy, | 187 | ihost->sas_ha.notify_port_event(&iphy->sas_phy, |
210 | PORTE_BROADCAST_RCVD); | 188 | PORTE_BROADCAST_RCVD); |
211 | } | 189 | } |
212 | scic_port_bcn_enable(iport); | 190 | sci_port_bcn_enable(iport); |
213 | } | 191 | } |
214 | 192 | ||
215 | static void isci_port_link_up(struct isci_host *isci_host, | 193 | static void isci_port_link_up(struct isci_host *isci_host, |
@@ -217,7 +195,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
217 | struct isci_phy *iphy) | 195 | struct isci_phy *iphy) |
218 | { | 196 | { |
219 | unsigned long flags; | 197 | unsigned long flags; |
220 | struct scic_port_properties properties; | 198 | struct sci_port_properties properties; |
221 | unsigned long success = true; | 199 | unsigned long success = true; |
222 | 200 | ||
223 | BUG_ON(iphy->isci_port != NULL); | 201 | BUG_ON(iphy->isci_port != NULL); |
@@ -232,7 +210,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
232 | 210 | ||
233 | isci_port_change_state(iphy->isci_port, isci_starting); | 211 | isci_port_change_state(iphy->isci_port, isci_starting); |
234 | 212 | ||
235 | scic_port_get_properties(iport, &properties); | 213 | sci_port_get_properties(iport, &properties); |
236 | 214 | ||
237 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { | 215 | if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { |
238 | u64 attached_sas_address; | 216 | u64 attached_sas_address; |
@@ -245,7 +223,7 @@ static void isci_port_link_up(struct isci_host *isci_host, | |||
245 | * automagically assign a SAS address to the end device | 223 | * automagically assign a SAS address to the end device |
246 | * for the purpose of creating a port. This SAS address | 224 | * for the purpose of creating a port. This SAS address |
247 | * will not be the same as assigned to the PHY and needs | 225 | * will not be the same as assigned to the PHY and needs |
248 | * to be obtained from struct scic_port_properties properties. | 226 | * to be obtained from struct sci_port_properties properties. |
249 | */ | 227 | */ |
250 | attached_sas_address = properties.remote.sas_address.high; | 228 | attached_sas_address = properties.remote.sas_address.high; |
251 | attached_sas_address <<= 32; | 229 | attached_sas_address <<= 32; |
@@ -399,50 +377,40 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port, | |||
399 | * doesn't preclude all configurations. It merely ensures that a phy is part | 377 | * doesn't preclude all configurations. It merely ensures that a phy is part |
400 | * of the allowable set of phy identifiers for that port. For example, one | 378 | * of the allowable set of phy identifiers for that port. For example, one |
401 | * could assign phy 3 to port 0 and no other phys. Please refer to | 379 | * could assign phy 3 to port 0 and no other phys. Please refer to |
402 | * scic_sds_port_is_phy_mask_valid() for information regarding whether the | 380 | * sci_port_is_phy_mask_valid() for information regarding whether the |
403 | * phy_mask for a port can be supported. Returns true if this is a valid phy | 381 | * phy_mask for a port can be supported. Returns true if this is a valid phy |
404 | * assignment for the port, false if this is not a valid phy assignment for the | 382 | * assignment for the port, false if this is not a valid phy assignment for the |
405 | * port. | 383 | * port. |
406 | */ | 384 | */ |
407 | bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, | 385 | bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) |
408 | u32 phy_index) | ||
409 | { | 386 | { |
387 | struct isci_host *ihost = iport->owning_controller; | ||
388 | struct sci_user_parameters *user = &ihost->user_parameters; | ||
389 | |||
410 | /* Initialize to invalid value. */ | 390 | /* Initialize to invalid value. */ |
411 | u32 existing_phy_index = SCI_MAX_PHYS; | 391 | u32 existing_phy_index = SCI_MAX_PHYS; |
412 | u32 index; | 392 | u32 index; |
413 | 393 | ||
414 | if ((iport->physical_port_index == 1) && (phy_index != 1)) { | 394 | if ((iport->physical_port_index == 1) && (phy_index != 1)) |
415 | return false; | 395 | return false; |
416 | } | ||
417 | 396 | ||
418 | if (iport->physical_port_index == 3 && phy_index != 3) { | 397 | if (iport->physical_port_index == 3 && phy_index != 3) |
419 | return false; | 398 | return false; |
420 | } | ||
421 | 399 | ||
422 | if ( | 400 | if (iport->physical_port_index == 2 && |
423 | (iport->physical_port_index == 2) | 401 | (phy_index == 0 || phy_index == 1)) |
424 | && ((phy_index == 0) || (phy_index == 1)) | ||
425 | ) { | ||
426 | return false; | 402 | return false; |
427 | } | ||
428 | 403 | ||
429 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 404 | for (index = 0; index < SCI_MAX_PHYS; index++) |
430 | if ((iport->phy_table[index] != NULL) | 405 | if (iport->phy_table[index] && index != phy_index) |
431 | && (index != phy_index)) { | ||
432 | existing_phy_index = index; | 406 | existing_phy_index = index; |
433 | } | ||
434 | } | ||
435 | 407 | ||
436 | /* | 408 | /* Ensure that all of the phys in the port are capable of |
437 | * Ensure that all of the phys in the port are capable of | 409 | * operating at the same maximum link rate. |
438 | * operating at the same maximum link rate. */ | 410 | */ |
439 | if ( | 411 | if (existing_phy_index < SCI_MAX_PHYS && |
440 | (existing_phy_index < SCI_MAX_PHYS) | 412 | user->phys[phy_index].max_speed_generation != |
441 | && (iport->owning_controller->user_parameters.sds1.phys[ | 413 | user->phys[existing_phy_index].max_speed_generation) |
442 | phy_index].max_speed_generation != | ||
443 | iport->owning_controller->user_parameters.sds1.phys[ | ||
444 | existing_phy_index].max_speed_generation) | ||
445 | ) | ||
446 | return false; | 414 | return false; |
447 | 415 | ||
448 | return true; | 416 | return true; |
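The rewritten sci_port_is_valid_phy_assignment() above enforces two rules: the fixed narrow ports only accept their matching phy indices, and a phy may only join a port whose existing members share its max_speed_generation. A standalone restatement of just those checks, with the user-parameter lookup replaced by a plain array and all names invented for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_PHYS 4  /* assumed value for the sketch */

/* stand-in for user->phys[i].max_speed_generation */
static const uint8_t max_speed_generation[SCI_MAX_PHYS] = { 3, 3, 2, 3 };

static bool demo_is_valid_phy_assignment(uint8_t physical_port_index,
                                         const bool phy_present[SCI_MAX_PHYS],
                                         uint32_t phy_index)
{
        uint32_t existing = SCI_MAX_PHYS, i;

        /* ports 1 and 3 may only ever contain their matching phy ... */
        if (physical_port_index == 1 && phy_index != 1)
                return false;
        if (physical_port_index == 3 && phy_index != 3)
                return false;
        /* ... and port 2 may not claim phy 0 or 1 */
        if (physical_port_index == 2 && (phy_index == 0 || phy_index == 1))
                return false;

        for (i = 0; i < SCI_MAX_PHYS; i++)
                if (phy_present[i] && i != phy_index)
                        existing = i;

        /* all phys in a port must support the same maximum link rate */
        if (existing < SCI_MAX_PHYS &&
            max_speed_generation[phy_index] != max_speed_generation[existing])
                return false;

        return true;
}

int main(void)
{
        bool present[SCI_MAX_PHYS] = { true, false, false, false }; /* phy 0 already in port 0 */

        printf("%d\n", demo_is_valid_phy_assignment(0, present, 1)); /* 1: gen 3 matches gen 3 */
        printf("%d\n", demo_is_valid_phy_assignment(0, present, 2)); /* 0: gen 2 vs gen 3 */
        return 0;
}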
@@ -460,7 +428,7 @@ bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport, | |||
460 | * phy mask can be supported. Returns true if this is a valid phy assignment for the | 428 | * phy mask can be supported. Returns true if this is a valid phy assignment for the |
461 | * port, false if this is not a valid phy assignment for the port. | 429 | * port, false if this is not a valid phy assignment for the port. |
462 | */ | 430 | */ |
463 | static bool scic_sds_port_is_phy_mask_valid( | 431 | static bool sci_port_is_phy_mask_valid( |
464 | struct isci_port *iport, | 432 | struct isci_port *iport, |
465 | u32 phy_mask) | 433 | u32 phy_mask) |
466 | { | 434 | { |
@@ -493,10 +461,10 @@ static bool scic_sds_port_is_phy_mask_valid( | |||
493 | * the port. Currently, the lowest order phy that is connected is returned. | 461 | * the port. Currently, the lowest order phy that is connected is returned. |
494 | * This method returns a pointer to a phy object. NULL is | 462 | * This method returns a pointer to a phy object. NULL is |
495 | * returned if there are no currently active (i.e. connected to a remote end | 463 | * returned if there are no currently active (i.e. connected to a remote end |
496 | * point) phys contained in the port. All other values specify a struct scic_sds_phy | 464 | * point) phys contained in the port. All other values specify a struct sci_phy |
497 | * object that is active in the port. | 465 | * object that is active in the port. |
498 | */ | 466 | */ |
499 | static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *iport) | 467 | static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) |
500 | { | 468 | { |
501 | u32 index; | 469 | u32 index; |
502 | struct isci_phy *iphy; | 470 | struct isci_phy *iphy; |
@@ -506,14 +474,14 @@ static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *ipor | |||
506 | * connected to the remote end-point. | 474 | * connected to the remote end-point. |
507 | */ | 475 | */ |
508 | iphy = iport->phy_table[index]; | 476 | iphy = iport->phy_table[index]; |
509 | if (iphy && scic_sds_port_active_phy(iport, iphy)) | 477 | if (iphy && sci_port_active_phy(iport, iphy)) |
510 | return iphy; | 478 | return iphy; |
511 | } | 479 | } |
512 | 480 | ||
513 | return NULL; | 481 | return NULL; |
514 | } | 482 | } |
515 | 483 | ||
516 | static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) | 484 | static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) |
517 | { | 485 | { |
518 | /* Check to see if we can add this phy to a port | 486 | /* Check to see if we can add this phy to a port |
519 | * that means that the phy is not part of a port and that the port does | 487 | * that means that the phy is not part of a port and that the port does |
@@ -521,13 +489,13 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc | |||
521 | */ | 489 | */ |
522 | if (!iport->phy_table[iphy->phy_index] && | 490 | if (!iport->phy_table[iphy->phy_index] && |
523 | !phy_get_non_dummy_port(iphy) && | 491 | !phy_get_non_dummy_port(iphy) && |
524 | scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { | 492 | sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { |
525 | /* Phy is being added in the stopped state so we are in MPC mode | 493 | /* Phy is being added in the stopped state so we are in MPC mode |
526 | * make logical port index = physical port index | 494 | * make logical port index = physical port index |
527 | */ | 495 | */ |
528 | iport->logical_port_index = iport->physical_port_index; | 496 | iport->logical_port_index = iport->physical_port_index; |
529 | iport->phy_table[iphy->phy_index] = iphy; | 497 | iport->phy_table[iphy->phy_index] = iphy; |
530 | scic_sds_phy_set_port(iphy, iport); | 498 | sci_phy_set_port(iphy, iport); |
531 | 499 | ||
532 | return SCI_SUCCESS; | 500 | return SCI_SUCCESS; |
533 | } | 501 | } |
@@ -535,8 +503,7 @@ static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isc | |||
535 | return SCI_FAILURE; | 503 | return SCI_FAILURE; |
536 | } | 504 | } |
537 | 505 | ||
538 | static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, | 506 | static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) |
539 | struct isci_phy *iphy) | ||
540 | { | 507 | { |
541 | /* Make sure that this phy is part of this port */ | 508 | /* Make sure that this phy is part of this port */ |
542 | if (iport->phy_table[iphy->phy_index] == iphy && | 509 | if (iport->phy_table[iphy->phy_index] == iphy && |
@@ -544,7 +511,7 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, | |||
544 | struct isci_host *ihost = iport->owning_controller; | 511 | struct isci_host *ihost = iport->owning_controller; |
545 | 512 | ||
546 | /* Yep it is assigned to this port so remove it */ | 513 | /* Yep it is assigned to this port so remove it */ |
547 | scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); | 514 | sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); |
548 | iport->phy_table[iphy->phy_index] = NULL; | 515 | iport->phy_table[iphy->phy_index] = NULL; |
549 | return SCI_SUCCESS; | 516 | return SCI_SUCCESS; |
550 | } | 517 | } |
@@ -552,45 +519,18 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, | |||
552 | return SCI_FAILURE; | 519 | return SCI_FAILURE; |
553 | } | 520 | } |
554 | 521 | ||
555 | 522 | void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) | |
556 | /** | ||
557 | * This method requests the SAS address for the supplied SAS port from the SCI | ||
558 | * implementation. | ||
559 | * @sci_port: a handle corresponding to the SAS port for which to return the | ||
560 | * SAS address. | ||
561 | * @sas_address: This parameter specifies a pointer to a SAS address structure | ||
562 | * into which the core will copy the SAS address for the port. | ||
563 | * | ||
564 | */ | ||
565 | void scic_sds_port_get_sas_address( | ||
566 | struct isci_port *iport, | ||
567 | struct sci_sas_address *sas_address) | ||
568 | { | 523 | { |
569 | u32 index; | 524 | u32 index; |
570 | 525 | ||
571 | sas_address->high = 0; | 526 | sas->high = 0; |
572 | sas_address->low = 0; | 527 | sas->low = 0; |
573 | 528 | for (index = 0; index < SCI_MAX_PHYS; index++) | |
574 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 529 | if (iport->phy_table[index]) |
575 | if (iport->phy_table[index] != NULL) { | 530 | sci_phy_get_sas_address(iport->phy_table[index], sas); |
576 | scic_sds_phy_get_sas_address(iport->phy_table[index], sas_address); | ||
577 | } | ||
578 | } | ||
579 | } | 531 | } |
580 | 532 | ||
581 | /* | 533 | void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) |
582 | * This function requests the SAS address for the device directly attached to | ||
583 | * this SAS port. | ||
584 | * @sci_port: a handle corresponding to the SAS port for which to return the | ||
585 | * SAS address. | ||
586 | * @sas_address: This parameter specifies a pointer to a SAS address structure | ||
587 | * into which the core will copy the SAS address for the device directly | ||
588 | * attached to the port. | ||
589 | * | ||
590 | */ | ||
591 | void scic_sds_port_get_attached_sas_address( | ||
592 | struct isci_port *iport, | ||
593 | struct sci_sas_address *sas_address) | ||
594 | { | 534 | { |
595 | struct isci_phy *iphy; | 535 | struct isci_phy *iphy; |
596 | 536 | ||
@@ -598,23 +538,22 @@ void scic_sds_port_get_attached_sas_address( | |||
598 | * Ensure that the phy is both part of the port and currently | 538 | * Ensure that the phy is both part of the port and currently |
599 | * connected to the remote end-point. | 539 | * connected to the remote end-point. |
600 | */ | 540 | */ |
601 | iphy = scic_sds_port_get_a_connected_phy(iport); | 541 | iphy = sci_port_get_a_connected_phy(iport); |
602 | if (iphy) { | 542 | if (iphy) { |
603 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { | 543 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { |
604 | scic_sds_phy_get_attached_sas_address(iphy, | 544 | sci_phy_get_attached_sas_address(iphy, sas); |
605 | sas_address); | ||
606 | } else { | 545 | } else { |
607 | scic_sds_phy_get_sas_address(iphy, sas_address); | 546 | sci_phy_get_sas_address(iphy, sas); |
608 | sas_address->low += iphy->phy_index; | 547 | sas->low += iphy->phy_index; |
609 | } | 548 | } |
610 | } else { | 549 | } else { |
611 | sas_address->high = 0; | 550 | sas->high = 0; |
612 | sas_address->low = 0; | 551 | sas->low = 0; |
613 | } | 552 | } |
614 | } | 553 | } |
615 | 554 | ||
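In sci_port_get_attached_sas_address() above, SAS phys report the address taken from the identify frame, while SATA targets carry no SAS address of their own, so one is synthesized from the local phy address plus the phy index (the sas->low += iphy->phy_index branch). A small standalone sketch of that fallback, with fictitious addresses and invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sas_address { uint32_t high, low; };

static void demo_attached_address(bool is_sata, uint32_t phy_index,
                                  const struct demo_sas_address *local,
                                  const struct demo_sas_address *remote,
                                  struct demo_sas_address *out)
{
        if (!is_sata) {
                *out = *remote;         /* SAS: use the identify frame's address */
        } else {
                *out = *local;          /* SATA: fabricate one per phy */
                out->low += phy_index;
        }
}

int main(void)
{
        struct demo_sas_address local = { 0x5000B000, 0x00000010 };
        struct demo_sas_address remote = { 0x5000C000, 0x00000042 };
        struct demo_sas_address out;

        demo_attached_address(true, 2, &local, &remote, &out);
        /* prints the local address with .low bumped by the phy index */
        printf("attached: %08x%08x\n", (unsigned)out.high, (unsigned)out.low);
        return 0;
}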
616 | /** | 555 | /** |
617 | * scic_sds_port_construct_dummy_rnc() - create dummy rnc for si workaround | 556 | * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround |
618 | * | 557 | * |
619 | * @sci_port: logical port on which we need to create the remote node context | 558 | * @sci_port: logical port on which we need to create the remote node context |
620 | * @rni: remote node index for this remote node context. | 559 | * @rni: remote node index for this remote node context. |
@@ -623,7 +562,7 @@ void scic_sds_port_get_attached_sas_address( | |||
623 | * This structure will be posted to the hardware to work around a scheduler | 562 | * This structure will be posted to the hardware to work around a scheduler |
624 | * error in the hardware. | 563 | * error in the hardware. |
625 | */ | 564 | */ |
626 | static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) | 565 | static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) |
627 | { | 566 | { |
628 | union scu_remote_node_context *rnc; | 567 | union scu_remote_node_context *rnc; |
629 | 568 | ||
@@ -651,7 +590,7 @@ static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) | |||
651 | * structure will be posted to the hardware to work around a scheduler error | 590 | * structure will be posted to the hardware to work around a scheduler error |
652 | * in the hardware. | 591 | * in the hardware. |
653 | */ | 592 | */ |
654 | static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) | 593 | static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) |
655 | { | 594 | { |
656 | struct isci_host *ihost = iport->owning_controller; | 595 | struct isci_host *ihost = iport->owning_controller; |
657 | struct scu_task_context *task_context; | 596 | struct scu_task_context *task_context; |
@@ -671,7 +610,7 @@ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) | |||
671 | task_context->task_phase = 0x01; | 610 | task_context->task_phase = 0x01; |
672 | } | 611 | } |
673 | 612 | ||
674 | static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) | 613 | static void sci_port_destroy_dummy_resources(struct isci_port *iport) |
675 | { | 614 | { |
676 | struct isci_host *ihost = iport->owning_controller; | 615 | struct isci_host *ihost = iport->owning_controller; |
677 | 616 | ||
@@ -679,93 +618,43 @@ static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) | |||
679 | isci_free_tag(ihost, iport->reserved_tag); | 618 | isci_free_tag(ihost, iport->reserved_tag); |
680 | 619 | ||
681 | if (iport->reserved_rni != SCU_DUMMY_INDEX) | 620 | if (iport->reserved_rni != SCU_DUMMY_INDEX) |
682 | scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, | 621 | sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, |
683 | 1, iport->reserved_rni); | 622 | 1, iport->reserved_rni); |
684 | 623 | ||
685 | iport->reserved_rni = SCU_DUMMY_INDEX; | 624 | iport->reserved_rni = SCU_DUMMY_INDEX; |
686 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; | 625 | iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; |
687 | } | 626 | } |
688 | 627 | ||
689 | /** | 628 | void sci_port_setup_transports(struct isci_port *iport, u32 device_id) |
690 | * This method performs initialization of the supplied port. Initialization | ||
691 | * includes: - state machine initialization - member variable initialization | ||
692 | * - configuring the phy_mask | ||
693 | * @sci_port: | ||
694 | * @transport_layer_registers: | ||
695 | * @port_task_scheduler_registers: | ||
696 | * @port_configuration_regsiter: | ||
697 | * | ||
698 | * enum sci_status SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION This value is returned | ||
699 | * if the phy being added to the port | ||
700 | */ | ||
701 | enum sci_status scic_sds_port_initialize( | ||
702 | struct isci_port *iport, | ||
703 | void __iomem *port_task_scheduler_registers, | ||
704 | void __iomem *port_configuration_regsiter, | ||
705 | void __iomem *viit_registers) | ||
706 | { | ||
707 | iport->port_task_scheduler_registers = port_task_scheduler_registers; | ||
708 | iport->port_pe_configuration_register = port_configuration_regsiter; | ||
709 | iport->viit_registers = viit_registers; | ||
710 | |||
711 | return SCI_SUCCESS; | ||
712 | } | ||
713 | |||
714 | |||
715 | /** | ||
716 | * This method assigns the direct attached device ID for this port. | ||
717 | * | ||
718 | * @param[in] iport The port for which the direct attached device id is to | ||
719 | * be assigned. | ||
720 | * @param[in] device_id The direct attached device ID to assign to the port. | ||
721 | * This will be the RNi for the device | ||
722 | */ | ||
723 | void scic_sds_port_setup_transports( | ||
724 | struct isci_port *iport, | ||
725 | u32 device_id) | ||
726 | { | 629 | { |
727 | u8 index; | 630 | u8 index; |
728 | 631 | ||
729 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 632 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
730 | if (iport->active_phy_mask & (1 << index)) | 633 | if (iport->active_phy_mask & (1 << index)) |
731 | scic_sds_phy_setup_transport(iport->phy_table[index], device_id); | 634 | sci_phy_setup_transport(iport->phy_table[index], device_id); |
732 | } | 635 | } |
733 | } | 636 | } |
734 | 637 | ||
735 | /** | 638 | static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, |
736 | * | 639 | bool do_notify_user) |
737 | * @sci_port: This is the port on which the phy should be enabled. | ||
738 | * @sci_phy: This is the specific phy which to enable. | ||
739 | * @do_notify_user: This parameter specifies whether to inform the user (via | ||
740 | * scic_cb_port_link_up()) as to the fact that a new phy as become ready. | ||
741 | * | ||
742 | * This function will activate the phy in the port. | ||
743 | * Activation includes: - adding | ||
744 | * the phy to the port - enabling the Protocol Engine in the silicon. - | ||
745 | * notifying the user that the link is up. none | ||
746 | */ | ||
747 | static void scic_sds_port_activate_phy(struct isci_port *iport, | ||
748 | struct isci_phy *iphy, | ||
749 | bool do_notify_user) | ||
750 | { | 640 | { |
751 | struct isci_host *ihost = iport->owning_controller; | 641 | struct isci_host *ihost = iport->owning_controller; |
752 | 642 | ||
753 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) | 643 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) |
754 | scic_sds_phy_resume(iphy); | 644 | sci_phy_resume(iphy); |
755 | 645 | ||
756 | iport->active_phy_mask |= 1 << iphy->phy_index; | 646 | iport->active_phy_mask |= 1 << iphy->phy_index; |
757 | 647 | ||
758 | scic_sds_controller_clear_invalid_phy(ihost, iphy); | 648 | sci_controller_clear_invalid_phy(ihost, iphy); |
759 | 649 | ||
760 | if (do_notify_user == true) | 650 | if (do_notify_user == true) |
761 | isci_port_link_up(ihost, iport, iphy); | 651 | isci_port_link_up(ihost, iport, iphy); |
762 | } | 652 | } |
763 | 653 | ||
764 | void scic_sds_port_deactivate_phy(struct isci_port *iport, | 654 | void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, |
765 | struct isci_phy *iphy, | 655 | bool do_notify_user) |
766 | bool do_notify_user) | ||
767 | { | 656 | { |
768 | struct isci_host *ihost = scic_sds_port_get_controller(iport); | 657 | struct isci_host *ihost = sci_port_get_controller(iport); |
769 | 658 | ||
770 | iport->active_phy_mask &= ~(1 << iphy->phy_index); | 659 | iport->active_phy_mask &= ~(1 << iphy->phy_index); |
771 | 660 | ||
@@ -779,16 +668,7 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, | |||
779 | isci_port_link_down(ihost, iphy, iport); | 668 | isci_port_link_down(ihost, iphy, iport); |
780 | } | 669 | } |
781 | 670 | ||
782 | /** | 671 | static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) |
783 | * | ||
784 | * @sci_port: This is the port on which the phy should be disabled. | ||
785 | * @sci_phy: This is the specific phy which to disabled. | ||
786 | * | ||
787 | * This function will disable the phy and report that the phy is not valid for | ||
788 | * this port object. None | ||
789 | */ | ||
790 | static void scic_sds_port_invalid_link_up(struct isci_port *iport, | ||
791 | struct isci_phy *iphy) | ||
792 | { | 672 | { |
793 | struct isci_host *ihost = iport->owning_controller; | 673 | struct isci_host *ihost = iport->owning_controller; |
794 | 674 | ||
@@ -798,12 +678,12 @@ static void scic_sds_port_invalid_link_up(struct isci_port *iport, | |||
798 | * invalid link. | 678 | * invalid link. |
799 | */ | 679 | */ |
800 | if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { | 680 | if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { |
801 | scic_sds_controller_set_invalid_phy(ihost, iphy); | 681 | sci_controller_set_invalid_phy(ihost, iphy); |
802 | dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); | 682 | dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); |
803 | } | 683 | } |
804 | } | 684 | } |
805 | 685 | ||
806 | static bool is_port_ready_state(enum scic_sds_port_states state) | 686 | static bool is_port_ready_state(enum sci_port_states state) |
807 | { | 687 | { |
808 | switch (state) { | 688 | switch (state) { |
809 | case SCI_PORT_READY: | 689 | case SCI_PORT_READY: |
@@ -818,10 +698,10 @@ static bool is_port_ready_state(enum scic_sds_port_states state) | |||
818 | 698 | ||
819 | /* flag dummy rnc handling when exiting a ready state */ | 699 | /* flag dummy rnc handling when exiting a ready state */ |
820 | static void port_state_machine_change(struct isci_port *iport, | 700 | static void port_state_machine_change(struct isci_port *iport, |
821 | enum scic_sds_port_states state) | 701 | enum sci_port_states state) |
822 | { | 702 | { |
823 | struct sci_base_state_machine *sm = &iport->sm; | 703 | struct sci_base_state_machine *sm = &iport->sm; |
824 | enum scic_sds_port_states old_state = sm->current_state_id; | 704 | enum sci_port_states old_state = sm->current_state_id; |
825 | 705 | ||
826 | if (is_port_ready_state(old_state) && !is_port_ready_state(state)) | 706 | if (is_port_ready_state(old_state) && !is_port_ready_state(state)) |
827 | iport->ready_exit = true; | 707 | iport->ready_exit = true; |
@@ -831,11 +711,11 @@ static void port_state_machine_change(struct isci_port *iport, | |||
831 | } | 711 | } |
832 | 712 | ||
833 | /** | 713 | /** |
834 | * scic_sds_port_general_link_up_handler - phy can be assigned to port? | 714 | * sci_port_general_link_up_handler - phy can be assigned to port? |
835 | * @sci_port: scic_sds_port object which has a phy that has gone link up. | 715 | * @sci_port: sci_port object which has a phy that has gone link up. |
836 | * @sci_phy: This is the struct isci_phy object that has gone link up. | 716 | * @sci_phy: This is the struct isci_phy object that has gone link up. |
837 | * @do_notify_user: This parameter specifies whether to inform the user (via | 717 | * @do_notify_user: This parameter specifies whether to inform the user (via |
838 | * scic_cb_port_link_up()) as to the fact that a new phy has become ready. | 718 | * sci_port_link_up()) as to the fact that a new phy has become ready. |
839 | * | 719 | * |
840 | * Determine if this phy can be assigned to this | 720 | * Determine if this phy can be assigned to this |
841 | * port. If the phy is not a valid PHY for | 721 | * port. If the phy is not a valid PHY for |
@@ -843,15 +723,15 @@ static void port_state_machine_change(struct isci_port *iport, | |||
843 | * part of a port if its attached SAS ADDRESS is the same as all other PHYs in | 723 | * part of a port if its attached SAS ADDRESS is the same as all other PHYs in |
844 | * the same port. none | 724 | * the same port. none |
845 | */ | 725 | */ |
846 | static void scic_sds_port_general_link_up_handler(struct isci_port *iport, | 726 | static void sci_port_general_link_up_handler(struct isci_port *iport, |
847 | struct isci_phy *iphy, | 727 | struct isci_phy *iphy, |
848 | bool do_notify_user) | 728 | bool do_notify_user) |
849 | { | 729 | { |
850 | struct sci_sas_address port_sas_address; | 730 | struct sci_sas_address port_sas_address; |
851 | struct sci_sas_address phy_sas_address; | 731 | struct sci_sas_address phy_sas_address; |
852 | 732 | ||
853 | scic_sds_port_get_attached_sas_address(iport, &port_sas_address); | 733 | sci_port_get_attached_sas_address(iport, &port_sas_address); |
854 | scic_sds_phy_get_attached_sas_address(iphy, &phy_sas_address); | 734 | sci_phy_get_attached_sas_address(iphy, &phy_sas_address); |
855 | 735 | ||
856 | /* If the SAS address of the new phy matches the SAS address of | 736 | /* If the SAS address of the new phy matches the SAS address of |
857 | * other phys in the port OR this is the first phy in the port, | 737 | * other phys in the port OR this is the first phy in the port, |
@@ -863,11 +743,11 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, | |||
863 | iport->active_phy_mask == 0) { | 743 | iport->active_phy_mask == 0) { |
864 | struct sci_base_state_machine *sm = &iport->sm; | 744 | struct sci_base_state_machine *sm = &iport->sm; |
865 | 745 | ||
866 | scic_sds_port_activate_phy(iport, iphy, do_notify_user); | 746 | sci_port_activate_phy(iport, iphy, do_notify_user); |
867 | if (sm->current_state_id == SCI_PORT_RESETTING) | 747 | if (sm->current_state_id == SCI_PORT_RESETTING) |
868 | port_state_machine_change(iport, SCI_PORT_READY); | 748 | port_state_machine_change(iport, SCI_PORT_READY); |
869 | } else | 749 | } else |
870 | scic_sds_port_invalid_link_up(iport, iphy); | 750 | sci_port_invalid_link_up(iport, iphy); |
871 | } | 751 | } |
872 | 752 | ||
873 | 753 | ||
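sci_port_general_link_up_handler() above admits a phy into the port only when its attached SAS address matches the address the port already reports, or when the port has no active phys yet; anything else is flagged as an invalid link up. A compact standalone restatement of that decision, with the notification and state-machine side effects omitted and all names invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sas_address { uint32_t high, low; };

static bool demo_phy_may_join_port(const struct demo_sas_address *port_addr,
                                   const struct demo_sas_address *phy_addr,
                                   uint32_t active_phy_mask)
{
        /* the first phy in the port defines the port's address */
        if (active_phy_mask == 0)
                return true;
        /* later phys must see the same attached SAS address (wide port) */
        return port_addr->high == phy_addr->high &&
               port_addr->low == phy_addr->low;
}

int main(void)
{
        struct demo_sas_address port  = { 0x5000C000, 0x42 };
        struct demo_sas_address same  = { 0x5000C000, 0x42 };
        struct demo_sas_address other = { 0x5000D000, 0x99 };

        printf("%d %d\n",
               demo_phy_may_join_port(&port, &same, 0x1),   /* 1: joins the wide port */
               demo_phy_may_join_port(&port, &other, 0x1)); /* 0: invalid link up */
        return 0;
}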
@@ -881,7 +761,7 @@ static void scic_sds_port_general_link_up_handler(struct isci_port *iport, | |||
881 | * Returns true if this is a wide ported port, false if this is a | 761 | * Returns true if this is a wide ported port, false if this is a |
882 | * narrow port. | 762 | * narrow port. |
883 | */ | 763 | */ |
884 | static bool scic_sds_port_is_wide(struct isci_port *iport) | 764 | static bool sci_port_is_wide(struct isci_port *iport) |
885 | { | 765 | { |
886 | u32 index; | 766 | u32 index; |
887 | u32 phy_count = 0; | 767 | u32 phy_count = 0; |
@@ -909,14 +789,14 @@ static bool scic_sds_port_is_wide(struct isci_port *iport) | |||
909 | * wide ports and direct attached phys. Since there are no wide ported SATA | 789 | * wide ports and direct attached phys. Since there are no wide ported SATA |
910 | * devices this could become an invalid port configuration. | 790 | * devices this could become an invalid port configuration. |
911 | */ | 791 | */ |
912 | bool scic_sds_port_link_detected( | 792 | bool sci_port_link_detected( |
913 | struct isci_port *iport, | 793 | struct isci_port *iport, |
914 | struct isci_phy *iphy) | 794 | struct isci_phy *iphy) |
915 | { | 795 | { |
916 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && | 796 | if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && |
917 | (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && | 797 | (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && |
918 | scic_sds_port_is_wide(iport)) { | 798 | sci_port_is_wide(iport)) { |
919 | scic_sds_port_invalid_link_up(iport, iphy); | 799 | sci_port_invalid_link_up(iport, iphy); |
920 | 800 | ||
921 | return false; | 801 | return false; |
922 | } | 802 | } |
@@ -977,11 +857,11 @@ done: | |||
977 | * | 857 | * |
978 | * | 858 | * |
979 | */ | 859 | */ |
980 | static void scic_sds_port_update_viit_entry(struct isci_port *iport) | 860 | static void sci_port_update_viit_entry(struct isci_port *iport) |
981 | { | 861 | { |
982 | struct sci_sas_address sas_address; | 862 | struct sci_sas_address sas_address; |
983 | 863 | ||
984 | scic_sds_port_get_sas_address(iport, &sas_address); | 864 | sci_port_get_sas_address(iport, &sas_address); |
985 | 865 | ||
986 | writel(sas_address.high, | 866 | writel(sas_address.high, |
987 | &iport->viit_registers->initiator_sas_address_hi); | 867 | &iport->viit_registers->initiator_sas_address_hi); |
@@ -999,7 +879,7 @@ static void scic_sds_port_update_viit_entry(struct isci_port *iport) | |||
999 | &iport->viit_registers->status); | 879 | &iport->viit_registers->status); |
1000 | } | 880 | } |
1001 | 881 | ||
1002 | enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) | 882 | enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) |
1003 | { | 883 | { |
1004 | u16 index; | 884 | u16 index; |
1005 | struct isci_phy *iphy; | 885 | struct isci_phy *iphy; |
@@ -1010,7 +890,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) | |||
1010 | * lowest maximum link rate. */ | 890 | * lowest maximum link rate. */ |
1011 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 891 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
1012 | iphy = iport->phy_table[index]; | 892 | iphy = iport->phy_table[index]; |
1013 | if (iphy && scic_sds_port_active_phy(iport, iphy) && | 893 | if (iphy && sci_port_active_phy(iport, iphy) && |
1014 | iphy->max_negotiated_speed < max_allowed_speed) | 894 | iphy->max_negotiated_speed < max_allowed_speed) |
1015 | max_allowed_speed = iphy->max_negotiated_speed; | 895 | max_allowed_speed = iphy->max_negotiated_speed; |
1016 | } | 896 | } |
@@ -1018,7 +898,7 @@ enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport) | |||
1018 | return max_allowed_speed; | 898 | return max_allowed_speed; |
1019 | } | 899 | } |
1020 | 900 | ||
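sci_port_get_max_allowed_speed() above walks the active phys and returns the lowest of their negotiated maximums, so a wide port never advertises a rate that one of its members cannot sustain. A standalone sketch of that minimum-over-active-phys loop (the rate values are illustrative, not the real sas_linkrate enum):

#include <stdint.h>
#include <stdio.h>

#define SCI_MAX_PHYS 4  /* assumed value for the sketch */

/* illustrative stand-ins for enum sas_linkrate values */
enum demo_linkrate { DEMO_RATE_1_5G = 15, DEMO_RATE_3_0G = 30, DEMO_RATE_6_0G = 60 };

static enum demo_linkrate
demo_port_max_allowed_speed(const enum demo_linkrate speed[SCI_MAX_PHYS],
                            uint32_t active_phy_mask)
{
        enum demo_linkrate max_allowed = DEMO_RATE_6_0G;
        uint32_t i;

        for (i = 0; i < SCI_MAX_PHYS; i++)
                if ((active_phy_mask & (1u << i)) && speed[i] < max_allowed)
                        max_allowed = speed[i];  /* clamp to the slowest member */
        return max_allowed;
}

int main(void)
{
        enum demo_linkrate speed[SCI_MAX_PHYS] =
                { DEMO_RATE_6_0G, DEMO_RATE_3_0G, DEMO_RATE_6_0G, DEMO_RATE_1_5G };

        /* phys 0 and 1 active: the port is limited to 3.0G */
        printf("max allowed: %d\n", demo_port_max_allowed_speed(speed, 0x3));
        return 0;
}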
1021 | static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) | 901 | static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) |
1022 | { | 902 | { |
1023 | u32 pts_control_value; | 903 | u32 pts_control_value; |
1024 | 904 | ||
@@ -1028,7 +908,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) | |||
1028 | } | 908 | } |
1029 | 909 | ||
1030 | /** | 910 | /** |
1031 | * scic_sds_port_post_dummy_request() - post dummy/workaround request | 911 | * sci_port_post_dummy_request() - post dummy/workaround request |
1032 | * @sci_port: port to post task | 912 | * @sci_port: port to post task |
1033 | * | 913 | * |
1034 | * Prevent the hardware scheduler from posting new requests to the front | 914 | * Prevent the hardware scheduler from posting new requests to the front |
@@ -1036,7 +916,7 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) | |||
1036 | * ongoing requests. | 916 | * ongoing requests. |
1037 | * | 917 | * |
1038 | */ | 918 | */ |
1039 | static void scic_sds_port_post_dummy_request(struct isci_port *iport) | 919 | static void sci_port_post_dummy_request(struct isci_port *iport) |
1040 | { | 920 | { |
1041 | struct isci_host *ihost = iport->owning_controller; | 921 | struct isci_host *ihost = iport->owning_controller; |
1042 | u16 tag = iport->reserved_tag; | 922 | u16 tag = iport->reserved_tag; |
@@ -1050,7 +930,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) | |||
1050 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | | 930 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | |
1051 | ISCI_TAG_TCI(tag); | 931 | ISCI_TAG_TCI(tag); |
1052 | 932 | ||
1053 | scic_sds_controller_post_request(ihost, command); | 933 | sci_controller_post_request(ihost, command); |
1054 | } | 934 | } |
1055 | 935 | ||
1056 | /** | 936 | /** |
@@ -1060,7 +940,7 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) | |||
1060 | * @sci_port: The port on which the task must be aborted. | 940 | * @sci_port: The port on which the task must be aborted. |
1061 | * | 941 | * |
1062 | */ | 942 | */ |
1063 | static void scic_sds_port_abort_dummy_request(struct isci_port *iport) | 943 | static void sci_port_abort_dummy_request(struct isci_port *iport) |
1064 | { | 944 | { |
1065 | struct isci_host *ihost = iport->owning_controller; | 945 | struct isci_host *ihost = iport->owning_controller; |
1066 | u16 tag = iport->reserved_tag; | 946 | u16 tag = iport->reserved_tag; |
@@ -1074,7 +954,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) | |||
1074 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | | 954 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | |
1075 | ISCI_TAG_TCI(tag); | 955 | ISCI_TAG_TCI(tag); |
1076 | 956 | ||
1077 | scic_sds_controller_post_request(ihost, command); | 957 | sci_controller_post_request(ihost, command); |
1078 | } | 958 | } |
1079 | 959 | ||
1080 | /** | 960 | /** |
@@ -1084,7 +964,7 @@ static void scic_sds_port_abort_dummy_request(struct isci_port *iport) | |||
1084 | * This method will resume the port task scheduler for this port object. | 964 | * This method will resume the port task scheduler for this port object. |

1085 | */ | 965 | */ |
1086 | static void | 966 | static void |
1087 | scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) | 967 | sci_port_resume_port_task_scheduler(struct isci_port *iport) |
1088 | { | 968 | { |
1089 | u32 pts_control_value; | 969 | u32 pts_control_value; |
1090 | 970 | ||
@@ -1093,11 +973,11 @@ scic_sds_port_resume_port_task_scheduler(struct isci_port *iport) | |||
1093 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); | 973 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); |
1094 | } | 974 | } |
1095 | 975 | ||
1096 | static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) | 976 | static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) |
1097 | { | 977 | { |
1098 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 978 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1099 | 979 | ||
1100 | scic_sds_port_suspend_port_task_scheduler(iport); | 980 | sci_port_suspend_port_task_scheduler(iport); |
1101 | 981 | ||
1102 | iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; | 982 | iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; |
1103 | 983 | ||
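Each state handler above receives only the embedded struct sci_base_state_machine pointer and recovers the owning port with container_of(). A self-contained sketch of that pattern using a generic container_of macro and toy types, not the driver's definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct state_machine { int current_state; };

struct port {
    int index;
    struct state_machine sm;  /* embedded, like sm in struct isci_port */
};

/* Handlers get the embedded member; recover the containing port from it. */
static void waiting_enter(struct state_machine *sm)
{
    struct port *p = container_of(sm, struct port, sm);

    printf("port %d entered the WAITING substate\n", p->index);
}

int main(void)
{
    struct port p = { .index = 2, .sm = { 0 } };

    waiting_enter(&p.sm);
    return 0;
}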
@@ -1108,7 +988,7 @@ static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_mac | |||
1108 | } | 988 | } |
1109 | } | 989 | } |
1110 | 990 | ||
1111 | static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) | 991 | static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) |
1112 | { | 992 | { |
1113 | u32 index; | 993 | u32 index; |
1114 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 994 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
@@ -1124,18 +1004,18 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state | |||
1124 | } | 1004 | } |
1125 | } | 1005 | } |
1126 | 1006 | ||
1127 | scic_sds_port_update_viit_entry(iport); | 1007 | sci_port_update_viit_entry(iport); |
1128 | 1008 | ||
1129 | scic_sds_port_resume_port_task_scheduler(iport); | 1009 | sci_port_resume_port_task_scheduler(iport); |
1130 | 1010 | ||
1131 | /* | 1011 | /* |
1132 | * Post the dummy task for the port so the hardware can schedule | 1012 | * Post the dummy task for the port so the hardware can schedule |
1133 | * io correctly | 1013 | * io correctly |
1134 | */ | 1014 | */ |
1135 | scic_sds_port_post_dummy_request(iport); | 1015 | sci_port_post_dummy_request(iport); |
1136 | } | 1016 | } |
1137 | 1017 | ||
1138 | static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | 1018 | static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) |
1139 | { | 1019 | { |
1140 | struct isci_host *ihost = iport->owning_controller; | 1020 | struct isci_host *ihost = iport->owning_controller; |
1141 | u8 phys_index = iport->physical_port_index; | 1021 | u8 phys_index = iport->physical_port_index; |
@@ -1157,7 +1037,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | |||
1157 | command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | | 1037 | command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | |
1158 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1038 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1159 | 1039 | ||
1160 | scic_sds_controller_post_request(ihost, command); | 1040 | sci_controller_post_request(ihost, command); |
1161 | } | 1041 | } |
1162 | 1042 | ||
1163 | /** | 1043 | /** |
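The posted commands in these hunks are single 32-bit words: an opcode OR'd with the logical port index shifted into its field and the remote node (or task context) index in the low bits. A sketch of that composition; the opcode value and shift below are placeholders, not the real SCU_CONTEXT_COMMAND_* encodings:

#include <stdint.h>

/* Placeholder layout, illustrative only. */
#define POST_RNC_INVALIDATE   (0x2u << 28)
#define LOGICAL_PORT_SHIFT    16

static uint32_t build_rnc_invalidate(uint8_t port_index, uint16_t rni)
{
    /* opcode | destination logical port | remote node index */
    return POST_RNC_INVALIDATE |
           ((uint32_t)port_index << LOGICAL_PORT_SHIFT) |
           rni;
}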
@@ -1168,7 +1048,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | |||
1168 | * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports | 1048 | * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports |
1169 | * the port not ready and suspends the port task scheduler. | 1049 | * the port not ready and suspends the port task scheduler. |
1170 | */ | 1050 | */ |
1171 | static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) | 1051 | static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) |
1172 | { | 1052 | { |
1173 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1053 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1174 | struct isci_host *ihost = iport->owning_controller; | 1054 | struct isci_host *ihost = iport->owning_controller; |
@@ -1178,15 +1058,15 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ | |||
1178 | * the hardware will treat this as a NOP and just return abort | 1058 | * the hardware will treat this as a NOP and just return abort |
1179 | * complete. | 1059 | * complete. |
1180 | */ | 1060 | */ |
1181 | scic_sds_port_abort_dummy_request(iport); | 1061 | sci_port_abort_dummy_request(iport); |
1182 | 1062 | ||
1183 | isci_port_not_ready(ihost, iport); | 1063 | isci_port_not_ready(ihost, iport); |
1184 | 1064 | ||
1185 | if (iport->ready_exit) | 1065 | if (iport->ready_exit) |
1186 | scic_sds_port_invalidate_dummy_remote_node(iport); | 1066 | sci_port_invalidate_dummy_remote_node(iport); |
1187 | } | 1067 | } |
1188 | 1068 | ||
1189 | static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) | 1069 | static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) |
1190 | { | 1070 | { |
1191 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1071 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1192 | struct isci_host *ihost = iport->owning_controller; | 1072 | struct isci_host *ihost = iport->owning_controller; |
@@ -1201,20 +1081,20 @@ static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state | |||
1201 | SCI_PORT_SUB_OPERATIONAL); | 1081 | SCI_PORT_SUB_OPERATIONAL); |
1202 | } | 1082 | } |
1203 | 1083 | ||
1204 | static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) | 1084 | static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) |
1205 | { | 1085 | { |
1206 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1086 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1207 | 1087 | ||
1208 | scic_sds_port_suspend_port_task_scheduler(iport); | 1088 | sci_port_suspend_port_task_scheduler(iport); |
1209 | if (iport->ready_exit) | 1089 | if (iport->ready_exit) |
1210 | scic_sds_port_invalidate_dummy_remote_node(iport); | 1090 | sci_port_invalidate_dummy_remote_node(iport); |
1211 | } | 1091 | } |
1212 | 1092 | ||
1213 | enum sci_status scic_sds_port_start(struct isci_port *iport) | 1093 | enum sci_status sci_port_start(struct isci_port *iport) |
1214 | { | 1094 | { |
1215 | struct isci_host *ihost = iport->owning_controller; | 1095 | struct isci_host *ihost = iport->owning_controller; |
1216 | enum sci_status status = SCI_SUCCESS; | 1096 | enum sci_status status = SCI_SUCCESS; |
1217 | enum scic_sds_port_states state; | 1097 | enum sci_port_states state; |
1218 | u32 phy_mask; | 1098 | u32 phy_mask; |
1219 | 1099 | ||
1220 | state = iport->sm.current_state_id; | 1100 | state = iport->sm.current_state_id; |
@@ -1234,11 +1114,11 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) | |||
1234 | } | 1114 | } |
1235 | 1115 | ||
1236 | if (iport->reserved_rni == SCU_DUMMY_INDEX) { | 1116 | if (iport->reserved_rni == SCU_DUMMY_INDEX) { |
1237 | u16 rni = scic_sds_remote_node_table_allocate_remote_node( | 1117 | u16 rni = sci_remote_node_table_allocate_remote_node( |
1238 | &ihost->available_remote_nodes, 1); | 1118 | &ihost->available_remote_nodes, 1); |
1239 | 1119 | ||
1240 | if (rni != SCU_DUMMY_INDEX) | 1120 | if (rni != SCU_DUMMY_INDEX) |
1241 | scic_sds_port_construct_dummy_rnc(iport, rni); | 1121 | sci_port_construct_dummy_rnc(iport, rni); |
1242 | else | 1122 | else |
1243 | status = SCI_FAILURE_INSUFFICIENT_RESOURCES; | 1123 | status = SCI_FAILURE_INSUFFICIENT_RESOURCES; |
1244 | iport->reserved_rni = rni; | 1124 | iport->reserved_rni = rni; |
@@ -1251,19 +1131,19 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) | |||
1251 | if (tag == SCI_CONTROLLER_INVALID_IO_TAG) | 1131 | if (tag == SCI_CONTROLLER_INVALID_IO_TAG) |
1252 | status = SCI_FAILURE_INSUFFICIENT_RESOURCES; | 1132 | status = SCI_FAILURE_INSUFFICIENT_RESOURCES; |
1253 | else | 1133 | else |
1254 | scic_sds_port_construct_dummy_task(iport, tag); | 1134 | sci_port_construct_dummy_task(iport, tag); |
1255 | iport->reserved_tag = tag; | 1135 | iport->reserved_tag = tag; |
1256 | } | 1136 | } |
1257 | 1137 | ||
1258 | if (status == SCI_SUCCESS) { | 1138 | if (status == SCI_SUCCESS) { |
1259 | phy_mask = scic_sds_port_get_phys(iport); | 1139 | phy_mask = sci_port_get_phys(iport); |
1260 | 1140 | ||
1261 | /* | 1141 | /* |
1262 | * There are one or more phys assigned to this port. Make sure | 1142 | * There are one or more phys assigned to this port. Make sure |
1263 | * the port's phy mask is in fact legal and supported by the | 1143 | * the port's phy mask is in fact legal and supported by the |
1264 | * silicon. | 1144 | * silicon. |
1265 | */ | 1145 | */ |
1266 | if (scic_sds_port_is_phy_mask_valid(iport, phy_mask) == true) { | 1146 | if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { |
1267 | port_state_machine_change(iport, | 1147 | port_state_machine_change(iport, |
1268 | SCI_PORT_READY); | 1148 | SCI_PORT_READY); |
1269 | 1149 | ||
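sci_port_start() reserves two workaround resources up front, a dummy remote node index and a dummy task tag, and destroys both if anything fails before the port reaches SCI_PORT_READY. A condensed sketch of that reserve-or-bail flow; INVALID_INDEX and the stub allocators stand in for SCU_DUMMY_INDEX, SCI_CONTROLLER_INVALID_IO_TAG and the controller's allocators:

#include <stdbool.h>

#define INVALID_INDEX 0xFFFFu

struct port { unsigned reserved_rni, reserved_tag; };

/* Stub allocators; the driver draws these from the controller's
 * remote node table and tag pool. */
static unsigned allocate_rni(void) { return 12; }
static unsigned allocate_tag(void) { return 7;  }

static bool port_reserve_dummies(struct port *p)
{
    if (p->reserved_rni == INVALID_INDEX)
        p->reserved_rni = allocate_rni();
    if (p->reserved_tag == INVALID_INDEX)
        p->reserved_tag = allocate_tag();

    /* On failure the caller releases both, mirroring
     * sci_port_destroy_dummy_resources(). */
    return p->reserved_rni != INVALID_INDEX &&
           p->reserved_tag != INVALID_INDEX;
}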
@@ -1273,14 +1153,14 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) | |||
1273 | } | 1153 | } |
1274 | 1154 | ||
1275 | if (status != SCI_SUCCESS) | 1155 | if (status != SCI_SUCCESS) |
1276 | scic_sds_port_destroy_dummy_resources(iport); | 1156 | sci_port_destroy_dummy_resources(iport); |
1277 | 1157 | ||
1278 | return status; | 1158 | return status; |
1279 | } | 1159 | } |
1280 | 1160 | ||
1281 | enum sci_status scic_sds_port_stop(struct isci_port *iport) | 1161 | enum sci_status sci_port_stop(struct isci_port *iport) |
1282 | { | 1162 | { |
1283 | enum scic_sds_port_states state; | 1163 | enum sci_port_states state; |
1284 | 1164 | ||
1285 | state = iport->sm.current_state_id; | 1165 | state = iport->sm.current_state_id; |
1286 | switch (state) { | 1166 | switch (state) { |
@@ -1300,11 +1180,11 @@ enum sci_status scic_sds_port_stop(struct isci_port *iport) | |||
1300 | } | 1180 | } |
1301 | } | 1181 | } |
1302 | 1182 | ||
1303 | static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout) | 1183 | static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) |
1304 | { | 1184 | { |
1305 | enum sci_status status = SCI_FAILURE_INVALID_PHY; | 1185 | enum sci_status status = SCI_FAILURE_INVALID_PHY; |
1306 | struct isci_phy *iphy = NULL; | 1186 | struct isci_phy *iphy = NULL; |
1307 | enum scic_sds_port_states state; | 1187 | enum sci_port_states state; |
1308 | u32 phy_index; | 1188 | u32 phy_index; |
1309 | 1189 | ||
1310 | state = iport->sm.current_state_id; | 1190 | state = iport->sm.current_state_id; |
@@ -1317,7 +1197,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout | |||
1317 | /* Select a phy on which we can send the hard reset request. */ | 1197 | /* Select a phy on which we can send the hard reset request. */ |
1318 | for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { | 1198 | for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { |
1319 | iphy = iport->phy_table[phy_index]; | 1199 | iphy = iport->phy_table[phy_index]; |
1320 | if (iphy && !scic_sds_port_active_phy(iport, iphy)) { | 1200 | if (iphy && !sci_port_active_phy(iport, iphy)) { |
1321 | /* | 1201 | /* |
1322 | * We found a phy but it is not ready select | 1202 | * We found a phy but it is not ready select |
1323 | * different phy | 1203 | * different phy |
@@ -1329,7 +1209,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout | |||
1329 | /* If we have a phy then go ahead and start the reset procedure */ | 1209 | /* If we have a phy then go ahead and start the reset procedure */ |
1330 | if (!iphy) | 1210 | if (!iphy) |
1331 | return status; | 1211 | return status; |
1332 | status = scic_sds_phy_reset(iphy); | 1212 | status = sci_phy_reset(iphy); |
1333 | 1213 | ||
1334 | if (status != SCI_SUCCESS) | 1214 | if (status != SCI_SUCCESS) |
1335 | return status; | 1215 | return status; |
@@ -1342,7 +1222,7 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout | |||
1342 | } | 1222 | } |
1343 | 1223 | ||
1344 | /** | 1224 | /** |
1345 | * scic_sds_port_add_phy() - | 1225 | * sci_port_add_phy() - |
1346 | * @sci_port: This parameter specifies the port in which the phy will be added. | 1226 | * @sci_port: This parameter specifies the port in which the phy will be added. |
1347 | * @sci_phy: This parameter is the phy which is to be added to the port. | 1227 | * @sci_phy: This parameter is the phy which is to be added to the port. |
1348 | * | 1228 | * |
@@ -1350,11 +1230,11 @@ static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout | |||
1350 | * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other | 1230 | * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other |
1351 | * status is a failure to add the phy to the port. | 1231 | * status is a failure to add the phy to the port. |
1352 | */ | 1232 | */ |
1353 | enum sci_status scic_sds_port_add_phy(struct isci_port *iport, | 1233 | enum sci_status sci_port_add_phy(struct isci_port *iport, |
1354 | struct isci_phy *iphy) | 1234 | struct isci_phy *iphy) |
1355 | { | 1235 | { |
1356 | enum sci_status status; | 1236 | enum sci_status status; |
1357 | enum scic_sds_port_states state; | 1237 | enum sci_port_states state; |
1358 | 1238 | ||
1359 | state = iport->sm.current_state_id; | 1239 | state = iport->sm.current_state_id; |
1360 | switch (state) { | 1240 | switch (state) { |
@@ -1362,7 +1242,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, | |||
1362 | struct sci_sas_address port_sas_address; | 1242 | struct sci_sas_address port_sas_address; |
1363 | 1243 | ||
1364 | /* Read the port assigned SAS Address if there is one */ | 1244 | /* Read the port assigned SAS Address if there is one */ |
1365 | scic_sds_port_get_sas_address(iport, &port_sas_address); | 1245 | sci_port_get_sas_address(iport, &port_sas_address); |
1366 | 1246 | ||
1367 | if (port_sas_address.high != 0 && port_sas_address.low != 0) { | 1247 | if (port_sas_address.high != 0 && port_sas_address.low != 0) { |
1368 | struct sci_sas_address phy_sas_address; | 1248 | struct sci_sas_address phy_sas_address; |
@@ -1370,32 +1250,32 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, | |||
1370 | /* Make sure that the PHY SAS Address matches the SAS Address | 1250 | /* Make sure that the PHY SAS Address matches the SAS Address |
1371 | * for this port | 1251 | * for this port |
1372 | */ | 1252 | */ |
1373 | scic_sds_phy_get_sas_address(iphy, &phy_sas_address); | 1253 | sci_phy_get_sas_address(iphy, &phy_sas_address); |
1374 | 1254 | ||
1375 | if (port_sas_address.high != phy_sas_address.high || | 1255 | if (port_sas_address.high != phy_sas_address.high || |
1376 | port_sas_address.low != phy_sas_address.low) | 1256 | port_sas_address.low != phy_sas_address.low) |
1377 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; | 1257 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; |
1378 | } | 1258 | } |
1379 | return scic_sds_port_set_phy(iport, iphy); | 1259 | return sci_port_set_phy(iport, iphy); |
1380 | } | 1260 | } |
1381 | case SCI_PORT_SUB_WAITING: | 1261 | case SCI_PORT_SUB_WAITING: |
1382 | case SCI_PORT_SUB_OPERATIONAL: | 1262 | case SCI_PORT_SUB_OPERATIONAL: |
1383 | status = scic_sds_port_set_phy(iport, iphy); | 1263 | status = sci_port_set_phy(iport, iphy); |
1384 | 1264 | ||
1385 | if (status != SCI_SUCCESS) | 1265 | if (status != SCI_SUCCESS) |
1386 | return status; | 1266 | return status; |
1387 | 1267 | ||
1388 | scic_sds_port_general_link_up_handler(iport, iphy, true); | 1268 | sci_port_general_link_up_handler(iport, iphy, true); |
1389 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; | 1269 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; |
1390 | port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); | 1270 | port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); |
1391 | 1271 | ||
1392 | return status; | 1272 | return status; |
1393 | case SCI_PORT_SUB_CONFIGURING: | 1273 | case SCI_PORT_SUB_CONFIGURING: |
1394 | status = scic_sds_port_set_phy(iport, iphy); | 1274 | status = sci_port_set_phy(iport, iphy); |
1395 | 1275 | ||
1396 | if (status != SCI_SUCCESS) | 1276 | if (status != SCI_SUCCESS) |
1397 | return status; | 1277 | return status; |
1398 | scic_sds_port_general_link_up_handler(iport, iphy, true); | 1278 | sci_port_general_link_up_handler(iport, iphy, true); |
1399 | 1279 | ||
1400 | /* Re-enter the configuring state since this may be the last phy in | 1280 | /* Re-enter the configuring state since this may be the last phy in |
1401 | * the port. | 1281 | * the port. |
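In the SCI_PORT_STOPPED case above, a phy may only be added to a port that already carries a SAS address if the two addresses agree. A small sketch of that gate; the struct and helper names are illustrative, though the high/low split mirrors struct sci_sas_address:

#include <stdbool.h>
#include <stdint.h>

struct sas_addr { uint32_t high, low; };

static bool sas_addr_equal(struct sas_addr a, struct sas_addr b)
{
    return a.high == b.high && a.low == b.low;
}

/* Only enforce the match when the port already has a non-zero address,
 * as the SCI_PORT_STOPPED branch above does. */
static bool phy_may_join_port(struct sas_addr port_addr, struct sas_addr phy_addr)
{
    if (port_addr.high != 0 && port_addr.low != 0)
        return sas_addr_equal(port_addr, phy_addr);
    return true;    /* port has no address yet; any phy is acceptable */
}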
@@ -1411,7 +1291,7 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, | |||
1411 | } | 1291 | } |
1412 | 1292 | ||
1413 | /** | 1293 | /** |
1414 | * scic_sds_port_remove_phy() - | 1294 | * sci_port_remove_phy() - |
1415 | * @sci_port: This parameter specifies the port from which the phy will be removed. | 1295 | * @sci_port: This parameter specifies the port from which the phy will be removed. |
1416 | * @sci_phy: This parameter is the phy which is to be removed from the port. | 1296 | * @sci_phy: This parameter is the phy which is to be removed from the port. |
1417 | * | 1297 | * |
@@ -1419,33 +1299,33 @@ enum sci_status scic_sds_port_add_phy(struct isci_port *iport, | |||
1419 | * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any | 1299 | * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any |
1420 | * other status is a failure to remove the phy from the port. | 1300 | * other status is a failure to remove the phy from the port. |
1421 | */ | 1301 | */ |
1422 | enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, | 1302 | enum sci_status sci_port_remove_phy(struct isci_port *iport, |
1423 | struct isci_phy *iphy) | 1303 | struct isci_phy *iphy) |
1424 | { | 1304 | { |
1425 | enum sci_status status; | 1305 | enum sci_status status; |
1426 | enum scic_sds_port_states state; | 1306 | enum sci_port_states state; |
1427 | 1307 | ||
1428 | state = iport->sm.current_state_id; | 1308 | state = iport->sm.current_state_id; |
1429 | 1309 | ||
1430 | switch (state) { | 1310 | switch (state) { |
1431 | case SCI_PORT_STOPPED: | 1311 | case SCI_PORT_STOPPED: |
1432 | return scic_sds_port_clear_phy(iport, iphy); | 1312 | return sci_port_clear_phy(iport, iphy); |
1433 | case SCI_PORT_SUB_OPERATIONAL: | 1313 | case SCI_PORT_SUB_OPERATIONAL: |
1434 | status = scic_sds_port_clear_phy(iport, iphy); | 1314 | status = sci_port_clear_phy(iport, iphy); |
1435 | if (status != SCI_SUCCESS) | 1315 | if (status != SCI_SUCCESS) |
1436 | return status; | 1316 | return status; |
1437 | 1317 | ||
1438 | scic_sds_port_deactivate_phy(iport, iphy, true); | 1318 | sci_port_deactivate_phy(iport, iphy, true); |
1439 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; | 1319 | iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; |
1440 | port_state_machine_change(iport, | 1320 | port_state_machine_change(iport, |
1441 | SCI_PORT_SUB_CONFIGURING); | 1321 | SCI_PORT_SUB_CONFIGURING); |
1442 | return SCI_SUCCESS; | 1322 | return SCI_SUCCESS; |
1443 | case SCI_PORT_SUB_CONFIGURING: | 1323 | case SCI_PORT_SUB_CONFIGURING: |
1444 | status = scic_sds_port_clear_phy(iport, iphy); | 1324 | status = sci_port_clear_phy(iport, iphy); |
1445 | 1325 | ||
1446 | if (status != SCI_SUCCESS) | 1326 | if (status != SCI_SUCCESS) |
1447 | return status; | 1327 | return status; |
1448 | scic_sds_port_deactivate_phy(iport, iphy, true); | 1328 | sci_port_deactivate_phy(iport, iphy, true); |
1449 | 1329 | ||
1450 | /* Re-enter the configuring state since this may be the last phy in | 1330 | /* Re-enter the configuring state since this may be the last phy in |
1451 | * the port | 1331 | * the port |
@@ -1460,10 +1340,10 @@ enum sci_status scic_sds_port_remove_phy(struct isci_port *iport, | |||
1460 | } | 1340 | } |
1461 | } | 1341 | } |
1462 | 1342 | ||
1463 | enum sci_status scic_sds_port_link_up(struct isci_port *iport, | 1343 | enum sci_status sci_port_link_up(struct isci_port *iport, |
1464 | struct isci_phy *iphy) | 1344 | struct isci_phy *iphy) |
1465 | { | 1345 | { |
1466 | enum scic_sds_port_states state; | 1346 | enum sci_port_states state; |
1467 | 1347 | ||
1468 | state = iport->sm.current_state_id; | 1348 | state = iport->sm.current_state_id; |
1469 | switch (state) { | 1349 | switch (state) { |
@@ -1471,13 +1351,13 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, | |||
1471 | /* Since this is the first phy going link up for the port we | 1351 | /* Since this is the first phy going link up for the port we |
1472 | * can just enable it and continue | 1352 | * can just enable it and continue |
1473 | */ | 1353 | */ |
1474 | scic_sds_port_activate_phy(iport, iphy, true); | 1354 | sci_port_activate_phy(iport, iphy, true); |
1475 | 1355 | ||
1476 | port_state_machine_change(iport, | 1356 | port_state_machine_change(iport, |
1477 | SCI_PORT_SUB_OPERATIONAL); | 1357 | SCI_PORT_SUB_OPERATIONAL); |
1478 | return SCI_SUCCESS; | 1358 | return SCI_SUCCESS; |
1479 | case SCI_PORT_SUB_OPERATIONAL: | 1359 | case SCI_PORT_SUB_OPERATIONAL: |
1480 | scic_sds_port_general_link_up_handler(iport, iphy, true); | 1360 | sci_port_general_link_up_handler(iport, iphy, true); |
1481 | return SCI_SUCCESS; | 1361 | return SCI_SUCCESS; |
1482 | case SCI_PORT_RESETTING: | 1362 | case SCI_PORT_RESETTING: |
1483 | /* TODO We should make sure that the phy that has gone | 1363 | /* TODO We should make sure that the phy that has gone |
@@ -1494,7 +1374,7 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, | |||
1494 | /* In the resetting state we don't notify the user regarding | 1374 | /* In the resetting state we don't notify the user regarding |
1495 | * link up and link down notifications. | 1375 | * link up and link down notifications. |
1496 | */ | 1376 | */ |
1497 | scic_sds_port_general_link_up_handler(iport, iphy, false); | 1377 | sci_port_general_link_up_handler(iport, iphy, false); |
1498 | return SCI_SUCCESS; | 1378 | return SCI_SUCCESS; |
1499 | default: | 1379 | default: |
1500 | dev_warn(sciport_to_dev(iport), | 1380 | dev_warn(sciport_to_dev(iport), |
@@ -1503,15 +1383,15 @@ enum sci_status scic_sds_port_link_up(struct isci_port *iport, | |||
1503 | } | 1383 | } |
1504 | } | 1384 | } |
1505 | 1385 | ||
1506 | enum sci_status scic_sds_port_link_down(struct isci_port *iport, | 1386 | enum sci_status sci_port_link_down(struct isci_port *iport, |
1507 | struct isci_phy *iphy) | 1387 | struct isci_phy *iphy) |
1508 | { | 1388 | { |
1509 | enum scic_sds_port_states state; | 1389 | enum sci_port_states state; |
1510 | 1390 | ||
1511 | state = iport->sm.current_state_id; | 1391 | state = iport->sm.current_state_id; |
1512 | switch (state) { | 1392 | switch (state) { |
1513 | case SCI_PORT_SUB_OPERATIONAL: | 1393 | case SCI_PORT_SUB_OPERATIONAL: |
1514 | scic_sds_port_deactivate_phy(iport, iphy, true); | 1394 | sci_port_deactivate_phy(iport, iphy, true); |
1515 | 1395 | ||
1516 | /* If there are no active phys left in the port, then | 1396 | /* If there are no active phys left in the port, then |
1517 | * transition the port to the WAITING state until such time | 1397 | * transition the port to the WAITING state until such time |
@@ -1524,7 +1404,7 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, | |||
1524 | case SCI_PORT_RESETTING: | 1404 | case SCI_PORT_RESETTING: |
1525 | /* In the resetting state we don't notify the user regarding | 1405 | /* In the resetting state we don't notify the user regarding |
1526 | * link up and link down notifications. */ | 1406 | * link up and link down notifications. */ |
1527 | scic_sds_port_deactivate_phy(iport, iphy, false); | 1407 | sci_port_deactivate_phy(iport, iphy, false); |
1528 | return SCI_SUCCESS; | 1408 | return SCI_SUCCESS; |
1529 | default: | 1409 | default: |
1530 | dev_warn(sciport_to_dev(iport), | 1410 | dev_warn(sciport_to_dev(iport), |
@@ -1533,11 +1413,11 @@ enum sci_status scic_sds_port_link_down(struct isci_port *iport, | |||
1533 | } | 1413 | } |
1534 | } | 1414 | } |
1535 | 1415 | ||
1536 | enum sci_status scic_sds_port_start_io(struct isci_port *iport, | 1416 | enum sci_status sci_port_start_io(struct isci_port *iport, |
1537 | struct isci_remote_device *idev, | 1417 | struct isci_remote_device *idev, |
1538 | struct isci_request *ireq) | 1418 | struct isci_request *ireq) |
1539 | { | 1419 | { |
1540 | enum scic_sds_port_states state; | 1420 | enum sci_port_states state; |
1541 | 1421 | ||
1542 | state = iport->sm.current_state_id; | 1422 | state = iport->sm.current_state_id; |
1543 | switch (state) { | 1423 | switch (state) { |
@@ -1553,11 +1433,11 @@ enum sci_status scic_sds_port_start_io(struct isci_port *iport, | |||
1553 | } | 1433 | } |
1554 | } | 1434 | } |
1555 | 1435 | ||
1556 | enum sci_status scic_sds_port_complete_io(struct isci_port *iport, | 1436 | enum sci_status sci_port_complete_io(struct isci_port *iport, |
1557 | struct isci_remote_device *idev, | 1437 | struct isci_remote_device *idev, |
1558 | struct isci_request *ireq) | 1438 | struct isci_request *ireq) |
1559 | { | 1439 | { |
1560 | enum scic_sds_port_states state; | 1440 | enum sci_port_states state; |
1561 | 1441 | ||
1562 | state = iport->sm.current_state_id; | 1442 | state = iport->sm.current_state_id; |
1563 | switch (state) { | 1443 | switch (state) { |
@@ -1566,7 +1446,7 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, | |||
1566 | "%s: in wrong state: %d\n", __func__, state); | 1446 | "%s: in wrong state: %d\n", __func__, state); |
1567 | return SCI_FAILURE_INVALID_STATE; | 1447 | return SCI_FAILURE_INVALID_STATE; |
1568 | case SCI_PORT_STOPPING: | 1448 | case SCI_PORT_STOPPING: |
1569 | scic_sds_port_decrement_request_count(iport); | 1449 | sci_port_decrement_request_count(iport); |
1570 | 1450 | ||
1571 | if (iport->started_request_count == 0) | 1451 | if (iport->started_request_count == 0) |
1572 | port_state_machine_change(iport, | 1452 | port_state_machine_change(iport, |
@@ -1577,10 +1457,10 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, | |||
1577 | case SCI_PORT_FAILED: | 1457 | case SCI_PORT_FAILED: |
1578 | case SCI_PORT_SUB_WAITING: | 1458 | case SCI_PORT_SUB_WAITING: |
1579 | case SCI_PORT_SUB_OPERATIONAL: | 1459 | case SCI_PORT_SUB_OPERATIONAL: |
1580 | scic_sds_port_decrement_request_count(iport); | 1460 | sci_port_decrement_request_count(iport); |
1581 | break; | 1461 | break; |
1582 | case SCI_PORT_SUB_CONFIGURING: | 1462 | case SCI_PORT_SUB_CONFIGURING: |
1583 | scic_sds_port_decrement_request_count(iport); | 1463 | sci_port_decrement_request_count(iport); |
1584 | if (iport->started_request_count == 0) { | 1464 | if (iport->started_request_count == 0) { |
1585 | port_state_machine_change(iport, | 1465 | port_state_machine_change(iport, |
1586 | SCI_PORT_SUB_OPERATIONAL); | 1466 | SCI_PORT_SUB_OPERATIONAL); |
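sci_port_complete_io() decrements the started-request count in every reachable state and, in SCI_PORT_STOPPING and SCI_PORT_SUB_CONFIGURING, only moves on once the count reaches zero; port.h below guards the same counter against underflow with WARN_ONCE. A sketch of that counting discipline with a plain assert standing in for WARN_ONCE:

#include <assert.h>
#include <stdbool.h>

struct port { unsigned started_request_count; bool stopping; };

static void port_stopped(struct port *p) { (void)p; /* final transition */ }

/* Decrement with an underflow guard, then finish stopping once the last
 * outstanding request completes. */
static void port_complete_io(struct port *p)
{
    assert(p->started_request_count > 0 &&
           "tried to decrement started_request_count past 0");
    p->started_request_count--;

    if (p->stopping && p->started_request_count == 0)
        port_stopped(p);
}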
@@ -1590,32 +1470,17 @@ enum sci_status scic_sds_port_complete_io(struct isci_port *iport, | |||
1590 | return SCI_SUCCESS; | 1470 | return SCI_SUCCESS; |
1591 | } | 1471 | } |
1592 | 1472 | ||
1593 | /** | 1473 | static void sci_port_enable_port_task_scheduler(struct isci_port *iport) |
1594 | * | ||
1595 | * @sci_port: This is the port object which to suspend. | ||
1596 | * | ||
1597 | * This method will enable the SCU Port Task Scheduler for this port object but | ||
1598 | * will leave the port task scheduler in a suspended state. none | ||
1599 | */ | ||
1600 | static void | ||
1601 | scic_sds_port_enable_port_task_scheduler(struct isci_port *iport) | ||
1602 | { | 1474 | { |
1603 | u32 pts_control_value; | 1475 | u32 pts_control_value; |
1604 | 1476 | ||
1477 | /* enable the port task scheduler in a suspended state */ | ||
1605 | pts_control_value = readl(&iport->port_task_scheduler_registers->control); | 1478 | pts_control_value = readl(&iport->port_task_scheduler_registers->control); |
1606 | pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); | 1479 | pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); |
1607 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); | 1480 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); |
1608 | } | 1481 | } |
1609 | 1482 | ||
1610 | /** | 1483 | static void sci_port_disable_port_task_scheduler(struct isci_port *iport) |
1611 | * | ||
1612 | * @sci_port: This is the port object which to resume. | ||
1613 | * | ||
1614 | * This method will disable the SCU port task scheduler for this port object. | ||
1615 | * none | ||
1616 | */ | ||
1617 | static void | ||
1618 | scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) | ||
1619 | { | 1484 | { |
1620 | u32 pts_control_value; | 1485 | u32 pts_control_value; |
1621 | 1486 | ||
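Enabling, suspending and disabling the port task scheduler are all read-modify-write cycles on one memory-mapped control register. A user-space sketch of the pattern against a plain variable; the bit names are placeholders, readl/writel are modeled as trivial accessors, and the disable path is assumed to clear the same bits (this hunk only shows its final writel):

#include <stdint.h>

/* Placeholder bit positions for the PTS control register. */
#define PTS_ENABLE   (1u << 0)
#define PTS_SUSPEND  (1u << 1)

static uint32_t fake_pts_control;   /* stands in for the MMIO register */

static uint32_t reg_read(volatile uint32_t *reg)              { return *reg; }
static void     reg_write(uint32_t v, volatile uint32_t *reg) { *reg = v; }

/* Enable the scheduler but leave it suspended, as the stopped-state exit
 * handler does before the port becomes operational. */
static void pts_enable_suspended(void)
{
    uint32_t v = reg_read(&fake_pts_control);

    v |= PTS_ENABLE | PTS_SUSPEND;
    reg_write(v, &fake_pts_control);
}

/* Tear the scheduler down when the port stops (assumed bit clear). */
static void pts_disable(void)
{
    uint32_t v = reg_read(&fake_pts_control);

    v &= ~(PTS_ENABLE | PTS_SUSPEND);
    reg_write(v, &fake_pts_control);
}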
@@ -1625,7 +1490,7 @@ scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) | |||
1625 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); | 1490 | writel(pts_control_value, &iport->port_task_scheduler_registers->control); |
1626 | } | 1491 | } |
1627 | 1492 | ||
1628 | static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) | 1493 | static void sci_port_post_dummy_remote_node(struct isci_port *iport) |
1629 | { | 1494 | { |
1630 | struct isci_host *ihost = iport->owning_controller; | 1495 | struct isci_host *ihost = iport->owning_controller; |
1631 | u8 phys_index = iport->physical_port_index; | 1496 | u8 phys_index = iport->physical_port_index; |
@@ -1639,7 +1504,7 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) | |||
1639 | command = SCU_CONTEXT_COMMAND_POST_RNC_32 | | 1504 | command = SCU_CONTEXT_COMMAND_POST_RNC_32 | |
1640 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1505 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1641 | 1506 | ||
1642 | scic_sds_controller_post_request(ihost, command); | 1507 | sci_controller_post_request(ihost, command); |
1643 | 1508 | ||
1644 | /* ensure hardware has seen the post rnc command and give it | 1509 | /* ensure hardware has seen the post rnc command and give it |
1645 | * ample time to act before sending the suspend | 1510 | * ample time to act before sending the suspend |
@@ -1650,10 +1515,10 @@ static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) | |||
1650 | command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | | 1515 | command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | |
1651 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1516 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1652 | 1517 | ||
1653 | scic_sds_controller_post_request(ihost, command); | 1518 | sci_controller_post_request(ihost, command); |
1654 | } | 1519 | } |
1655 | 1520 | ||
1656 | static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) | 1521 | static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) |
1657 | { | 1522 | { |
1658 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1523 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1659 | 1524 | ||
@@ -1662,19 +1527,19 @@ static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) | |||
1662 | * If we enter this state because of a request to stop | 1527 | * If we enter this state because of a request to stop |
1663 | * the port then we want to disable the hardware's port | 1528 | * the port then we want to disable the hardware's port |
1664 | * task scheduler. */ | 1529 | * task scheduler. */ |
1665 | scic_sds_port_disable_port_task_scheduler(iport); | 1530 | sci_port_disable_port_task_scheduler(iport); |
1666 | } | 1531 | } |
1667 | } | 1532 | } |
1668 | 1533 | ||
1669 | static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) | 1534 | static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) |
1670 | { | 1535 | { |
1671 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1536 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1672 | 1537 | ||
1673 | /* Enable and suspend the port task scheduler */ | 1538 | /* Enable and suspend the port task scheduler */ |
1674 | scic_sds_port_enable_port_task_scheduler(iport); | 1539 | sci_port_enable_port_task_scheduler(iport); |
1675 | } | 1540 | } |
1676 | 1541 | ||
1677 | static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) | 1542 | static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) |
1678 | { | 1543 | { |
1679 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1544 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1680 | struct isci_host *ihost = iport->owning_controller; | 1545 | struct isci_host *ihost = iport->owning_controller; |
@@ -1687,30 +1552,30 @@ static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) | |||
1687 | isci_port_not_ready(ihost, iport); | 1552 | isci_port_not_ready(ihost, iport); |
1688 | 1553 | ||
1689 | /* Post and suspend the dummy remote node context for this port. */ | 1554 | /* Post and suspend the dummy remote node context for this port. */ |
1690 | scic_sds_port_post_dummy_remote_node(iport); | 1555 | sci_port_post_dummy_remote_node(iport); |
1691 | 1556 | ||
1692 | /* Start the ready substate machine */ | 1557 | /* Start the ready substate machine */ |
1693 | port_state_machine_change(iport, | 1558 | port_state_machine_change(iport, |
1694 | SCI_PORT_SUB_WAITING); | 1559 | SCI_PORT_SUB_WAITING); |
1695 | } | 1560 | } |
1696 | 1561 | ||
1697 | static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm) | 1562 | static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) |
1698 | { | 1563 | { |
1699 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1564 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1700 | 1565 | ||
1701 | sci_del_timer(&iport->timer); | 1566 | sci_del_timer(&iport->timer); |
1702 | } | 1567 | } |
1703 | 1568 | ||
1704 | static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm) | 1569 | static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) |
1705 | { | 1570 | { |
1706 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1571 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1707 | 1572 | ||
1708 | sci_del_timer(&iport->timer); | 1573 | sci_del_timer(&iport->timer); |
1709 | 1574 | ||
1710 | scic_sds_port_destroy_dummy_resources(iport); | 1575 | sci_port_destroy_dummy_resources(iport); |
1711 | } | 1576 | } |
1712 | 1577 | ||
1713 | static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) | 1578 | static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) |
1714 | { | 1579 | { |
1715 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1580 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1716 | 1581 | ||
@@ -1719,40 +1584,40 @@ static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm) | |||
1719 | 1584 | ||
1720 | /* --------------------------------------------------------------------------- */ | 1585 | /* --------------------------------------------------------------------------- */ |
1721 | 1586 | ||
1722 | static const struct sci_base_state scic_sds_port_state_table[] = { | 1587 | static const struct sci_base_state sci_port_state_table[] = { |
1723 | [SCI_PORT_STOPPED] = { | 1588 | [SCI_PORT_STOPPED] = { |
1724 | .enter_state = scic_sds_port_stopped_state_enter, | 1589 | .enter_state = sci_port_stopped_state_enter, |
1725 | .exit_state = scic_sds_port_stopped_state_exit | 1590 | .exit_state = sci_port_stopped_state_exit |
1726 | }, | 1591 | }, |
1727 | [SCI_PORT_STOPPING] = { | 1592 | [SCI_PORT_STOPPING] = { |
1728 | .exit_state = scic_sds_port_stopping_state_exit | 1593 | .exit_state = sci_port_stopping_state_exit |
1729 | }, | 1594 | }, |
1730 | [SCI_PORT_READY] = { | 1595 | [SCI_PORT_READY] = { |
1731 | .enter_state = scic_sds_port_ready_state_enter, | 1596 | .enter_state = sci_port_ready_state_enter, |
1732 | }, | 1597 | }, |
1733 | [SCI_PORT_SUB_WAITING] = { | 1598 | [SCI_PORT_SUB_WAITING] = { |
1734 | .enter_state = scic_sds_port_ready_substate_waiting_enter, | 1599 | .enter_state = sci_port_ready_substate_waiting_enter, |
1735 | }, | 1600 | }, |
1736 | [SCI_PORT_SUB_OPERATIONAL] = { | 1601 | [SCI_PORT_SUB_OPERATIONAL] = { |
1737 | .enter_state = scic_sds_port_ready_substate_operational_enter, | 1602 | .enter_state = sci_port_ready_substate_operational_enter, |
1738 | .exit_state = scic_sds_port_ready_substate_operational_exit | 1603 | .exit_state = sci_port_ready_substate_operational_exit |
1739 | }, | 1604 | }, |
1740 | [SCI_PORT_SUB_CONFIGURING] = { | 1605 | [SCI_PORT_SUB_CONFIGURING] = { |
1741 | .enter_state = scic_sds_port_ready_substate_configuring_enter, | 1606 | .enter_state = sci_port_ready_substate_configuring_enter, |
1742 | .exit_state = scic_sds_port_ready_substate_configuring_exit | 1607 | .exit_state = sci_port_ready_substate_configuring_exit |
1743 | }, | 1608 | }, |
1744 | [SCI_PORT_RESETTING] = { | 1609 | [SCI_PORT_RESETTING] = { |
1745 | .exit_state = scic_sds_port_resetting_state_exit | 1610 | .exit_state = sci_port_resetting_state_exit |
1746 | }, | 1611 | }, |
1747 | [SCI_PORT_FAILED] = { | 1612 | [SCI_PORT_FAILED] = { |
1748 | .enter_state = scic_sds_port_failed_state_enter, | 1613 | .enter_state = sci_port_failed_state_enter, |
1749 | } | 1614 | } |
1750 | }; | 1615 | }; |
1751 | 1616 | ||
1752 | void scic_sds_port_construct(struct isci_port *iport, u8 index, | 1617 | void sci_port_construct(struct isci_port *iport, u8 index, |
1753 | struct isci_host *ihost) | 1618 | struct isci_host *ihost) |
1754 | { | 1619 | { |
1755 | sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); | 1620 | sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); |
1756 | 1621 | ||
1757 | iport->logical_port_index = SCIC_SDS_DUMMY_PORT; | 1622 | iport->logical_port_index = SCIC_SDS_DUMMY_PORT; |
1758 | iport->physical_port_index = index; | 1623 | iport->physical_port_index = index; |
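The state table at the end of the file pairs each port state with optional enter/exit callbacks, and sci_init_sm() starts the machine in SCI_PORT_STOPPED. A compact sketch of how such a table can drive transitions; the table layout mirrors the hunk above, while change_state() is a simplified stand-in for the driver's state machine core:

#include <stddef.h>

struct sm;
typedef void (*state_handler)(struct sm *sm);

struct state { state_handler enter_state, exit_state; };
struct sm    { const struct state *table; unsigned current; };

enum { PORT_STOPPED, PORT_READY, PORT_STATE_COUNT };

static void stopped_enter(struct sm *sm) { (void)sm; /* e.g. disable the PTS */ }
static void stopped_exit(struct sm *sm)  { (void)sm; /* e.g. enable+suspend the PTS */ }

static const struct state port_state_table[PORT_STATE_COUNT] = {
    [PORT_STOPPED] = { .enter_state = stopped_enter, .exit_state = stopped_exit },
    [PORT_READY]   = { /* no handlers required for this state */ },
};

/* Run the old state's exit handler, then the new state's enter handler. */
static void change_state(struct sm *sm, unsigned next)
{
    if (sm->table[sm->current].exit_state)
        sm->table[sm->current].exit_state(sm);
    sm->current = next;
    if (sm->table[sm->current].enter_state)
        sm->table[sm->current].enter_state(sm);
}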
@@ -1798,9 +1663,7 @@ enum isci_status isci_port_get_state( | |||
1798 | return isci_port->status; | 1663 | return isci_port->status; |
1799 | } | 1664 | } |
1800 | 1665 | ||
1801 | void scic_sds_port_broadcast_change_received( | 1666 | void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) |
1802 | struct isci_port *iport, | ||
1803 | struct isci_phy *iphy) | ||
1804 | { | 1667 | { |
1805 | struct isci_host *ihost = iport->owning_controller; | 1668 | struct isci_host *ihost = iport->owning_controller; |
1806 | 1669 | ||
@@ -1823,7 +1686,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1823 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1686 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1824 | 1687 | ||
1825 | #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT | 1688 | #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT |
1826 | status = scic_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); | 1689 | status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); |
1827 | 1690 | ||
1828 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1691 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1829 | 1692 | ||
@@ -1840,7 +1703,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1840 | ret = TMF_RESP_FUNC_FAILED; | 1703 | ret = TMF_RESP_FUNC_FAILED; |
1841 | 1704 | ||
1842 | dev_err(&ihost->pdev->dev, | 1705 | dev_err(&ihost->pdev->dev, |
1843 | "%s: iport = %p; scic_port_hard_reset call" | 1706 | "%s: iport = %p; sci_port_hard_reset call" |
1844 | " failed 0x%x\n", | 1707 | " failed 0x%x\n", |
1845 | __func__, iport, status); | 1708 | __func__, iport, status); |
1846 | 1709 | ||
@@ -1863,8 +1726,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor | |||
1863 | 1726 | ||
1864 | if (!iphy) | 1727 | if (!iphy) |
1865 | continue; | 1728 | continue; |
1866 | scic_sds_phy_stop(iphy); | 1729 | sci_phy_stop(iphy); |
1867 | scic_sds_phy_start(iphy); | 1730 | sci_phy_start(iphy); |
1868 | } | 1731 | } |
1869 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1732 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1870 | } | 1733 | } |
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 9a9be7b47b4a..4c4ab8126d9f 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h | |||
@@ -123,7 +123,7 @@ struct isci_port { | |||
123 | struct scu_viit_entry __iomem *viit_registers; | 123 | struct scu_viit_entry __iomem *viit_registers; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | enum scic_port_not_ready_reason_code { | 126 | enum sci_port_not_ready_reason_code { |
127 | SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, | 127 | SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS, |
128 | SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, | 128 | SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED, |
129 | SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, | 129 | SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION, |
@@ -132,25 +132,25 @@ enum scic_port_not_ready_reason_code { | |||
132 | SCIC_PORT_NOT_READY_REASON_CODE_MAX | 132 | SCIC_PORT_NOT_READY_REASON_CODE_MAX |
133 | }; | 133 | }; |
134 | 134 | ||
135 | struct scic_port_end_point_properties { | 135 | struct sci_port_end_point_properties { |
136 | struct sci_sas_address sas_address; | 136 | struct sci_sas_address sas_address; |
137 | struct scic_phy_proto protocols; | 137 | struct sci_phy_proto protocols; |
138 | }; | 138 | }; |
139 | 139 | ||
140 | struct scic_port_properties { | 140 | struct sci_port_properties { |
141 | u32 index; | 141 | u32 index; |
142 | struct scic_port_end_point_properties local; | 142 | struct sci_port_end_point_properties local; |
143 | struct scic_port_end_point_properties remote; | 143 | struct sci_port_end_point_properties remote; |
144 | u32 phy_mask; | 144 | u32 phy_mask; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | /** | 147 | /** |
148 | * enum scic_sds_port_states - This enumeration depicts all the states for the | 148 | * enum sci_port_states - This enumeration depicts all the states for the |
149 | * common port state machine. | 149 | * common port state machine. |
150 | * | 150 | * |
151 | * | 151 | * |
152 | */ | 152 | */ |
153 | enum scic_sds_port_states { | 153 | enum sci_port_states { |
154 | /** | 154 | /** |
155 | * This state indicates that the port has successfully been stopped. | 155 | * This state indicates that the port has successfully been stopped. |
156 | * In this state no new IO operations are permitted. | 156 | * In this state no new IO operations are permitted. |
@@ -211,23 +211,23 @@ enum scic_sds_port_states { | |||
211 | }; | 211 | }; |
212 | 212 | ||
213 | /** | 213 | /** |
214 | * scic_sds_port_get_controller() - | 214 | * sci_port_get_controller() - |
215 | * | 215 | * |
216 | * Helper macro to get the owning controller of this port | 216 | * Helper macro to get the owning controller of this port |
217 | */ | 217 | */ |
218 | #define scic_sds_port_get_controller(this_port) \ | 218 | #define sci_port_get_controller(this_port) \ |
219 | ((this_port)->owning_controller) | 219 | ((this_port)->owning_controller) |
220 | 220 | ||
221 | /** | 221 | /** |
222 | * scic_sds_port_get_index() - | 222 | * sci_port_get_index() - |
223 | * | 223 | * |
224 | * This macro returns the physical port index for this port object | 224 | * This macro returns the physical port index for this port object |
225 | */ | 225 | */ |
226 | #define scic_sds_port_get_index(this_port) \ | 226 | #define sci_port_get_index(this_port) \ |
227 | ((this_port)->physical_port_index) | 227 | ((this_port)->physical_port_index) |
228 | 228 | ||
229 | 229 | ||
230 | static inline void scic_sds_port_decrement_request_count(struct isci_port *iport) | 230 | static inline void sci_port_decrement_request_count(struct isci_port *iport) |
231 | { | 231 | { |
232 | if (WARN_ONCE(iport->started_request_count == 0, | 232 | if (WARN_ONCE(iport->started_request_count == 0, |
233 | "%s: tried to decrement started_request_count past 0!?", | 233 | "%s: tried to decrement started_request_count past 0!?", |
@@ -237,79 +237,73 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport | |||
237 | iport->started_request_count--; | 237 | iport->started_request_count--; |
238 | } | 238 | } |
239 | 239 | ||
240 | #define scic_sds_port_active_phy(port, phy) \ | 240 | #define sci_port_active_phy(port, phy) \ |
241 | (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) | 241 | (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0) |
242 | 242 | ||
243 | void scic_sds_port_construct( | 243 | void sci_port_construct( |
244 | struct isci_port *iport, | 244 | struct isci_port *iport, |
245 | u8 port_index, | 245 | u8 port_index, |
246 | struct isci_host *ihost); | 246 | struct isci_host *ihost); |
247 | 247 | ||
248 | enum sci_status scic_sds_port_initialize( | 248 | enum sci_status sci_port_start(struct isci_port *iport); |
249 | struct isci_port *iport, | 249 | enum sci_status sci_port_stop(struct isci_port *iport); |
250 | void __iomem *port_task_scheduler_registers, | ||
251 | void __iomem *port_configuration_regsiter, | ||
252 | void __iomem *viit_registers); | ||
253 | |||
254 | enum sci_status scic_sds_port_start(struct isci_port *iport); | ||
255 | enum sci_status scic_sds_port_stop(struct isci_port *iport); | ||
256 | 250 | ||
257 | enum sci_status scic_sds_port_add_phy( | 251 | enum sci_status sci_port_add_phy( |
258 | struct isci_port *iport, | 252 | struct isci_port *iport, |
259 | struct isci_phy *iphy); | 253 | struct isci_phy *iphy); |
260 | 254 | ||
261 | enum sci_status scic_sds_port_remove_phy( | 255 | enum sci_status sci_port_remove_phy( |
262 | struct isci_port *iport, | 256 | struct isci_port *iport, |
263 | struct isci_phy *iphy); | 257 | struct isci_phy *iphy); |
264 | 258 | ||
265 | void scic_sds_port_setup_transports( | 259 | void sci_port_setup_transports( |
266 | struct isci_port *iport, | 260 | struct isci_port *iport, |
267 | u32 device_id); | 261 | u32 device_id); |
268 | 262 | ||
269 | void isci_port_bcn_enable(struct isci_host *, struct isci_port *); | 263 | void isci_port_bcn_enable(struct isci_host *, struct isci_port *); |
270 | 264 | ||
271 | void scic_sds_port_deactivate_phy( | 265 | void sci_port_deactivate_phy( |
272 | struct isci_port *iport, | 266 | struct isci_port *iport, |
273 | struct isci_phy *iphy, | 267 | struct isci_phy *iphy, |
274 | bool do_notify_user); | 268 | bool do_notify_user); |
275 | 269 | ||
276 | bool scic_sds_port_link_detected( | 270 | bool sci_port_link_detected( |
277 | struct isci_port *iport, | 271 | struct isci_port *iport, |
278 | struct isci_phy *iphy); | 272 | struct isci_phy *iphy); |
279 | 273 | ||
280 | enum sci_status scic_sds_port_link_up(struct isci_port *iport, | 274 | enum sci_status sci_port_link_up(struct isci_port *iport, |
281 | struct isci_phy *iphy); | 275 | struct isci_phy *iphy); |
282 | enum sci_status scic_sds_port_link_down(struct isci_port *iport, | 276 | enum sci_status sci_port_link_down(struct isci_port *iport, |
283 | struct isci_phy *iphy); | 277 | struct isci_phy *iphy); |
284 | 278 | ||
285 | struct isci_request; | 279 | struct isci_request; |
286 | struct isci_remote_device; | 280 | struct isci_remote_device; |
287 | enum sci_status scic_sds_port_start_io( | 281 | enum sci_status sci_port_start_io( |
288 | struct isci_port *iport, | 282 | struct isci_port *iport, |
289 | struct isci_remote_device *idev, | 283 | struct isci_remote_device *idev, |
290 | struct isci_request *ireq); | 284 | struct isci_request *ireq); |
291 | 285 | ||
292 | enum sci_status scic_sds_port_complete_io( | 286 | enum sci_status sci_port_complete_io( |
293 | struct isci_port *iport, | 287 | struct isci_port *iport, |
294 | struct isci_remote_device *idev, | 288 | struct isci_remote_device *idev, |
295 | struct isci_request *ireq); | 289 | struct isci_request *ireq); |
296 | 290 | ||
297 | enum sas_linkrate scic_sds_port_get_max_allowed_speed( | 291 | enum sas_linkrate sci_port_get_max_allowed_speed( |
298 | struct isci_port *iport); | 292 | struct isci_port *iport); |
299 | 293 | ||
300 | void scic_sds_port_broadcast_change_received( | 294 | void sci_port_broadcast_change_received( |
301 | struct isci_port *iport, | 295 | struct isci_port *iport, |
302 | struct isci_phy *iphy); | 296 | struct isci_phy *iphy); |
303 | 297 | ||
304 | bool scic_sds_port_is_valid_phy_assignment( | 298 | bool sci_port_is_valid_phy_assignment( |
305 | struct isci_port *iport, | 299 | struct isci_port *iport, |
306 | u32 phy_index); | 300 | u32 phy_index); |
307 | 301 | ||
308 | void scic_sds_port_get_sas_address( | 302 | void sci_port_get_sas_address( |
309 | struct isci_port *iport, | 303 | struct isci_port *iport, |
310 | struct sci_sas_address *sas_address); | 304 | struct sci_sas_address *sas_address); |
311 | 305 | ||
312 | void scic_sds_port_get_attached_sas_address( | 306 | void sci_port_get_attached_sas_address( |
313 | struct isci_port *iport, | 307 | struct isci_port *iport, |
314 | struct sci_sas_address *sas_address); | 308 | struct sci_sas_address *sas_address); |
315 | 309 | ||
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index a0a135d54e95..c8b16db6bbde 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -112,7 +112,7 @@ static s32 sci_sas_address_compare( | |||
112 | * port. port address if the port can be found to match the phy. | 112 | * port. port address if the port can be found to match the phy. |
113 | * NULL if there is no matching port for the phy. | 113 | * NULL if there is no matching port for the phy. |
114 | */ | 114 | */ |
115 | static struct isci_port *scic_sds_port_configuration_agent_find_port( | 115 | static struct isci_port *sci_port_configuration_agent_find_port( |
116 | struct isci_host *ihost, | 116 | struct isci_host *ihost, |
117 | struct isci_phy *iphy) | 117 | struct isci_phy *iphy) |
118 | { | 118 | { |
@@ -127,14 +127,14 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( | |||
127 | * more phys match the sent and received SAS address as this phy in which | 127 | * more phys match the sent and received SAS address as this phy in which |
128 | * case it should participate in the same port. | 128 | * case it should participate in the same port. |
129 | */ | 129 | */ |
130 | scic_sds_phy_get_sas_address(iphy, &phy_sas_address); | 130 | sci_phy_get_sas_address(iphy, &phy_sas_address); |
131 | scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); | 131 | sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address); |
132 | 132 | ||
133 | for (i = 0; i < ihost->logical_port_entries; i++) { | 133 | for (i = 0; i < ihost->logical_port_entries; i++) { |
134 | struct isci_port *iport = &ihost->ports[i]; | 134 | struct isci_port *iport = &ihost->ports[i]; |
135 | 135 | ||
136 | scic_sds_port_get_sas_address(iport, &port_sas_address); | 136 | sci_port_get_sas_address(iport, &port_sas_address); |
137 | scic_sds_port_get_attached_sas_address(iport, &port_attached_device_address); | 137 | sci_port_get_attached_sas_address(iport, &port_attached_device_address); |
138 | 138 | ||
139 | if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && | 139 | if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 && |
140 | sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) | 140 | sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0) |
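sci_port_configuration_agent_find_port() matches a phy to an existing port only when both the local SAS address and the attached (remote) SAS address agree. A sketch of that double comparison over a small array of ports; the types and sizes are simplified stand-ins:

#include <stddef.h>
#include <stdint.h>

struct sas_addr { uint32_t high, low; };

struct port_entry {
    struct sas_addr local;      /* the port's own SAS address */
    struct sas_addr attached;   /* address reported by the far end */
};

static int addr_cmp(struct sas_addr a, struct sas_addr b)
{
    if (a.high != b.high) return a.high > b.high ? 1 : -1;
    if (a.low != b.low)   return a.low  > b.low  ? 1 : -1;
    return 0;
}

/* Return the first port whose local and attached addresses both match the
 * phy's, or NULL when the phy belongs to no configured port. */
static struct port_entry *find_port(struct port_entry *ports, size_t nr,
                                    struct sas_addr phy_local,
                                    struct sas_addr phy_attached)
{
    for (size_t i = 0; i < nr; i++)
        if (addr_cmp(ports[i].local, phy_local) == 0 &&
            addr_cmp(ports[i].attached, phy_attached) == 0)
            return &ports[i];
    return NULL;
}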
@@ -156,9 +156,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( | |||
156 | * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION | 156 | * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION |
157 | * the port configuration is not valid for this port configuration agent. | 157 | * the port configuration is not valid for this port configuration agent. |
158 | */ | 158 | */ |
159 | static enum sci_status scic_sds_port_configuration_agent_validate_ports( | 159 | static enum sci_status sci_port_configuration_agent_validate_ports( |
160 | struct isci_host *ihost, | 160 | struct isci_host *ihost, |
161 | struct scic_sds_port_configuration_agent *port_agent) | 161 | struct sci_port_configuration_agent *port_agent) |
162 | { | 162 | { |
163 | struct sci_sas_address first_address; | 163 | struct sci_sas_address first_address; |
164 | struct sci_sas_address second_address; | 164 | struct sci_sas_address second_address; |
@@ -194,8 +194,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( | |||
194 | * PE0 and PE3 can never have the same SAS Address unless they | 194 | * PE0 and PE3 can never have the same SAS Address unless they |
195 | * are part of the same x4 wide port and we have already checked | 195 | * are part of the same x4 wide port and we have already checked |
196 | * for this condition. */ | 196 | * for this condition. */ |
197 | scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); | 197 | sci_phy_get_sas_address(&ihost->phys[0], &first_address); |
198 | scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); | 198 | sci_phy_get_sas_address(&ihost->phys[3], &second_address); |
199 | 199 | ||
200 | if (sci_sas_address_compare(first_address, second_address) == 0) { | 200 | if (sci_sas_address_compare(first_address, second_address) == 0) { |
201 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; | 201 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; |
@@ -207,8 +207,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( | |||
207 | * part of the same port. */ | 207 | * part of the same port. */ |
208 | if (port_agent->phy_valid_port_range[0].min_index == 0 && | 208 | if (port_agent->phy_valid_port_range[0].min_index == 0 && |
209 | port_agent->phy_valid_port_range[1].min_index == 1) { | 209 | port_agent->phy_valid_port_range[1].min_index == 1) { |
210 | scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address); | 210 | sci_phy_get_sas_address(&ihost->phys[0], &first_address); |
211 | scic_sds_phy_get_sas_address(&ihost->phys[2], &second_address); | 211 | sci_phy_get_sas_address(&ihost->phys[2], &second_address); |
212 | 212 | ||
213 | if (sci_sas_address_compare(first_address, second_address) == 0) { | 213 | if (sci_sas_address_compare(first_address, second_address) == 0) { |
214 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; | 214 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; |
@@ -221,8 +221,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( | |||
221 | * part of the same port. */ | 221 | * part of the same port. */ |
222 | if (port_agent->phy_valid_port_range[2].min_index == 2 && | 222 | if (port_agent->phy_valid_port_range[2].min_index == 2 && |
223 | port_agent->phy_valid_port_range[3].min_index == 3) { | 223 | port_agent->phy_valid_port_range[3].min_index == 3) { |
224 | scic_sds_phy_get_sas_address(&ihost->phys[1], &first_address); | 224 | sci_phy_get_sas_address(&ihost->phys[1], &first_address); |
225 | scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address); | 225 | sci_phy_get_sas_address(&ihost->phys[3], &second_address); |
226 | 226 | ||
227 | if (sci_sas_address_compare(first_address, second_address) == 0) { | 227 | if (sci_sas_address_compare(first_address, second_address) == 0) { |
228 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; | 228 | return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; |
@@ -239,8 +239,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( | |||
239 | 239 | ||
240 | /* verify all of the phys in the same port are using the same SAS address */ | 240 | /* verify all of the phys in the same port are using the same SAS address */ |
241 | static enum sci_status | 241 | static enum sci_status |
242 | scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | 242 | sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, |
243 | struct scic_sds_port_configuration_agent *port_agent) | 243 | struct sci_port_configuration_agent *port_agent) |
244 | { | 244 | { |
245 | u32 phy_mask; | 245 | u32 phy_mask; |
246 | u32 assigned_phy_mask; | 246 | u32 assigned_phy_mask; |
@@ -254,7 +254,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
254 | sas_address.low = 0; | 254 | sas_address.low = 0; |
255 | 255 | ||
256 | for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { | 256 | for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { |
257 | phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask; | 257 | phy_mask = ihost->oem_parameters.ports[port_index].phy_mask; |
258 | 258 | ||
259 | if (!phy_mask) | 259 | if (!phy_mask) |
260 | continue; | 260 | continue; |
@@ -269,7 +269,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
269 | for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { | 269 | for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { |
270 | if ((phy_mask & (1 << phy_index)) == 0) | 270 | if ((phy_mask & (1 << phy_index)) == 0) |
271 | continue; | 271 | continue; |
272 | scic_sds_phy_get_sas_address(&ihost->phys[phy_index], | 272 | sci_phy_get_sas_address(&ihost->phys[phy_index], |
273 | &sas_address); | 273 | &sas_address); |
274 | 274 | ||
275 | /* | 275 | /* |
@@ -294,7 +294,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
294 | while (phy_index < SCI_MAX_PHYS) { | 294 | while (phy_index < SCI_MAX_PHYS) { |
295 | if ((phy_mask & (1 << phy_index)) == 0) | 295 | if ((phy_mask & (1 << phy_index)) == 0) |
296 | continue; | 296 | continue; |
297 | scic_sds_phy_get_sas_address(&ihost->phys[phy_index], | 297 | sci_phy_get_sas_address(&ihost->phys[phy_index], |
298 | &phy_assigned_address); | 298 | &phy_assigned_address); |
299 | 299 | ||
300 | if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { | 300 | if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) { |
@@ -307,7 +307,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
307 | port_agent->phy_valid_port_range[phy_index].min_index = port_index; | 307 | port_agent->phy_valid_port_range[phy_index].min_index = port_index; |
308 | port_agent->phy_valid_port_range[phy_index].max_index = phy_index; | 308 | port_agent->phy_valid_port_range[phy_index].max_index = phy_index; |
309 | 309 | ||
310 | scic_sds_port_add_phy(&ihost->ports[port_index], | 310 | sci_port_add_phy(&ihost->ports[port_index], |
311 | &ihost->phys[phy_index]); | 311 | &ihost->phys[phy_index]); |
312 | 312 | ||
313 | assigned_phy_mask |= (1 << phy_index); | 313 | assigned_phy_mask |= (1 << phy_index); |
@@ -316,14 +316,14 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
316 | phy_index++; | 316 | phy_index++; |
317 | } | 317 | } |
318 | 318 | ||
319 | return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); | 319 | return sci_port_configuration_agent_validate_ports(ihost, port_agent); |
320 | } | 320 | } |
321 | 321 | ||
322 | static void mpc_agent_timeout(unsigned long data) | 322 | static void mpc_agent_timeout(unsigned long data) |
323 | { | 323 | { |
324 | u8 index; | 324 | u8 index; |
325 | struct sci_timer *tmr = (struct sci_timer *)data; | 325 | struct sci_timer *tmr = (struct sci_timer *)data; |
326 | struct scic_sds_port_configuration_agent *port_agent; | 326 | struct sci_port_configuration_agent *port_agent; |
327 | struct isci_host *ihost; | 327 | struct isci_host *ihost; |
328 | unsigned long flags; | 328 | unsigned long flags; |
329 | u16 configure_phy_mask; | 329 | u16 configure_phy_mask; |
@@ -355,8 +355,8 @@ done: | |||
355 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 355 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
356 | } | 356 | } |
357 | 357 | ||
358 | static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, | 358 | static void sci_mpc_agent_link_up(struct isci_host *ihost, |
359 | struct scic_sds_port_configuration_agent *port_agent, | 359 | struct sci_port_configuration_agent *port_agent, |
360 | struct isci_port *iport, | 360 | struct isci_port *iport, |
361 | struct isci_phy *iphy) | 361 | struct isci_phy *iphy) |
362 | { | 362 | { |
@@ -367,10 +367,10 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, | |||
367 | if (!iport) | 367 | if (!iport) |
368 | return; | 368 | return; |
369 | 369 | ||
370 | port_agent->phy_ready_mask |= (1 << scic_sds_phy_get_index(iphy)); | 370 | port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy)); |
371 | scic_sds_port_link_up(iport, iphy); | 371 | sci_port_link_up(iport, iphy); |
372 | if ((iport->active_phy_mask & (1 << scic_sds_phy_get_index(iphy)))) | 372 | if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy)))) |
373 | port_agent->phy_configured_mask |= (1 << scic_sds_phy_get_index(iphy)); | 373 | port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy)); |
374 | } | 374 | } |
375 | 375 | ||
376 | /** | 376 | /** |
@@ -390,9 +390,9 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, | |||
390 | * not associated with a port there is no action taken. Is it possible to get a | 390 | * not associated with a port there is no action taken. Is it possible to get a |
391 | * link down notification from a phy that has no associated port? | 391 | * link down notification from a phy that has no associated port? |
392 | */ | 392 | */ |
393 | static void scic_sds_mpc_agent_link_down( | 393 | static void sci_mpc_agent_link_down( |
394 | struct isci_host *ihost, | 394 | struct isci_host *ihost, |
395 | struct scic_sds_port_configuration_agent *port_agent, | 395 | struct sci_port_configuration_agent *port_agent, |
396 | struct isci_port *iport, | 396 | struct isci_port *iport, |
397 | struct isci_phy *iphy) | 397 | struct isci_phy *iphy) |
398 | { | 398 | { |
@@ -405,9 +405,9 @@ static void scic_sds_mpc_agent_link_down( | |||
405 | * state. | 405 | * state. |
406 | */ | 406 | */ |
407 | port_agent->phy_ready_mask &= | 407 | port_agent->phy_ready_mask &= |
408 | ~(1 << scic_sds_phy_get_index(iphy)); | 408 | ~(1 << sci_phy_get_index(iphy)); |
409 | port_agent->phy_configured_mask &= | 409 | port_agent->phy_configured_mask &= |
410 | ~(1 << scic_sds_phy_get_index(iphy)); | 410 | ~(1 << sci_phy_get_index(iphy)); |
411 | 411 | ||
412 | /* | 412 | /* |
413 | * Check to see if there are more phys waiting to be | 413 | * Check to see if there are more phys waiting to be |
@@ -424,7 +424,7 @@ static void scic_sds_mpc_agent_link_down( | |||
424 | SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); | 424 | SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT); |
425 | } | 425 | } |
426 | 426 | ||
427 | scic_sds_port_link_down(iport, iphy); | 427 | sci_port_link_down(iport, iphy); |
428 | } | 428 | } |
429 | } | 429 | } |
430 | 430 | ||
@@ -432,8 +432,8 @@ static void scic_sds_mpc_agent_link_down( | |||
432 | * configuration mode. | 432 | * configuration mode. |
433 | */ | 433 | */ |
434 | static enum sci_status | 434 | static enum sci_status |
435 | scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, | 435 | sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, |
436 | struct scic_sds_port_configuration_agent *port_agent) | 436 | struct sci_port_configuration_agent *port_agent) |
437 | { | 437 | { |
438 | u8 phy_index; | 438 | u8 phy_index; |
439 | u8 port_index; | 439 | u8 port_index; |
@@ -446,11 +446,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
446 | port_index = phy_index; | 446 | port_index = phy_index; |
447 | 447 | ||
448 | /* Get the assigned SAS Address for the first PHY on the controller. */ | 448 | /* Get the assigned SAS Address for the first PHY on the controller. */ |
449 | scic_sds_phy_get_sas_address(&ihost->phys[phy_index], | 449 | sci_phy_get_sas_address(&ihost->phys[phy_index], |
450 | &sas_address); | 450 | &sas_address); |
451 | 451 | ||
452 | while (++phy_index < SCI_MAX_PHYS) { | 452 | while (++phy_index < SCI_MAX_PHYS) { |
453 | scic_sds_phy_get_sas_address(&ihost->phys[phy_index], | 453 | sci_phy_get_sas_address(&ihost->phys[phy_index], |
454 | &phy_assigned_address); | 454 | &phy_assigned_address); |
455 | 455 | ||
456 | /* Verify the SAS addresses are all the same for every PHY */ | 456 | /* Verify the SAS addresses are all the same for every PHY */ |
@@ -465,11 +465,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, | |||
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
468 | return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); | 468 | return sci_port_configuration_agent_validate_ports(ihost, port_agent); |
469 | } | 469 | } |
470 | 470 | ||
471 | static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | 471 | static void sci_apc_agent_configure_ports(struct isci_host *ihost, |
472 | struct scic_sds_port_configuration_agent *port_agent, | 472 | struct sci_port_configuration_agent *port_agent, |
473 | struct isci_phy *iphy, | 473 | struct isci_phy *iphy, |
474 | bool start_timer) | 474 | bool start_timer) |
475 | { | 475 | { |
@@ -478,10 +478,10 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | |||
478 | struct isci_port *iport; | 478 | struct isci_port *iport; |
479 | enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; | 479 | enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; |
480 | 480 | ||
481 | iport = scic_sds_port_configuration_agent_find_port(ihost, iphy); | 481 | iport = sci_port_configuration_agent_find_port(ihost, iphy); |
482 | 482 | ||
483 | if (iport) { | 483 | if (iport) { |
484 | if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) | 484 | if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) |
485 | apc_activity = SCIC_SDS_APC_ADD_PHY; | 485 | apc_activity = SCIC_SDS_APC_ADD_PHY; |
486 | else | 486 | else |
487 | apc_activity = SCIC_SDS_APC_SKIP_PHY; | 487 | apc_activity = SCIC_SDS_APC_SKIP_PHY; |
@@ -499,7 +499,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | |||
499 | iport = &ihost->ports[port_index]; | 499 | iport = &ihost->ports[port_index]; |
500 | 500 | ||
501 | /* First we must make sure that this PHY can be added to this Port. */ | 501 | /* First we must make sure that this PHY can be added to this Port. */ |
502 | if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) { | 502 | if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { |
503 | /* | 503 | /* |
504 | * Port contains a PHY with a greater PHY ID than the current | 504 | * Port contains a PHY with a greater PHY ID than the current |
505 | * PHY that has gone link up. This phy can not be part of any | 505 | * PHY that has gone link up. This phy can not be part of any |
@@ -559,7 +559,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | |||
559 | 559 | ||
560 | switch (apc_activity) { | 560 | switch (apc_activity) { |
561 | case SCIC_SDS_APC_ADD_PHY: | 561 | case SCIC_SDS_APC_ADD_PHY: |
562 | status = scic_sds_port_add_phy(iport, iphy); | 562 | status = sci_port_add_phy(iport, iphy); |
563 | 563 | ||
564 | if (status == SCI_SUCCESS) { | 564 | if (status == SCI_SUCCESS) { |
565 | port_agent->phy_configured_mask |= (1 << iphy->phy_index); | 565 | port_agent->phy_configured_mask |= (1 << iphy->phy_index); |
@@ -588,7 +588,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | |||
588 | } | 588 | } |
589 | 589 | ||
590 | /** | 590 | /** |
591 | * scic_sds_apc_agent_link_up - handle apc link up events | 591 | * sci_apc_agent_link_up - handle apc link up events |
592 | * @scic: This is the controller object that receives the link up | 592 | * @scic: This is the controller object that receives the link up |
593 | * notification. | 593 | * notification. |
594 | * @sci_port: This is the port object associated with the phy. If there is no | 594 | * @sci_port: This is the port object associated with the phy. If there is no |
@@ -599,8 +599,8 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, | |||
599 | * notifications. Is it possible to get a link down notification from a phy | 599 | * notifications. Is it possible to get a link down notification from a phy |
600 | * that has no associated port? | 600 | * that has no associated port? |
601 | */ | 601 | */ |
602 | static void scic_sds_apc_agent_link_up(struct isci_host *ihost, | 602 | static void sci_apc_agent_link_up(struct isci_host *ihost, |
603 | struct scic_sds_port_configuration_agent *port_agent, | 603 | struct sci_port_configuration_agent *port_agent, |
604 | struct isci_port *iport, | 604 | struct isci_port *iport, |
605 | struct isci_phy *iphy) | 605 | struct isci_phy *iphy) |
606 | { | 606 | { |
@@ -609,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, | |||
609 | if (!iport) { | 609 | if (!iport) { |
610 | /* the phy is not part of this port */ | 610 | /* the phy is not part of this port */ |
611 | port_agent->phy_ready_mask |= 1 << phy_index; | 611 | port_agent->phy_ready_mask |= 1 << phy_index; |
612 | scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true); | 612 | sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); |
613 | } else { | 613 | } else { |
614 | /* the phy is already the part of the port */ | 614 | /* the phy is already the part of the port */ |
615 | u32 port_state = iport->sm.current_state_id; | 615 | u32 port_state = iport->sm.current_state_id; |
@@ -620,7 +620,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, | |||
620 | */ | 620 | */ |
621 | BUG_ON(port_state != SCI_PORT_RESETTING); | 621 | BUG_ON(port_state != SCI_PORT_RESETTING); |
622 | port_agent->phy_ready_mask |= 1 << phy_index; | 622 | port_agent->phy_ready_mask |= 1 << phy_index; |
623 | scic_sds_port_link_up(iport, iphy); | 623 | sci_port_link_up(iport, iphy); |
624 | } | 624 | } |
625 | } | 625 | } |
626 | 626 | ||
@@ -637,20 +637,20 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost, | |||
637 | * possible to get a link down notification from a phy that has no associated | 637 | * possible to get a link down notification from a phy that has no associated |
638 | * port? | 638 | * port? |
639 | */ | 639 | */ |
640 | static void scic_sds_apc_agent_link_down( | 640 | static void sci_apc_agent_link_down( |
641 | struct isci_host *ihost, | 641 | struct isci_host *ihost, |
642 | struct scic_sds_port_configuration_agent *port_agent, | 642 | struct sci_port_configuration_agent *port_agent, |
643 | struct isci_port *iport, | 643 | struct isci_port *iport, |
644 | struct isci_phy *iphy) | 644 | struct isci_phy *iphy) |
645 | { | 645 | { |
646 | port_agent->phy_ready_mask &= ~(1 << scic_sds_phy_get_index(iphy)); | 646 | port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy)); |
647 | 647 | ||
648 | if (!iport) | 648 | if (!iport) |
649 | return; | 649 | return; |
650 | if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { | 650 | if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) { |
651 | enum sci_status status; | 651 | enum sci_status status; |
652 | 652 | ||
653 | status = scic_sds_port_remove_phy(iport, iphy); | 653 | status = sci_port_remove_phy(iport, iphy); |
654 | 654 | ||
655 | if (status == SCI_SUCCESS) | 655 | if (status == SCI_SUCCESS) |
656 | port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); | 656 | port_agent->phy_configured_mask &= ~(1 << iphy->phy_index); |
@@ -662,7 +662,7 @@ static void apc_agent_timeout(unsigned long data) | |||
662 | { | 662 | { |
663 | u32 index; | 663 | u32 index; |
664 | struct sci_timer *tmr = (struct sci_timer *)data; | 664 | struct sci_timer *tmr = (struct sci_timer *)data; |
665 | struct scic_sds_port_configuration_agent *port_agent; | 665 | struct sci_port_configuration_agent *port_agent; |
666 | struct isci_host *ihost; | 666 | struct isci_host *ihost; |
667 | unsigned long flags; | 667 | unsigned long flags; |
668 | u16 configure_phy_mask; | 668 | u16 configure_phy_mask; |
@@ -686,7 +686,7 @@ static void apc_agent_timeout(unsigned long data) | |||
686 | if ((configure_phy_mask & (1 << index)) == 0) | 686 | if ((configure_phy_mask & (1 << index)) == 0) |
687 | continue; | 687 | continue; |
688 | 688 | ||
689 | scic_sds_apc_agent_configure_ports(ihost, port_agent, | 689 | sci_apc_agent_configure_ports(ihost, port_agent, |
690 | &ihost->phys[index], false); | 690 | &ihost->phys[index], false); |
691 | } | 691 | } |
692 | 692 | ||
@@ -706,8 +706,8 @@ done: | |||
706 | * call is universal for both manual port configuration and automatic port | 706 | * call is universal for both manual port configuration and automatic port |
707 | * configuration modes. | 707 | * configuration modes. |
708 | */ | 708 | */ |
709 | void scic_sds_port_configuration_agent_construct( | 709 | void sci_port_configuration_agent_construct( |
710 | struct scic_sds_port_configuration_agent *port_agent) | 710 | struct sci_port_configuration_agent *port_agent) |
711 | { | 711 | { |
712 | u32 index; | 712 | u32 index; |
713 | 713 | ||
@@ -725,29 +725,29 @@ void scic_sds_port_configuration_agent_construct( | |||
725 | } | 725 | } |
726 | } | 726 | } |
727 | 727 | ||
728 | enum sci_status scic_sds_port_configuration_agent_initialize( | 728 | enum sci_status sci_port_configuration_agent_initialize( |
729 | struct isci_host *ihost, | 729 | struct isci_host *ihost, |
730 | struct scic_sds_port_configuration_agent *port_agent) | 730 | struct sci_port_configuration_agent *port_agent) |
731 | { | 731 | { |
732 | enum sci_status status; | 732 | enum sci_status status; |
733 | enum scic_port_configuration_mode mode; | 733 | enum sci_port_configuration_mode mode; |
734 | 734 | ||
735 | mode = ihost->oem_parameters.sds1.controller.mode_type; | 735 | mode = ihost->oem_parameters.controller.mode_type; |
736 | 736 | ||
737 | if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { | 737 | if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { |
738 | status = scic_sds_mpc_agent_validate_phy_configuration( | 738 | status = sci_mpc_agent_validate_phy_configuration( |
739 | ihost, port_agent); | 739 | ihost, port_agent); |
740 | 740 | ||
741 | port_agent->link_up_handler = scic_sds_mpc_agent_link_up; | 741 | port_agent->link_up_handler = sci_mpc_agent_link_up; |
742 | port_agent->link_down_handler = scic_sds_mpc_agent_link_down; | 742 | port_agent->link_down_handler = sci_mpc_agent_link_down; |
743 | 743 | ||
744 | sci_init_timer(&port_agent->timer, mpc_agent_timeout); | 744 | sci_init_timer(&port_agent->timer, mpc_agent_timeout); |
745 | } else { | 745 | } else { |
746 | status = scic_sds_apc_agent_validate_phy_configuration( | 746 | status = sci_apc_agent_validate_phy_configuration( |
747 | ihost, port_agent); | 747 | ihost, port_agent); |
748 | 748 | ||
749 | port_agent->link_up_handler = scic_sds_apc_agent_link_up; | 749 | port_agent->link_up_handler = sci_apc_agent_link_up; |
750 | port_agent->link_down_handler = scic_sds_apc_agent_link_down; | 750 | port_agent->link_down_handler = sci_apc_agent_link_down; |
751 | 751 | ||
752 | sci_init_timer(&port_agent->timer, apc_agent_timeout); | 752 | sci_init_timer(&port_agent->timer, apc_agent_timeout); |
753 | } | 753 | } |
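
The port-matching rule that sci_port_configuration_agent_find_port() applies above reduces to comparing both the phy's local SAS address and its attached SAS address against those of each configured port. A minimal sketch of that comparison, assuming kernel types and the high/low layout of struct sci_sas_address implied by the sas_address.low initialization in the MPC validation hunk; phy_matches_port() is a hypothetical helper, not a driver symbol (the driver uses sci_sas_address_compare()):

#include <linux/types.h>	/* u32, bool */

struct sci_sas_address {	/* assumed layout, per 'sas_address.low = 0' above */
	u32 high;
	u32 low;
};

/* Sketch only: true when a phy belongs with a port per the rule above. */
static bool phy_matches_port(struct sci_sas_address phy_addr,
			     struct sci_sas_address phy_attached,
			     struct sci_sas_address port_addr,
			     struct sci_sas_address port_attached)
{
	return phy_addr.high == port_addr.high &&
	       phy_addr.low == port_addr.low &&
	       phy_attached.high == port_attached.high &&
	       phy_attached.low == port_attached.low;
}
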
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 99b13c191877..c7732fb28889 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c | |||
@@ -111,25 +111,15 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) | |||
111 | return rom; | 111 | return rom; |
112 | } | 112 | } |
113 | 113 | ||
114 | /** | 114 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, |
115 | * isci_parse_oem_parameters() - This method will take OEM parameters | ||
116 | * from the module init parameters and copy them to oem_params. This will | ||
117 | * only copy values that are not set to the module parameter default values | ||
118 | * @oem_parameters: This parameter specifies the controller default OEM | ||
119 | * parameters. It is expected that this has been initialized to the default | ||
120 | * parameters for the controller | ||
121 | * | ||
122 | * | ||
123 | */ | ||
124 | enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem_params, | ||
125 | struct isci_orom *orom, int scu_index) | 115 | struct isci_orom *orom, int scu_index) |
126 | { | 116 | { |
127 | /* check for valid inputs */ | 117 | /* check for valid inputs */ |
128 | if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || | 118 | if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || |
129 | scu_index > orom->hdr.num_elements || !oem_params) | 119 | scu_index > orom->hdr.num_elements || !oem) |
130 | return -EINVAL; | 120 | return -EINVAL; |
131 | 121 | ||
132 | oem_params->sds1 = orom->ctrl[scu_index]; | 122 | *oem = orom->ctrl[scu_index]; |
133 | return 0; | 123 | return 0; |
134 | } | 124 | } |
135 | 125 | ||
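
With union scic_oem_parameters removed, callers hand isci_parse_oem_parameters() the embedded struct sci_oem_params directly and the routine copies the selected controller element out of the OEM ROM block. A hedged sketch of the call pattern; the surrounding probe context (ihost, scu_index) and the example_load_oem() wrapper are assumptions for illustration:

/* Sketch of a caller under assumed probe context; not the driver's
 * actual init path. ihost->oem_parameters is the embedded
 * struct sci_oem_params referenced elsewhere in this diff. */
static void example_load_oem(struct pci_dev *pdev, struct isci_host *ihost,
			     int scu_index)
{
	struct isci_orom *orom = isci_request_oprom(pdev);

	if (!orom)
		return;

	if (isci_parse_oem_parameters(&ihost->oem_parameters,
				      orom, scu_index) != SCI_SUCCESS)
		dev_warn(&pdev->dev, "invalid OEM parameter block\n");
}
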
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index e40cb5f6eba5..dc007e692f4e 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h | |||
@@ -74,7 +74,7 @@ | |||
74 | #define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED | 74 | #define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED |
75 | 75 | ||
76 | /* parameters that can be set by module parameters */ | 76 | /* parameters that can be set by module parameters */ |
77 | struct scic_sds_user_parameters { | 77 | struct sci_user_parameters { |
78 | struct sci_phy_user_params { | 78 | struct sci_phy_user_params { |
79 | /** | 79 | /** |
80 | * This field specifies the NOTIFY (ENABLE SPIN UP) primitive | 80 | * This field specifies the NOTIFY (ENABLE SPIN UP) primitive |
@@ -147,30 +147,16 @@ struct scic_sds_user_parameters { | |||
147 | 147 | ||
148 | }; | 148 | }; |
149 | 149 | ||
150 | /* XXX kill this union */ | ||
151 | union scic_user_parameters { | ||
152 | /** | ||
153 | * This field specifies the user parameters specific to the | ||
154 | * Storage Controller Unit (SCU) Driver Standard (SDS) version | ||
155 | * 1. | ||
156 | */ | ||
157 | struct scic_sds_user_parameters sds1; | ||
158 | }; | ||
159 | |||
160 | #define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 | 150 | #define SCIC_SDS_PARM_PHY_MASK_MIN 0x0 |
161 | #define SCIC_SDS_PARM_PHY_MASK_MAX 0xF | 151 | #define SCIC_SDS_PARM_PHY_MASK_MAX 0xF |
162 | #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 | 152 | #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 |
163 | 153 | ||
164 | struct scic_sds_oem_params; | 154 | struct sci_oem_params; |
165 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); | 155 | int sci_oem_parameters_validate(struct sci_oem_params *oem); |
166 | |||
167 | union scic_oem_parameters; | ||
168 | void scic_oem_parameters_get(struct isci_host *ihost, | ||
169 | union scic_oem_parameters *oem); | ||
170 | 156 | ||
171 | struct isci_orom; | 157 | struct isci_orom; |
172 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); | 158 | struct isci_orom *isci_request_oprom(struct pci_dev *pdev); |
173 | enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem, | 159 | enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, |
174 | struct isci_orom *orom, int scu_index); | 160 | struct isci_orom *orom, int scu_index); |
175 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); | 161 | struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); |
176 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); | 162 | struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); |
@@ -214,7 +200,7 @@ struct isci_oem_hdr { | |||
214 | * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs | 200 | * A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs |
215 | * being assigned is sufficient to declare manual PORT configuration. | 201 | * being assigned is sufficient to declare manual PORT configuration. |
216 | */ | 202 | */ |
217 | enum scic_port_configuration_mode { | 203 | enum sci_port_configuration_mode { |
218 | SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, | 204 | SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0, |
219 | SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 | 205 | SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1 |
220 | }; | 206 | }; |
@@ -230,7 +216,7 @@ struct sci_bios_oem_param_block_hdr { | |||
230 | uint8_t reserved[8]; | 216 | uint8_t reserved[8]; |
231 | } __attribute__ ((packed)); | 217 | } __attribute__ ((packed)); |
232 | 218 | ||
233 | struct scic_sds_oem_params { | 219 | struct sci_oem_params { |
234 | struct { | 220 | struct { |
235 | uint8_t mode_type; | 221 | uint8_t mode_type; |
236 | uint8_t max_concurrent_dev_spin_up; | 222 | uint8_t max_concurrent_dev_spin_up; |
@@ -255,19 +241,9 @@ struct scic_sds_oem_params { | |||
255 | } phys[SCI_MAX_PHYS]; | 241 | } phys[SCI_MAX_PHYS]; |
256 | } __attribute__ ((packed)); | 242 | } __attribute__ ((packed)); |
257 | 243 | ||
258 | /* XXX kill this union */ | ||
259 | union scic_oem_parameters { | ||
260 | /** | ||
261 | * This field specifies the OEM parameters specific to the | ||
262 | * Storage Controller Unit (SCU) Driver Standard (SDS) version | ||
263 | * 1. | ||
264 | */ | ||
265 | struct scic_sds_oem_params sds1; | ||
266 | }; | ||
267 | |||
268 | struct isci_orom { | 244 | struct isci_orom { |
269 | struct sci_bios_oem_param_block_hdr hdr; | 245 | struct sci_bios_oem_param_block_hdr hdr; |
270 | struct scic_sds_oem_params ctrl[SCI_MAX_CONTROLLERS]; | 246 | struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS]; |
271 | } __attribute__ ((packed)); | 247 | } __attribute__ ((packed)); |
272 | 248 | ||
273 | #endif | 249 | #endif |
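
The comment above enum sci_port_configuration_mode notes that populating even a single PORT_PHY mask is enough to imply manual port configuration. A sketch of that rule, using the oem->ports[].phy_mask layout that sci_mpc_agent_validate_phy_configuration() reads in the port_config.c hunks; oem_implied_mode() is a hypothetical helper, the driver simply consults oem->controller.mode_type:

/* Hypothetical helper illustrating the rule described above; the driver
 * reads oem->controller.mode_type rather than re-deriving the mode. */
static enum sci_port_configuration_mode
oem_implied_mode(const struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask)
			return SCIC_PORT_MANUAL_CONFIGURATION_MODE;

	return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
}
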
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 9043b458c999..8c752abb4331 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c | |||
@@ -68,7 +68,7 @@ | |||
68 | * @isci_host: This parameter specifies the isci host object. | 68 | * @isci_host: This parameter specifies the isci host object. |
69 | * @isci_device: This parameter specifies the remote device | 69 | * @isci_device: This parameter specifies the remote device |
70 | * | 70 | * |
71 | * scic_lock is held on entrance to this function. | 71 | * sci_lock is held on entrance to this function. |
72 | */ | 72 | */ |
73 | static void isci_remote_device_not_ready(struct isci_host *ihost, | 73 | static void isci_remote_device_not_ready(struct isci_host *ihost, |
74 | struct isci_remote_device *idev, u32 reason) | 74 | struct isci_remote_device *idev, u32 reason) |
@@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, | |||
92 | "%s: isci_device = %p request = %p\n", | 92 | "%s: isci_device = %p request = %p\n", |
93 | __func__, idev, ireq); | 93 | __func__, idev, ireq); |
94 | 94 | ||
95 | scic_controller_terminate_request(ihost, | 95 | sci_controller_terminate_request(ihost, |
96 | idev, | 96 | idev, |
97 | ireq); | 97 | ireq); |
98 | } | 98 | } |
@@ -133,7 +133,7 @@ static void rnc_destruct_done(void *_dev) | |||
133 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | 133 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); |
134 | } | 134 | } |
135 | 135 | ||
136 | static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) | 136 | static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) |
137 | { | 137 | { |
138 | struct isci_host *ihost = idev->owning_port->owning_controller; | 138 | struct isci_host *ihost = idev->owning_port->owning_controller; |
139 | enum sci_status status = SCI_SUCCESS; | 139 | enum sci_status status = SCI_SUCCESS; |
@@ -147,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem | |||
147 | ireq->target_device != idev) | 147 | ireq->target_device != idev) |
148 | continue; | 148 | continue; |
149 | 149 | ||
150 | s = scic_controller_terminate_request(ihost, idev, ireq); | 150 | s = sci_controller_terminate_request(ihost, idev, ireq); |
151 | if (s != SCI_SUCCESS) | 151 | if (s != SCI_SUCCESS) |
152 | status = s; | 152 | status = s; |
153 | } | 153 | } |
@@ -155,11 +155,11 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem | |||
155 | return status; | 155 | return status; |
156 | } | 156 | } |
157 | 157 | ||
158 | enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, | 158 | enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, |
159 | u32 timeout) | 159 | u32 timeout) |
160 | { | 160 | { |
161 | struct sci_base_state_machine *sm = &idev->sm; | 161 | struct sci_base_state_machine *sm = &idev->sm; |
162 | enum scic_sds_remote_device_states state = sm->current_state_id; | 162 | enum sci_remote_device_states state = sm->current_state_id; |
163 | 163 | ||
164 | switch (state) { | 164 | switch (state) { |
165 | case SCI_DEV_INITIAL: | 165 | case SCI_DEV_INITIAL: |
@@ -174,7 +174,7 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, | |||
174 | case SCI_DEV_STARTING: | 174 | case SCI_DEV_STARTING: |
175 | /* device not started so there had better be no requests */ | 175 | /* device not started so there had better be no requests */ |
176 | BUG_ON(idev->started_request_count != 0); | 176 | BUG_ON(idev->started_request_count != 0); |
177 | scic_sds_remote_node_context_destruct(&idev->rnc, | 177 | sci_remote_node_context_destruct(&idev->rnc, |
178 | rnc_destruct_done, idev); | 178 | rnc_destruct_done, idev); |
179 | /* Transition to the stopping state and wait for the | 179 | /* Transition to the stopping state and wait for the |
180 | * remote node to complete being posted and invalidated. | 180 | * remote node to complete being posted and invalidated. |
@@ -191,28 +191,28 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev, | |||
191 | case SCI_SMP_DEV_CMD: | 191 | case SCI_SMP_DEV_CMD: |
192 | sci_change_state(sm, SCI_DEV_STOPPING); | 192 | sci_change_state(sm, SCI_DEV_STOPPING); |
193 | if (idev->started_request_count == 0) { | 193 | if (idev->started_request_count == 0) { |
194 | scic_sds_remote_node_context_destruct(&idev->rnc, | 194 | sci_remote_node_context_destruct(&idev->rnc, |
195 | rnc_destruct_done, idev); | 195 | rnc_destruct_done, idev); |
196 | return SCI_SUCCESS; | 196 | return SCI_SUCCESS; |
197 | } else | 197 | } else |
198 | return scic_sds_remote_device_terminate_requests(idev); | 198 | return sci_remote_device_terminate_requests(idev); |
199 | break; | 199 | break; |
200 | case SCI_DEV_STOPPING: | 200 | case SCI_DEV_STOPPING: |
201 | /* All requests should have been terminated, but if there is an | 201 | /* All requests should have been terminated, but if there is an |
202 | * attempt to stop a device already in the stopping state, then | 202 | * attempt to stop a device already in the stopping state, then |
203 | * try again to terminate. | 203 | * try again to terminate. |
204 | */ | 204 | */ |
205 | return scic_sds_remote_device_terminate_requests(idev); | 205 | return sci_remote_device_terminate_requests(idev); |
206 | case SCI_DEV_RESETTING: | 206 | case SCI_DEV_RESETTING: |
207 | sci_change_state(sm, SCI_DEV_STOPPING); | 207 | sci_change_state(sm, SCI_DEV_STOPPING); |
208 | return SCI_SUCCESS; | 208 | return SCI_SUCCESS; |
209 | } | 209 | } |
210 | } | 210 | } |
211 | 211 | ||
212 | enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) | 212 | enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) |
213 | { | 213 | { |
214 | struct sci_base_state_machine *sm = &idev->sm; | 214 | struct sci_base_state_machine *sm = &idev->sm; |
215 | enum scic_sds_remote_device_states state = sm->current_state_id; | 215 | enum sci_remote_device_states state = sm->current_state_id; |
216 | 216 | ||
217 | switch (state) { | 217 | switch (state) { |
218 | case SCI_DEV_INITIAL: | 218 | case SCI_DEV_INITIAL: |
@@ -239,10 +239,10 @@ enum sci_status scic_remote_device_reset(struct isci_remote_device *idev) | |||
239 | } | 239 | } |
240 | } | 240 | } |
241 | 241 | ||
242 | enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev) | 242 | enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) |
243 | { | 243 | { |
244 | struct sci_base_state_machine *sm = &idev->sm; | 244 | struct sci_base_state_machine *sm = &idev->sm; |
245 | enum scic_sds_remote_device_states state = sm->current_state_id; | 245 | enum sci_remote_device_states state = sm->current_state_id; |
246 | 246 | ||
247 | if (state != SCI_DEV_RESETTING) { | 247 | if (state != SCI_DEV_RESETTING) { |
248 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | 248 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
@@ -254,11 +254,11 @@ enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *ide | |||
254 | return SCI_SUCCESS; | 254 | return SCI_SUCCESS; |
255 | } | 255 | } |
256 | 256 | ||
257 | enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, | 257 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, |
258 | u32 suspend_type) | 258 | u32 suspend_type) |
259 | { | 259 | { |
260 | struct sci_base_state_machine *sm = &idev->sm; | 260 | struct sci_base_state_machine *sm = &idev->sm; |
261 | enum scic_sds_remote_device_states state = sm->current_state_id; | 261 | enum sci_remote_device_states state = sm->current_state_id; |
262 | 262 | ||
263 | if (state != SCI_STP_DEV_CMD) { | 263 | if (state != SCI_STP_DEV_CMD) { |
264 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | 264 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
@@ -266,15 +266,15 @@ enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev, | |||
266 | return SCI_FAILURE_INVALID_STATE; | 266 | return SCI_FAILURE_INVALID_STATE; |
267 | } | 267 | } |
268 | 268 | ||
269 | return scic_sds_remote_node_context_suspend(&idev->rnc, | 269 | return sci_remote_node_context_suspend(&idev->rnc, |
270 | suspend_type, NULL, NULL); | 270 | suspend_type, NULL, NULL); |
271 | } | 271 | } |
272 | 272 | ||
273 | enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev, | 273 | enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, |
274 | u32 frame_index) | 274 | u32 frame_index) |
275 | { | 275 | { |
276 | struct sci_base_state_machine *sm = &idev->sm; | 276 | struct sci_base_state_machine *sm = &idev->sm; |
277 | enum scic_sds_remote_device_states state = sm->current_state_id; | 277 | enum sci_remote_device_states state = sm->current_state_id; |
278 | struct isci_host *ihost = idev->owning_port->owning_controller; | 278 | struct isci_host *ihost = idev->owning_port->owning_controller; |
279 | enum sci_status status; | 279 | enum sci_status status; |
280 | 280 | ||
@@ -289,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
289 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | 289 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
290 | __func__, state); | 290 | __func__, state); |
291 | /* Return the frame back to the controller */ | 291 | /* Return the frame back to the controller */ |
292 | scic_sds_controller_release_frame(ihost, frame_index); | 292 | sci_controller_release_frame(ihost, frame_index); |
293 | return SCI_FAILURE_INVALID_STATE; | 293 | return SCI_FAILURE_INVALID_STATE; |
294 | case SCI_DEV_READY: | 294 | case SCI_DEV_READY: |
295 | case SCI_STP_DEV_NCQ_ERROR: | 295 | case SCI_STP_DEV_NCQ_ERROR: |
@@ -302,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
302 | void *frame_header; | 302 | void *frame_header; |
303 | ssize_t word_cnt; | 303 | ssize_t word_cnt; |
304 | 304 | ||
305 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 305 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
306 | frame_index, | 306 | frame_index, |
307 | &frame_header); | 307 | &frame_header); |
308 | if (status != SCI_SUCCESS) | 308 | if (status != SCI_SUCCESS) |
@@ -311,22 +311,22 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
311 | word_cnt = sizeof(hdr) / sizeof(u32); | 311 | word_cnt = sizeof(hdr) / sizeof(u32); |
312 | sci_swab32_cpy(&hdr, frame_header, word_cnt); | 312 | sci_swab32_cpy(&hdr, frame_header, word_cnt); |
313 | 313 | ||
314 | ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag)); | 314 | ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); |
315 | if (ireq && ireq->target_device == idev) { | 315 | if (ireq && ireq->target_device == idev) { |
316 | /* The IO request is now in charge of releasing the frame */ | 316 | /* The IO request is now in charge of releasing the frame */ |
317 | status = scic_sds_io_request_frame_handler(ireq, frame_index); | 317 | status = sci_io_request_frame_handler(ireq, frame_index); |
318 | } else { | 318 | } else { |
319 | /* We could not map this tag to a valid IO | 319 | /* We could not map this tag to a valid IO |
320 | * request. Just toss the frame and continue | 320 | * request. Just toss the frame and continue |
321 | */ | 321 | */ |
322 | scic_sds_controller_release_frame(ihost, frame_index); | 322 | sci_controller_release_frame(ihost, frame_index); |
323 | } | 323 | } |
324 | break; | 324 | break; |
325 | } | 325 | } |
326 | case SCI_STP_DEV_NCQ: { | 326 | case SCI_STP_DEV_NCQ: { |
327 | struct dev_to_host_fis *hdr; | 327 | struct dev_to_host_fis *hdr; |
328 | 328 | ||
329 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 329 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
330 | frame_index, | 330 | frame_index, |
331 | (void **)&hdr); | 331 | (void **)&hdr); |
332 | if (status != SCI_SUCCESS) | 332 | if (status != SCI_SUCCESS) |
@@ -349,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
349 | } else | 349 | } else |
350 | status = SCI_FAILURE; | 350 | status = SCI_FAILURE; |
351 | 351 | ||
352 | scic_sds_controller_release_frame(ihost, frame_index); | 352 | sci_controller_release_frame(ihost, frame_index); |
353 | break; | 353 | break; |
354 | } | 354 | } |
355 | case SCI_STP_DEV_CMD: | 355 | case SCI_STP_DEV_CMD: |
@@ -358,7 +358,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
358 | * in this state. All unsolicited frames are forwarded to the io request | 358 | * in this state. All unsolicited frames are forwarded to the io request |
359 | * object. | 359 | * object. |
360 | */ | 360 | */ |
361 | status = scic_sds_io_request_frame_handler(idev->working_request, frame_index); | 361 | status = sci_io_request_frame_handler(idev->working_request, frame_index); |
362 | break; | 362 | break; |
363 | } | 363 | } |
364 | 364 | ||
@@ -369,7 +369,7 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) | |||
369 | { | 369 | { |
370 | 370 | ||
371 | struct sci_base_state_machine *sm = &idev->sm; | 371 | struct sci_base_state_machine *sm = &idev->sm; |
372 | enum scic_sds_remote_device_states state = sm->current_state_id; | 372 | enum sci_remote_device_states state = sm->current_state_id; |
373 | 373 | ||
374 | switch (state) { | 374 | switch (state) { |
375 | case SCI_DEV_READY: | 375 | case SCI_DEV_READY: |
@@ -386,25 +386,25 @@ static bool is_remote_device_ready(struct isci_remote_device *idev) | |||
386 | } | 386 | } |
387 | } | 387 | } |
388 | 388 | ||
389 | enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev, | 389 | enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, |
390 | u32 event_code) | 390 | u32 event_code) |
391 | { | 391 | { |
392 | struct sci_base_state_machine *sm = &idev->sm; | 392 | struct sci_base_state_machine *sm = &idev->sm; |
393 | enum scic_sds_remote_device_states state = sm->current_state_id; | 393 | enum sci_remote_device_states state = sm->current_state_id; |
394 | enum sci_status status; | 394 | enum sci_status status; |
395 | 395 | ||
396 | switch (scu_get_event_type(event_code)) { | 396 | switch (scu_get_event_type(event_code)) { |
397 | case SCU_EVENT_TYPE_RNC_OPS_MISC: | 397 | case SCU_EVENT_TYPE_RNC_OPS_MISC: |
398 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: | 398 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: |
399 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 399 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
400 | status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code); | 400 | status = sci_remote_node_context_event_handler(&idev->rnc, event_code); |
401 | break; | 401 | break; |
402 | case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: | 402 | case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: |
403 | if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { | 403 | if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { |
404 | status = SCI_SUCCESS; | 404 | status = SCI_SUCCESS; |
405 | 405 | ||
406 | /* Suspend the associated RNC */ | 406 | /* Suspend the associated RNC */ |
407 | scic_sds_remote_node_context_suspend(&idev->rnc, | 407 | sci_remote_node_context_suspend(&idev->rnc, |
408 | SCI_SOFTWARE_SUSPENSION, | 408 | SCI_SOFTWARE_SUSPENSION, |
409 | NULL, NULL); | 409 | NULL, NULL); |
410 | 410 | ||
@@ -439,13 +439,13 @@ enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device * | |||
439 | */ | 439 | */ |
440 | if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || | 440 | if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || |
441 | scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) | 441 | scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) |
442 | status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); | 442 | status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL); |
443 | } | 443 | } |
444 | 444 | ||
445 | return status; | 445 | return status; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void scic_sds_remote_device_start_request(struct isci_remote_device *idev, | 448 | static void sci_remote_device_start_request(struct isci_remote_device *idev, |
449 | struct isci_request *ireq, | 449 | struct isci_request *ireq, |
450 | enum sci_status status) | 450 | enum sci_status status) |
451 | { | 451 | { |
@@ -453,19 +453,19 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev | |||
453 | 453 | ||
454 | /* cleanup requests that failed after starting on the port */ | 454 | /* cleanup requests that failed after starting on the port */ |
455 | if (status != SCI_SUCCESS) | 455 | if (status != SCI_SUCCESS) |
456 | scic_sds_port_complete_io(iport, idev, ireq); | 456 | sci_port_complete_io(iport, idev, ireq); |
457 | else { | 457 | else { |
458 | kref_get(&idev->kref); | 458 | kref_get(&idev->kref); |
459 | scic_sds_remote_device_increment_request_count(idev); | 459 | sci_remote_device_increment_request_count(idev); |
460 | } | 460 | } |
461 | } | 461 | } |
462 | 462 | ||
463 | enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | 463 | enum sci_status sci_remote_device_start_io(struct isci_host *ihost, |
464 | struct isci_remote_device *idev, | 464 | struct isci_remote_device *idev, |
465 | struct isci_request *ireq) | 465 | struct isci_request *ireq) |
466 | { | 466 | { |
467 | struct sci_base_state_machine *sm = &idev->sm; | 467 | struct sci_base_state_machine *sm = &idev->sm; |
468 | enum scic_sds_remote_device_states state = sm->current_state_id; | 468 | enum sci_remote_device_states state = sm->current_state_id; |
469 | struct isci_port *iport = idev->owning_port; | 469 | struct isci_port *iport = idev->owning_port; |
470 | enum sci_status status; | 470 | enum sci_status status; |
471 | 471 | ||
@@ -488,15 +488,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | |||
488 | * successful it will start the request for the port object then | 488 | * successful it will start the request for the port object then |
489 | * increment its own request count. | 489 | * increment its own request count. |
490 | */ | 490 | */ |
491 | status = scic_sds_port_start_io(iport, idev, ireq); | 491 | status = sci_port_start_io(iport, idev, ireq); |
492 | if (status != SCI_SUCCESS) | 492 | if (status != SCI_SUCCESS) |
493 | return status; | 493 | return status; |
494 | 494 | ||
495 | status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); | 495 | status = sci_remote_node_context_start_io(&idev->rnc, ireq); |
496 | if (status != SCI_SUCCESS) | 496 | if (status != SCI_SUCCESS) |
497 | break; | 497 | break; |
498 | 498 | ||
499 | status = scic_sds_request_start(ireq); | 499 | status = sci_request_start(ireq); |
500 | break; | 500 | break; |
501 | case SCI_STP_DEV_IDLE: { | 501 | case SCI_STP_DEV_IDLE: { |
502 | /* handle the start io operation for a sata device that is in | 502 | /* handle the start io operation for a sata device that is in |
@@ -507,18 +507,18 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | |||
507 | * If this is a softreset we may want to have a different | 507 | * If this is a softreset we may want to have a different |
508 | * substate. | 508 | * substate. |
509 | */ | 509 | */ |
510 | enum scic_sds_remote_device_states new_state; | 510 | enum sci_remote_device_states new_state; |
511 | struct sas_task *task = isci_request_access_task(ireq); | 511 | struct sas_task *task = isci_request_access_task(ireq); |
512 | 512 | ||
513 | status = scic_sds_port_start_io(iport, idev, ireq); | 513 | status = sci_port_start_io(iport, idev, ireq); |
514 | if (status != SCI_SUCCESS) | 514 | if (status != SCI_SUCCESS) |
515 | return status; | 515 | return status; |
516 | 516 | ||
517 | status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); | 517 | status = sci_remote_node_context_start_io(&idev->rnc, ireq); |
518 | if (status != SCI_SUCCESS) | 518 | if (status != SCI_SUCCESS) |
519 | break; | 519 | break; |
520 | 520 | ||
521 | status = scic_sds_request_start(ireq); | 521 | status = sci_request_start(ireq); |
522 | if (status != SCI_SUCCESS) | 522 | if (status != SCI_SUCCESS) |
523 | break; | 523 | break; |
524 | 524 | ||
@@ -535,15 +535,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | |||
535 | struct sas_task *task = isci_request_access_task(ireq); | 535 | struct sas_task *task = isci_request_access_task(ireq); |
536 | 536 | ||
537 | if (task->ata_task.use_ncq) { | 537 | if (task->ata_task.use_ncq) { |
538 | status = scic_sds_port_start_io(iport, idev, ireq); | 538 | status = sci_port_start_io(iport, idev, ireq); |
539 | if (status != SCI_SUCCESS) | 539 | if (status != SCI_SUCCESS) |
540 | return status; | 540 | return status; |
541 | 541 | ||
542 | status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); | 542 | status = sci_remote_node_context_start_io(&idev->rnc, ireq); |
543 | if (status != SCI_SUCCESS) | 543 | if (status != SCI_SUCCESS) |
544 | break; | 544 | break; |
545 | 545 | ||
546 | status = scic_sds_request_start(ireq); | 546 | status = sci_request_start(ireq); |
547 | } else | 547 | } else |
548 | return SCI_FAILURE_INVALID_STATE; | 548 | return SCI_FAILURE_INVALID_STATE; |
549 | break; | 549 | break; |
@@ -551,15 +551,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | |||
551 | case SCI_STP_DEV_AWAIT_RESET: | 551 | case SCI_STP_DEV_AWAIT_RESET: |
552 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; | 552 | return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; |
553 | case SCI_SMP_DEV_IDLE: | 553 | case SCI_SMP_DEV_IDLE: |
554 | status = scic_sds_port_start_io(iport, idev, ireq); | 554 | status = sci_port_start_io(iport, idev, ireq); |
555 | if (status != SCI_SUCCESS) | 555 | if (status != SCI_SUCCESS) |
556 | return status; | 556 | return status; |
557 | 557 | ||
558 | status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq); | 558 | status = sci_remote_node_context_start_io(&idev->rnc, ireq); |
559 | if (status != SCI_SUCCESS) | 559 | if (status != SCI_SUCCESS) |
560 | break; | 560 | break; |
561 | 561 | ||
562 | status = scic_sds_request_start(ireq); | 562 | status = sci_request_start(ireq); |
563 | if (status != SCI_SUCCESS) | 563 | if (status != SCI_SUCCESS) |
564 | break; | 564 | break; |
565 | 565 | ||
@@ -574,7 +574,7 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, | |||
574 | return SCI_FAILURE_INVALID_STATE; | 574 | return SCI_FAILURE_INVALID_STATE; |
575 | } | 575 | } |
576 | 576 | ||
577 | scic_sds_remote_device_start_request(idev, ireq, status); | 577 | sci_remote_device_start_request(idev, ireq, status); |
578 | return status; | 578 | return status; |
579 | } | 579 | } |
580 | 580 | ||
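
Across the device states handled in sci_remote_device_start_io() above, the successful path is the same three-step sequence: start the I/O on the owning port, then on the remote node context, then start the request itself, backing out through sci_remote_device_start_request() if any step fails. A condensed sketch of that sequence (start_io_common() is a hypothetical wrapper; the per-state bookkeeping and substate transitions are omitted):

/* Hypothetical consolidation of the start-io sequence repeated above. */
static enum sci_status start_io_common(struct isci_port *iport,
				       struct isci_remote_device *idev,
				       struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_port_start_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_remote_node_context_start_io(&idev->rnc, ireq);
	if (status != SCI_SUCCESS)
		return status;

	return sci_request_start(ireq);
}
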
@@ -584,24 +584,24 @@ static enum sci_status common_complete_io(struct isci_port *iport, | |||
584 | { | 584 | { |
585 | enum sci_status status; | 585 | enum sci_status status; |
586 | 586 | ||
587 | status = scic_sds_request_complete(ireq); | 587 | status = sci_request_complete(ireq); |
588 | if (status != SCI_SUCCESS) | 588 | if (status != SCI_SUCCESS) |
589 | return status; | 589 | return status; |
590 | 590 | ||
591 | status = scic_sds_port_complete_io(iport, idev, ireq); | 591 | status = sci_port_complete_io(iport, idev, ireq); |
592 | if (status != SCI_SUCCESS) | 592 | if (status != SCI_SUCCESS) |
593 | return status; | 593 | return status; |
594 | 594 | ||
595 | scic_sds_remote_device_decrement_request_count(idev); | 595 | sci_remote_device_decrement_request_count(idev); |
596 | return status; | 596 | return status; |
597 | } | 597 | } |
598 | 598 | ||
599 | enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, | 599 | enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, |
600 | struct isci_remote_device *idev, | 600 | struct isci_remote_device *idev, |
601 | struct isci_request *ireq) | 601 | struct isci_request *ireq) |
602 | { | 602 | { |
603 | struct sci_base_state_machine *sm = &idev->sm; | 603 | struct sci_base_state_machine *sm = &idev->sm; |
604 | enum scic_sds_remote_device_states state = sm->current_state_id; | 604 | enum sci_remote_device_states state = sm->current_state_id; |
605 | struct isci_port *iport = idev->owning_port; | 605 | struct isci_port *iport = idev->owning_port; |
606 | enum sci_status status; | 606 | enum sci_status status; |
607 | 607 | ||
@@ -636,7 +636,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, | |||
636 | * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". | 636 | * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE". |
637 | */ | 637 | */ |
638 | sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); | 638 | sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); |
639 | } else if (scic_sds_remote_device_get_request_count(idev) == 0) | 639 | } else if (sci_remote_device_get_request_count(idev) == 0) |
640 | sci_change_state(sm, SCI_STP_DEV_IDLE); | 640 | sci_change_state(sm, SCI_STP_DEV_IDLE); |
641 | break; | 641 | break; |
642 | case SCI_SMP_DEV_CMD: | 642 | case SCI_SMP_DEV_CMD: |
@@ -650,8 +650,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, | |||
650 | if (status != SCI_SUCCESS) | 650 | if (status != SCI_SUCCESS) |
651 | break; | 651 | break; |
652 | 652 | ||
653 | if (scic_sds_remote_device_get_request_count(idev) == 0) | 653 | if (sci_remote_device_get_request_count(idev) == 0) |
654 | scic_sds_remote_node_context_destruct(&idev->rnc, | 654 | sci_remote_node_context_destruct(&idev->rnc, |
655 | rnc_destruct_done, | 655 | rnc_destruct_done, |
656 | idev); | 656 | idev); |
657 | break; | 657 | break; |
@@ -668,21 +668,21 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, | |||
668 | return status; | 668 | return status; |
669 | } | 669 | } |
670 | 670 | ||
671 | static void scic_sds_remote_device_continue_request(void *dev) | 671 | static void sci_remote_device_continue_request(void *dev) |
672 | { | 672 | { |
673 | struct isci_remote_device *idev = dev; | 673 | struct isci_remote_device *idev = dev; |
674 | 674 | ||
675 | /* we need to check if this request is still valid to continue. */ | 675 | /* we need to check if this request is still valid to continue. */ |
676 | if (idev->working_request) | 676 | if (idev->working_request) |
677 | scic_controller_continue_io(idev->working_request); | 677 | sci_controller_continue_io(idev->working_request); |
678 | } | 678 | } |
679 | 679 | ||
680 | enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, | 680 | enum sci_status sci_remote_device_start_task(struct isci_host *ihost, |
681 | struct isci_remote_device *idev, | 681 | struct isci_remote_device *idev, |
682 | struct isci_request *ireq) | 682 | struct isci_request *ireq) |
683 | { | 683 | { |
684 | struct sci_base_state_machine *sm = &idev->sm; | 684 | struct sci_base_state_machine *sm = &idev->sm; |
685 | enum scic_sds_remote_device_states state = sm->current_state_id; | 685 | enum sci_remote_device_states state = sm->current_state_id; |
686 | struct isci_port *iport = idev->owning_port; | 686 | struct isci_port *iport = idev->owning_port; |
687 | enum sci_status status; | 687 | enum sci_status status; |
688 | 688 | ||
@@ -705,15 +705,15 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, | |||
705 | case SCI_STP_DEV_NCQ: | 705 | case SCI_STP_DEV_NCQ: |
706 | case SCI_STP_DEV_NCQ_ERROR: | 706 | case SCI_STP_DEV_NCQ_ERROR: |
707 | case SCI_STP_DEV_AWAIT_RESET: | 707 | case SCI_STP_DEV_AWAIT_RESET: |
708 | status = scic_sds_port_start_io(iport, idev, ireq); | 708 | status = sci_port_start_io(iport, idev, ireq); |
709 | if (status != SCI_SUCCESS) | 709 | if (status != SCI_SUCCESS) |
710 | return status; | 710 | return status; |
711 | 711 | ||
712 | status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); | 712 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); |
713 | if (status != SCI_SUCCESS) | 713 | if (status != SCI_SUCCESS) |
714 | goto out; | 714 | goto out; |
715 | 715 | ||
716 | status = scic_sds_request_start(ireq); | 716 | status = sci_request_start(ireq); |
717 | if (status != SCI_SUCCESS) | 717 | if (status != SCI_SUCCESS) |
718 | goto out; | 718 | goto out; |
719 | 719 | ||
@@ -731,32 +731,32 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, | |||
731 | * the correct action when the remote node context is suspended | 731 | * the correct action when the remote node context is suspended |
732 | * and later resumed. | 732 | * and later resumed. |
733 | */ | 733 | */ |
734 | scic_sds_remote_node_context_suspend(&idev->rnc, | 734 | sci_remote_node_context_suspend(&idev->rnc, |
735 | SCI_SOFTWARE_SUSPENSION, NULL, NULL); | 735 | SCI_SOFTWARE_SUSPENSION, NULL, NULL); |
736 | scic_sds_remote_node_context_resume(&idev->rnc, | 736 | sci_remote_node_context_resume(&idev->rnc, |
737 | scic_sds_remote_device_continue_request, | 737 | sci_remote_device_continue_request, |
738 | idev); | 738 | idev); |
739 | 739 | ||
740 | out: | 740 | out: |
741 | scic_sds_remote_device_start_request(idev, ireq, status); | 741 | sci_remote_device_start_request(idev, ireq, status); |
742 | /* We need to let the controller start request handler know that | 742 | /* We need to let the controller start request handler know that |
743 | * it can't post TC yet. We will provide a callback function to | 743 | * it can't post TC yet. We will provide a callback function to |
744 | * post TC when RNC gets resumed. | 744 | * post TC when RNC gets resumed. |
745 | */ | 745 | */ |
746 | return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; | 746 | return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; |
747 | case SCI_DEV_READY: | 747 | case SCI_DEV_READY: |
748 | status = scic_sds_port_start_io(iport, idev, ireq); | 748 | status = sci_port_start_io(iport, idev, ireq); |
749 | if (status != SCI_SUCCESS) | 749 | if (status != SCI_SUCCESS) |
750 | return status; | 750 | return status; |
751 | 751 | ||
752 | status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq); | 752 | status = sci_remote_node_context_start_task(&idev->rnc, ireq); |
753 | if (status != SCI_SUCCESS) | 753 | if (status != SCI_SUCCESS) |
754 | break; | 754 | break; |
755 | 755 | ||
756 | status = scic_sds_request_start(ireq); | 756 | status = sci_request_start(ireq); |
757 | break; | 757 | break; |
758 | } | 758 | } |
759 | scic_sds_remote_device_start_request(idev, ireq, status); | 759 | sci_remote_device_start_request(idev, ireq, status); |
760 | 760 | ||
761 | return status; | 761 | return status; |
762 | } | 762 | } |
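In the STP branches above, the task frame cannot be posted while the remote node context (RNC) is still active, so the routine parks the request, suspends and immediately resumes the RNC, and lets the resume callback re-drive the I/O; returning SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS tells the caller not to post a task context yet. A condensed sketch of that deferred-continue pattern, using only names from the hunk above (the callback name here is illustrative; the driver's own is sci_remote_device_continue_request()):

	/* Sketch only; the real logic is in sci_remote_device_start_task(). */
	static void continue_when_resumed(void *dev)
	{
		struct isci_remote_device *idev = dev;

		/* the request may already have completed while the RNC was suspended */
		if (idev->working_request)
			sci_controller_continue_io(idev->working_request);
	}

	/* ... in the STP/NCQ/await-reset branch of the start-task path ... */
	sci_remote_node_context_suspend(&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
	sci_remote_node_context_resume(&idev->rnc, continue_when_resumed, idev);
	return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; /* TC is posted once the RNC resumes */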
@@ -769,16 +769,16 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, | |||
769 | * This method takes the request and builds an appropriate SCU context for the | 769 | * This method takes the request and builds an appropriate SCU context for the |
770 | * request and then requests the controller to post the request. none | 770 | * request and then requests the controller to post the request. none |
771 | */ | 771 | */ |
772 | void scic_sds_remote_device_post_request( | 772 | void sci_remote_device_post_request( |
773 | struct isci_remote_device *idev, | 773 | struct isci_remote_device *idev, |
774 | u32 request) | 774 | u32 request) |
775 | { | 775 | { |
776 | u32 context; | 776 | u32 context; |
777 | 777 | ||
778 | context = scic_sds_remote_device_build_command_context(idev, request); | 778 | context = sci_remote_device_build_command_context(idev, request); |
779 | 779 | ||
780 | scic_sds_controller_post_request( | 780 | sci_controller_post_request( |
781 | scic_sds_remote_device_get_controller(idev), | 781 | sci_remote_device_get_controller(idev), |
782 | context | 782 | context |
783 | ); | 783 | ); |
784 | } | 784 | } |
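The post code handed to this helper is one of the SCU_CONTEXT_COMMAND_POST_* values used in remote_node_context.c further down in this diff; the helper folds in the engine group, logical port, and remote node index before handing the word to the controller. A hedged usage sketch:

	/* Sketch: ask the SCU to invalidate this device's remote node context.
	 * The command code comes from the RNC invalidate path shown later in
	 * this diff.
	 */
	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);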
@@ -798,7 +798,7 @@ static void remote_device_resume_done(void *_dev) | |||
798 | sci_change_state(&idev->sm, SCI_DEV_READY); | 798 | sci_change_state(&idev->sm, SCI_DEV_READY); |
799 | } | 799 | } |
800 | 800 | ||
801 | static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) | 801 | static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) |
802 | { | 802 | { |
803 | struct isci_remote_device *idev = _dev; | 803 | struct isci_remote_device *idev = _dev; |
804 | struct isci_host *ihost = idev->owning_port->owning_controller; | 804 | struct isci_host *ihost = idev->owning_port->owning_controller; |
@@ -810,7 +810,7 @@ static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handl | |||
810 | isci_remote_device_ready(ihost, idev); | 810 | isci_remote_device_ready(ihost, idev); |
811 | } | 811 | } |
812 | 812 | ||
813 | static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) | 813 | static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) |
814 | { | 814 | { |
815 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 815 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
816 | 816 | ||
@@ -819,7 +819,7 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac | |||
819 | } | 819 | } |
820 | 820 | ||
821 | /** | 821 | /** |
822 | * scic_remote_device_destruct() - free remote node context and destruct | 822 | * sci_remote_device_destruct() - free remote node context and destruct |
823 | * @remote_device: This parameter specifies the remote device to be destructed. | 823 | * @remote_device: This parameter specifies the remote device to be destructed. |
824 | * | 824 | * |
825 | * Remote device objects are a limited resource. As such, they must be | 825 | * Remote device objects are a limited resource. As such, they must be |
@@ -831,10 +831,10 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac | |||
831 | * device isn't valid (e.g. it's already been destroyed, the handle isn't | 831 | * device isn't valid (e.g. it's already been destroyed, the handle isn't |
832 | * valid, etc.). | 832 | * valid, etc.). |
833 | */ | 833 | */ |
834 | static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev) | 834 | static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) |
835 | { | 835 | { |
836 | struct sci_base_state_machine *sm = &idev->sm; | 836 | struct sci_base_state_machine *sm = &idev->sm; |
837 | enum scic_sds_remote_device_states state = sm->current_state_id; | 837 | enum sci_remote_device_states state = sm->current_state_id; |
838 | struct isci_host *ihost; | 838 | struct isci_host *ihost; |
839 | 839 | ||
840 | if (state != SCI_DEV_STOPPED) { | 840 | if (state != SCI_DEV_STOPPED) { |
@@ -844,7 +844,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id | |||
844 | } | 844 | } |
845 | 845 | ||
846 | ihost = idev->owning_port->owning_controller; | 846 | ihost = idev->owning_port->owning_controller; |
847 | scic_sds_controller_free_remote_node_context(ihost, idev, | 847 | sci_controller_free_remote_node_context(ihost, idev, |
848 | idev->rnc.remote_node_index); | 848 | idev->rnc.remote_node_index); |
849 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 849 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
850 | sci_change_state(sm, SCI_DEV_FINAL); | 850 | sci_change_state(sm, SCI_DEV_FINAL); |
@@ -869,12 +869,12 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ | |||
869 | * io requests in process */ | 869 | * io requests in process */ |
870 | BUG_ON(!list_empty(&idev->reqs_in_process)); | 870 | BUG_ON(!list_empty(&idev->reqs_in_process)); |
871 | 871 | ||
872 | scic_remote_device_destruct(idev); | 872 | sci_remote_device_destruct(idev); |
873 | list_del_init(&idev->node); | 873 | list_del_init(&idev->node); |
874 | isci_put_device(idev); | 874 | isci_put_device(idev); |
875 | } | 875 | } |
876 | 876 | ||
877 | static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) | 877 | static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) |
878 | { | 878 | { |
879 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 879 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
880 | struct isci_host *ihost = idev->owning_port->owning_controller; | 880 | struct isci_host *ihost = idev->owning_port->owning_controller; |
@@ -887,19 +887,19 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac | |||
887 | if (prev_state == SCI_DEV_STOPPING) | 887 | if (prev_state == SCI_DEV_STOPPING) |
888 | isci_remote_device_deconstruct(ihost, idev); | 888 | isci_remote_device_deconstruct(ihost, idev); |
889 | 889 | ||
890 | scic_sds_controller_remote_device_stopped(ihost, idev); | 890 | sci_controller_remote_device_stopped(ihost, idev); |
891 | } | 891 | } |
892 | 892 | ||
893 | static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) | 893 | static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) |
894 | { | 894 | { |
895 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 895 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
896 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); | 896 | struct isci_host *ihost = sci_remote_device_get_controller(idev); |
897 | 897 | ||
898 | isci_remote_device_not_ready(ihost, idev, | 898 | isci_remote_device_not_ready(ihost, idev, |
899 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); | 899 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); |
900 | } | 900 | } |
901 | 901 | ||
902 | static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) | 902 | static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) |
903 | { | 903 | { |
904 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 904 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
905 | struct isci_host *ihost = idev->owning_port->owning_controller; | 905 | struct isci_host *ihost = idev->owning_port->owning_controller; |
@@ -913,7 +913,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi | |||
913 | isci_remote_device_ready(ihost, idev); | 913 | isci_remote_device_ready(ihost, idev); |
914 | } | 914 | } |
915 | 915 | ||
916 | static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) | 916 | static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) |
917 | { | 917 | { |
918 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 918 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
919 | struct domain_device *dev = idev->domain_dev; | 919 | struct domain_device *dev = idev->domain_dev; |
@@ -926,42 +926,42 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin | |||
926 | } | 926 | } |
927 | } | 927 | } |
928 | 928 | ||
929 | static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) | 929 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) |
930 | { | 930 | { |
931 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 931 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
932 | 932 | ||
933 | scic_sds_remote_node_context_suspend( | 933 | sci_remote_node_context_suspend( |
934 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); | 934 | &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); |
935 | } | 935 | } |
936 | 936 | ||
937 | static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) | 937 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) |
938 | { | 938 | { |
939 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 939 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
940 | 940 | ||
941 | scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL); | 941 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); |
942 | } | 942 | } |
943 | 943 | ||
944 | static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) | 944 | static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) |
945 | { | 945 | { |
946 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 946 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
947 | 947 | ||
948 | idev->working_request = NULL; | 948 | idev->working_request = NULL; |
949 | if (scic_sds_remote_node_context_is_ready(&idev->rnc)) { | 949 | if (sci_remote_node_context_is_ready(&idev->rnc)) { |
950 | /* | 950 | /* |
951 | * Since the RNC is ready, it's alright to finish completion | 951 | * Since the RNC is ready, it's alright to finish completion |
952 | * processing (e.g. signal the remote device is ready). */ | 952 | * processing (e.g. signal the remote device is ready). */ |
953 | scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); | 953 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); |
954 | } else { | 954 | } else { |
955 | scic_sds_remote_node_context_resume(&idev->rnc, | 955 | sci_remote_node_context_resume(&idev->rnc, |
956 | scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler, | 956 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler, |
957 | idev); | 957 | idev); |
958 | } | 958 | } |
959 | } | 959 | } |
960 | 960 | ||
961 | static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 961 | static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
962 | { | 962 | { |
963 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 963 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
964 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); | 964 | struct isci_host *ihost = sci_remote_device_get_controller(idev); |
965 | 965 | ||
966 | BUG_ON(idev->working_request == NULL); | 966 | BUG_ON(idev->working_request == NULL); |
967 | 967 | ||
@@ -969,28 +969,28 @@ static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_ | |||
969 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); | 969 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); |
970 | } | 970 | } |
971 | 971 | ||
972 | static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) | 972 | static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) |
973 | { | 973 | { |
974 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 974 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
975 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); | 975 | struct isci_host *ihost = sci_remote_device_get_controller(idev); |
976 | 976 | ||
977 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) | 977 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) |
978 | isci_remote_device_not_ready(ihost, idev, | 978 | isci_remote_device_not_ready(ihost, idev, |
979 | idev->not_ready_reason); | 979 | idev->not_ready_reason); |
980 | } | 980 | } |
981 | 981 | ||
982 | static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) | 982 | static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) |
983 | { | 983 | { |
984 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 984 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
985 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); | 985 | struct isci_host *ihost = sci_remote_device_get_controller(idev); |
986 | 986 | ||
987 | isci_remote_device_ready(ihost, idev); | 987 | isci_remote_device_ready(ihost, idev); |
988 | } | 988 | } |
989 | 989 | ||
990 | static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 990 | static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
991 | { | 991 | { |
992 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 992 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
993 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); | 993 | struct isci_host *ihost = sci_remote_device_get_controller(idev); |
994 | 994 | ||
995 | BUG_ON(idev->working_request == NULL); | 995 | BUG_ON(idev->working_request == NULL); |
996 | 996 | ||
@@ -998,83 +998,83 @@ static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_ | |||
998 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); | 998 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); |
999 | } | 999 | } |
1000 | 1000 | ||
1001 | static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) | 1001 | static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) |
1002 | { | 1002 | { |
1003 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1003 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1004 | 1004 | ||
1005 | idev->working_request = NULL; | 1005 | idev->working_request = NULL; |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | static const struct sci_base_state scic_sds_remote_device_state_table[] = { | 1008 | static const struct sci_base_state sci_remote_device_state_table[] = { |
1009 | [SCI_DEV_INITIAL] = { | 1009 | [SCI_DEV_INITIAL] = { |
1010 | .enter_state = scic_sds_remote_device_initial_state_enter, | 1010 | .enter_state = sci_remote_device_initial_state_enter, |
1011 | }, | 1011 | }, |
1012 | [SCI_DEV_STOPPED] = { | 1012 | [SCI_DEV_STOPPED] = { |
1013 | .enter_state = scic_sds_remote_device_stopped_state_enter, | 1013 | .enter_state = sci_remote_device_stopped_state_enter, |
1014 | }, | 1014 | }, |
1015 | [SCI_DEV_STARTING] = { | 1015 | [SCI_DEV_STARTING] = { |
1016 | .enter_state = scic_sds_remote_device_starting_state_enter, | 1016 | .enter_state = sci_remote_device_starting_state_enter, |
1017 | }, | 1017 | }, |
1018 | [SCI_DEV_READY] = { | 1018 | [SCI_DEV_READY] = { |
1019 | .enter_state = scic_sds_remote_device_ready_state_enter, | 1019 | .enter_state = sci_remote_device_ready_state_enter, |
1020 | .exit_state = scic_sds_remote_device_ready_state_exit | 1020 | .exit_state = sci_remote_device_ready_state_exit |
1021 | }, | 1021 | }, |
1022 | [SCI_STP_DEV_IDLE] = { | 1022 | [SCI_STP_DEV_IDLE] = { |
1023 | .enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter, | 1023 | .enter_state = sci_stp_remote_device_ready_idle_substate_enter, |
1024 | }, | 1024 | }, |
1025 | [SCI_STP_DEV_CMD] = { | 1025 | [SCI_STP_DEV_CMD] = { |
1026 | .enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter, | 1026 | .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, |
1027 | }, | 1027 | }, |
1028 | [SCI_STP_DEV_NCQ] = { }, | 1028 | [SCI_STP_DEV_NCQ] = { }, |
1029 | [SCI_STP_DEV_NCQ_ERROR] = { | 1029 | [SCI_STP_DEV_NCQ_ERROR] = { |
1030 | .enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter, | 1030 | .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, |
1031 | }, | 1031 | }, |
1032 | [SCI_STP_DEV_AWAIT_RESET] = { }, | 1032 | [SCI_STP_DEV_AWAIT_RESET] = { }, |
1033 | [SCI_SMP_DEV_IDLE] = { | 1033 | [SCI_SMP_DEV_IDLE] = { |
1034 | .enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter, | 1034 | .enter_state = sci_smp_remote_device_ready_idle_substate_enter, |
1035 | }, | 1035 | }, |
1036 | [SCI_SMP_DEV_CMD] = { | 1036 | [SCI_SMP_DEV_CMD] = { |
1037 | .enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter, | 1037 | .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, |
1038 | .exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit, | 1038 | .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, |
1039 | }, | 1039 | }, |
1040 | [SCI_DEV_STOPPING] = { }, | 1040 | [SCI_DEV_STOPPING] = { }, |
1041 | [SCI_DEV_FAILED] = { }, | 1041 | [SCI_DEV_FAILED] = { }, |
1042 | [SCI_DEV_RESETTING] = { | 1042 | [SCI_DEV_RESETTING] = { |
1043 | .enter_state = scic_sds_remote_device_resetting_state_enter, | 1043 | .enter_state = sci_remote_device_resetting_state_enter, |
1044 | .exit_state = scic_sds_remote_device_resetting_state_exit | 1044 | .exit_state = sci_remote_device_resetting_state_exit |
1045 | }, | 1045 | }, |
1046 | [SCI_DEV_FINAL] = { }, | 1046 | [SCI_DEV_FINAL] = { }, |
1047 | }; | 1047 | }; |
1048 | 1048 | ||
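Each entry above supplies optional enter/exit hooks for one state of the remote device state machine; empty initializers simply mean the state has no hooks. Roughly how such a table is driven on a state change (a sketch of the convention only, not the driver's own sci_change_state(), which presumably stores the table handed to sci_init_sm() inside the state machine object):

	/* Sketch: call the optional exit hook of the old state, switch the
	 * current state id, then call the optional enter hook of the new
	 * state.  Field names match the initializers above.
	 */
	static void change_state_sketch(struct sci_base_state_machine *sm, u32 next_state)
	{
		const struct sci_base_state *table = sci_remote_device_state_table;

		if (table[sm->current_state_id].exit_state)
			table[sm->current_state_id].exit_state(sm);

		sm->current_state_id = next_state;

		if (table[next_state].enter_state)
			table[next_state].enter_state(sm);
	}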
1049 | /** | 1049 | /** |
1050 | * scic_remote_device_construct() - common construction | 1050 | * sci_remote_device_construct() - common construction |
1051 | * @sci_port: SAS/SATA port through which this device is accessed. | 1051 | * @sci_port: SAS/SATA port through which this device is accessed. |
1052 | * @sci_dev: remote device to construct | 1052 | * @sci_dev: remote device to construct |
1053 | * | 1053 | * |
1054 | * This routine just performs benign initialization and does not | 1054 | * This routine just performs benign initialization and does not |
1055 | * allocate the remote_node_context which is left to | 1055 | * allocate the remote_node_context which is left to |
1056 | * scic_remote_device_[de]a_construct(). scic_remote_device_destruct() | 1056 | * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() |
1057 | * frees the remote_node_context(s) for the device. | 1057 | * frees the remote_node_context(s) for the device. |
1058 | */ | 1058 | */ |
1059 | static void scic_remote_device_construct(struct isci_port *iport, | 1059 | static void sci_remote_device_construct(struct isci_port *iport, |
1060 | struct isci_remote_device *idev) | 1060 | struct isci_remote_device *idev) |
1061 | { | 1061 | { |
1062 | idev->owning_port = iport; | 1062 | idev->owning_port = iport; |
1063 | idev->started_request_count = 0; | 1063 | idev->started_request_count = 0; |
1064 | 1064 | ||
1065 | sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL); | 1065 | sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); |
1066 | 1066 | ||
1067 | scic_sds_remote_node_context_construct(&idev->rnc, | 1067 | sci_remote_node_context_construct(&idev->rnc, |
1068 | SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); | 1068 | SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | /** | 1071 | /** |
1072 | * scic_remote_device_da_construct() - construct direct attached device. | 1072 | * sci_remote_device_da_construct() - construct direct attached device. |
1073 | * | 1073 | * |
1074 | * The information (e.g. IAF, Signature FIS, etc.) necessary to build | 1074 | * The information (e.g. IAF, Signature FIS, etc.) necessary to build |
1075 | * the device is known to the SCI Core since it is contained in the | 1075 | * the device is known to the SCI Core since it is contained in the |
1076 | * scic_phy object. Remote node context(s) is/are a global resource | 1076 | * sci_phy object. Remote node context(s) is/are a global resource |
1077 | * allocated by this routine, freed by scic_remote_device_destruct(). | 1077 | * allocated by this routine, freed by sci_remote_device_destruct(). |
1078 | * | 1078 | * |
1079 | * Returns: | 1079 | * Returns: |
1080 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. | 1080 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. |
@@ -1082,20 +1082,20 @@ static void scic_remote_device_construct(struct isci_port *iport, | |||
1082 | * sata-only controller instance. | 1082 | * sata-only controller instance. |
1083 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. | 1083 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. |
1084 | */ | 1084 | */ |
1085 | static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, | 1085 | static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, |
1086 | struct isci_remote_device *idev) | 1086 | struct isci_remote_device *idev) |
1087 | { | 1087 | { |
1088 | enum sci_status status; | 1088 | enum sci_status status; |
1089 | struct domain_device *dev = idev->domain_dev; | 1089 | struct domain_device *dev = idev->domain_dev; |
1090 | 1090 | ||
1091 | scic_remote_device_construct(iport, idev); | 1091 | sci_remote_device_construct(iport, idev); |
1092 | 1092 | ||
1093 | /* | 1093 | /* |
1094 | * This information is needed to determine how many remote node context | 1094 | * This information is needed to determine how many remote node context |
1095 | * entries will be needed to store the remote node. | 1095 | * entries will be needed to store the remote node. |
1096 | */ | 1096 | */ |
1097 | idev->is_direct_attached = true; | 1097 | idev->is_direct_attached = true; |
1098 | status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, | 1098 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1099 | idev, | 1099 | idev, |
1100 | &idev->rnc.remote_node_index); | 1100 | &idev->rnc.remote_node_index); |
1101 | 1101 | ||
@@ -1108,7 +1108,7 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, | |||
1108 | else | 1108 | else |
1109 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 1109 | return SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
1110 | 1110 | ||
1111 | idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport); | 1111 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); |
1112 | 1112 | ||
1113 | /* / @todo Should I assign the port width by reading all of the phys on the port? */ | 1113 | /* / @todo Should I assign the port width by reading all of the phys on the port? */ |
1114 | idev->device_port_width = 1; | 1114 | idev->device_port_width = 1; |
@@ -1117,10 +1117,10 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, | |||
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | /** | 1119 | /** |
1120 | * scic_remote_device_ea_construct() - construct expander attached device | 1120 | * sci_remote_device_ea_construct() - construct expander attached device |
1121 | * | 1121 | * |
1122 | * Remote node context(s) is/are a global resource allocated by this | 1122 | * Remote node context(s) is/are a global resource allocated by this |
1123 | * routine, freed by scic_remote_device_destruct(). | 1123 | * routine, freed by sci_remote_device_destruct(). |
1124 | * | 1124 | * |
1125 | * Returns: | 1125 | * Returns: |
1126 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. | 1126 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. |
@@ -1128,15 +1128,15 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport, | |||
1128 | * sata-only controller instance. | 1128 | * sata-only controller instance. |
1129 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. | 1129 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. |
1130 | */ | 1130 | */ |
1131 | static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, | 1131 | static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, |
1132 | struct isci_remote_device *idev) | 1132 | struct isci_remote_device *idev) |
1133 | { | 1133 | { |
1134 | struct domain_device *dev = idev->domain_dev; | 1134 | struct domain_device *dev = idev->domain_dev; |
1135 | enum sci_status status; | 1135 | enum sci_status status; |
1136 | 1136 | ||
1137 | scic_remote_device_construct(iport, idev); | 1137 | sci_remote_device_construct(iport, idev); |
1138 | 1138 | ||
1139 | status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller, | 1139 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1140 | idev, | 1140 | idev, |
1141 | &idev->rnc.remote_node_index); | 1141 | &idev->rnc.remote_node_index); |
1142 | if (status != SCI_SUCCESS) | 1142 | if (status != SCI_SUCCESS) |
@@ -1155,7 +1155,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, | |||
1155 | * connection the logical link rate is the same as the | 1155 | * connection the logical link rate is the same as the |
1156 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay | 1156 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay |
1157 | * one another, so this code works for both situations. */ | 1157 | * one another, so this code works for both situations. */ |
1158 | idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport), | 1158 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), |
1159 | dev->linkrate); | 1159 | dev->linkrate); |
1160 | 1160 | ||
1161 | /* / @todo Should I assign the port width by reading all of the phys on the port? */ | 1161 | /* / @todo Should I assign the port width by reading all of the phys on the port? */ |
@@ -1165,7 +1165,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, | |||
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | /** | 1167 | /** |
1168 | * scic_remote_device_start() - This method will start the supplied remote | 1168 | * sci_remote_device_start() - This method will start the supplied remote |
1169 | * device. This method enables normal IO requests to flow through to the | 1169 | * device. This method enables normal IO requests to flow through to the |
1170 | * remote device. | 1170 | * remote device. |
1171 | * @remote_device: This parameter specifies the device to be started. | 1171 | * @remote_device: This parameter specifies the device to be started. |
@@ -1177,11 +1177,11 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport, | |||
1177 | * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start | 1177 | * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start |
1178 | * the device when there have been no phys added to it. | 1178 | * the device when there have been no phys added to it. |
1179 | */ | 1179 | */ |
1180 | static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, | 1180 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, |
1181 | u32 timeout) | 1181 | u32 timeout) |
1182 | { | 1182 | { |
1183 | struct sci_base_state_machine *sm = &idev->sm; | 1183 | struct sci_base_state_machine *sm = &idev->sm; |
1184 | enum scic_sds_remote_device_states state = sm->current_state_id; | 1184 | enum sci_remote_device_states state = sm->current_state_id; |
1185 | enum sci_status status; | 1185 | enum sci_status status; |
1186 | 1186 | ||
1187 | if (state != SCI_DEV_STOPPED) { | 1187 | if (state != SCI_DEV_STOPPED) { |
@@ -1190,7 +1190,7 @@ static enum sci_status scic_remote_device_start(struct isci_remote_device *idev, | |||
1190 | return SCI_FAILURE_INVALID_STATE; | 1190 | return SCI_FAILURE_INVALID_STATE; |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | status = scic_sds_remote_node_context_resume(&idev->rnc, | 1193 | status = sci_remote_node_context_resume(&idev->rnc, |
1194 | remote_device_resume_done, | 1194 | remote_device_resume_done, |
1195 | idev); | 1195 | idev); |
1196 | if (status != SCI_SUCCESS) | 1196 | if (status != SCI_SUCCESS) |
@@ -1209,9 +1209,9 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, | |||
1209 | enum sci_status status; | 1209 | enum sci_status status; |
1210 | 1210 | ||
1211 | if (dev->parent && dev_is_expander(dev->parent)) | 1211 | if (dev->parent && dev_is_expander(dev->parent)) |
1212 | status = scic_remote_device_ea_construct(iport, idev); | 1212 | status = sci_remote_device_ea_construct(iport, idev); |
1213 | else | 1213 | else |
1214 | status = scic_remote_device_da_construct(iport, idev); | 1214 | status = sci_remote_device_da_construct(iport, idev); |
1215 | 1215 | ||
1216 | if (status != SCI_SUCCESS) { | 1216 | if (status != SCI_SUCCESS) { |
1217 | dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", | 1217 | dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", |
@@ -1221,7 +1221,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, | |||
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | /* start the device. */ | 1223 | /* start the device. */ |
1224 | status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); | 1224 | status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); |
1225 | 1225 | ||
1226 | if (status != SCI_SUCCESS) | 1226 | if (status != SCI_SUCCESS) |
1227 | dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", | 1227 | dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", |
@@ -1322,7 +1322,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem | |||
1322 | set_bit(IDEV_STOP_PENDING, &idev->flags); | 1322 | set_bit(IDEV_STOP_PENDING, &idev->flags); |
1323 | 1323 | ||
1324 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1324 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1325 | status = scic_remote_device_stop(idev, 50); | 1325 | status = sci_remote_device_stop(idev, 50); |
1326 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1326 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1327 | 1327 | ||
1328 | /* Wait for the stop complete callback. */ | 1328 | /* Wait for the stop complete callback. */ |
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index bc4da20a13fa..fa9a0e6cc309 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h | |||
@@ -61,7 +61,7 @@ | |||
61 | #include "remote_node_context.h" | 61 | #include "remote_node_context.h" |
62 | #include "port.h" | 62 | #include "port.h" |
63 | 63 | ||
64 | enum scic_remote_device_not_ready_reason_code { | 64 | enum sci_remote_device_not_ready_reason_code { |
65 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, | 65 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, |
66 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, | 66 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, |
67 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, | 67 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, |
@@ -97,7 +97,7 @@ struct isci_remote_device { | |||
97 | enum sas_linkrate connection_rate; | 97 | enum sas_linkrate connection_rate; |
98 | bool is_direct_attached; | 98 | bool is_direct_attached; |
99 | struct isci_port *owning_port; | 99 | struct isci_port *owning_port; |
100 | struct scic_sds_remote_node_context rnc; | 100 | struct sci_remote_node_context rnc; |
101 | /* XXX unify with device reference counting and delete */ | 101 | /* XXX unify with device reference counting and delete */ |
102 | u32 started_request_count; | 102 | u32 started_request_count; |
103 | struct isci_request *working_request; | 103 | struct isci_request *working_request; |
@@ -106,7 +106,7 @@ struct isci_remote_device { | |||
106 | 106 | ||
107 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 | 107 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 |
108 | 108 | ||
109 | /* device reference routines must be called under scic_lock */ | 109 | /* device reference routines must be called under sci_lock */ |
110 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) | 110 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) |
111 | { | 111 | { |
112 | struct isci_remote_device *idev = dev->lldd_dev; | 112 | struct isci_remote_device *idev = dev->lldd_dev; |
@@ -137,7 +137,7 @@ bool isci_device_is_reset_pending(struct isci_host *ihost, | |||
137 | void isci_device_clear_reset_pending(struct isci_host *ihost, | 137 | void isci_device_clear_reset_pending(struct isci_host *ihost, |
138 | struct isci_remote_device *idev); | 138 | struct isci_remote_device *idev); |
139 | /** | 139 | /** |
140 | * scic_remote_device_stop() - This method will stop both transmission and | 140 | * sci_remote_device_stop() - This method will stop both transmission and |
141 | * reception of link activity for the supplied remote device. This method | 141 | * reception of link activity for the supplied remote device. This method |
142 | * disables normal IO requests from flowing through to the remote device. | 142 | * disables normal IO requests from flowing through to the remote device. |
143 | * @remote_device: This parameter specifies the device to be stopped. | 143 | * @remote_device: This parameter specifies the device to be stopped. |
@@ -148,12 +148,12 @@ void isci_device_clear_reset_pending(struct isci_host *ihost, | |||
148 | * This value is returned if the transmission and reception for the device was | 148 | * This value is returned if the transmission and reception for the device was |
149 | * successfully stopped. | 149 | * successfully stopped. |
150 | */ | 150 | */ |
151 | enum sci_status scic_remote_device_stop( | 151 | enum sci_status sci_remote_device_stop( |
152 | struct isci_remote_device *idev, | 152 | struct isci_remote_device *idev, |
153 | u32 timeout); | 153 | u32 timeout); |
154 | 154 | ||
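The stop is asynchronous: as isci_remote_device_stop() earlier in this diff shows, the caller flags the pending stop, issues it under the controller's scic_lock, and then waits for the stop-complete callback. A condensed sketch of that calling convention (variable declarations omitted):

	/* Sketch of the calling pattern from remote_device.c above: issue the
	 * stop under ihost->scic_lock; completion is signalled later through
	 * the device's stop-complete notification.
	 */
	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* ... wait for the stop complete callback before dropping the device ... */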
155 | /** | 155 | /** |
156 | * scic_remote_device_reset() - This method will reset the device making it | 156 | * sci_remote_device_reset() - This method will reset the device making it |
157 | * ready for operation. This method must be called anytime the device is | 157 | * ready for operation. This method must be called anytime the device is |
158 | * reset either through a SMP phy control or a port hard reset request. | 158 | * reset either through a SMP phy control or a port hard reset request. |
159 | * @remote_device: This parameter specifies the device to be reset. | 159 | * @remote_device: This parameter specifies the device to be reset. |
@@ -164,11 +164,11 @@ enum sci_status scic_remote_device_stop( | |||
164 | * was accepted. SCI_SUCCESS This value is returned if the device reset is | 164 | * was accepted. SCI_SUCCESS This value is returned if the device reset is |
165 | * started. | 165 | * started. |
166 | */ | 166 | */ |
167 | enum sci_status scic_remote_device_reset( | 167 | enum sci_status sci_remote_device_reset( |
168 | struct isci_remote_device *idev); | 168 | struct isci_remote_device *idev); |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * scic_remote_device_reset_complete() - This method informs the device object | 171 | * sci_remote_device_reset_complete() - This method informs the device object |
172 | * that the reset operation is complete and the device can resume operation | 172 | * that the reset operation is complete and the device can resume operation |
173 | * again. | 173 | * again. |
174 | * @remote_device: This parameter specifies the device which is to be informed | 174 | * @remote_device: This parameter specifies the device which is to be informed |
@@ -177,18 +177,16 @@ enum sci_status scic_remote_device_reset( | |||
177 | * An indication that the device is resuming operation. SCI_SUCCESS the device | 177 | * An indication that the device is resuming operation. SCI_SUCCESS the device |
178 | * is resuming operation. | 178 | * is resuming operation. |
179 | */ | 179 | */ |
180 | enum sci_status scic_remote_device_reset_complete( | 180 | enum sci_status sci_remote_device_reset_complete( |
181 | struct isci_remote_device *idev); | 181 | struct isci_remote_device *idev); |
182 | 182 | ||
183 | #define scic_remote_device_is_atapi(device_handle) false | ||
184 | |||
185 | /** | 183 | /** |
186 | * enum scic_sds_remote_device_states - This enumeration depicts all the states | 184 | * enum sci_remote_device_states - This enumeration depicts all the states |
187 | * for the common remote device state machine. | 185 | * for the common remote device state machine. |
188 | * | 186 | * |
189 | * | 187 | * |
190 | */ | 188 | */ |
191 | enum scic_sds_remote_device_states { | 189 | enum sci_remote_device_states { |
192 | /** | 190 | /** |
193 | * Simply the initial state for the base remote device state machine. | 191 | * Simply the initial state for the base remote device state machine. |
194 | */ | 192 | */ |
@@ -293,7 +291,7 @@ enum scic_sds_remote_device_states { | |||
293 | SCI_DEV_FINAL, | 291 | SCI_DEV_FINAL, |
294 | }; | 292 | }; |
295 | 293 | ||
296 | static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc) | 294 | static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) |
297 | { | 295 | { |
298 | struct isci_remote_device *idev; | 296 | struct isci_remote_device *idev; |
299 | 297 | ||
@@ -308,122 +306,120 @@ static inline bool dev_is_expander(struct domain_device *dev) | |||
308 | } | 306 | } |
309 | 307 | ||
310 | /** | 308 | /** |
311 | * scic_sds_remote_device_increment_request_count() - | 309 | * sci_remote_device_increment_request_count() - |
312 | * | 310 | * |
313 | * This macro increments the request count for this device | 311 | * This macro increments the request count for this device |
314 | */ | 312 | */ |
315 | #define scic_sds_remote_device_increment_request_count(idev) \ | 313 | #define sci_remote_device_increment_request_count(idev) \ |
316 | ((idev)->started_request_count++) | 314 | ((idev)->started_request_count++) |
317 | 315 | ||
318 | /** | 316 | /** |
319 | * scic_sds_remote_device_decrement_request_count() - | 317 | * sci_remote_device_decrement_request_count() - |
320 | * | 318 | * |
321 | * This macro decrements the request count for this device. This count will | 319 | * This macro decrements the request count for this device. This count will |
322 | * never decrement past 0. | 320 | * never decrement past 0. |
323 | */ | 321 | */ |
324 | #define scic_sds_remote_device_decrement_request_count(idev) \ | 322 | #define sci_remote_device_decrement_request_count(idev) \ |
325 | ((idev)->started_request_count > 0 ? \ | 323 | ((idev)->started_request_count > 0 ? \ |
326 | (idev)->started_request_count-- : 0) | 324 | (idev)->started_request_count-- : 0) |
327 | 325 | ||
328 | /** | 326 | /** |
329 | * scic_sds_remote_device_get_request_count() - | 327 | * sci_remote_device_get_request_count() - |
330 | * | 328 | * |
331 | * This is a helper macro to return the current device request count. | 329 | * This is a helper macro to return the current device request count. |
332 | */ | 330 | */ |
333 | #define scic_sds_remote_device_get_request_count(idev) \ | 331 | #define sci_remote_device_get_request_count(idev) \ |
334 | ((idev)->started_request_count) | 332 | ((idev)->started_request_count) |
335 | 333 | ||
336 | /** | 334 | /** |
337 | * scic_sds_remote_device_get_controller() - | 335 | * sci_remote_device_get_controller() - |
338 | * | 336 | * |
339 | * This macro returns the controller object that contains this device object | 337 | * This macro returns the controller object that contains this device object |
340 | */ | 338 | */ |
341 | #define scic_sds_remote_device_get_controller(idev) \ | 339 | #define sci_remote_device_get_controller(idev) \ |
342 | scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev)) | 340 | sci_port_get_controller(sci_remote_device_get_port(idev)) |
343 | 341 | ||
344 | /** | 342 | /** |
345 | * scic_sds_remote_device_get_port() - | 343 | * sci_remote_device_get_port() - |
346 | * | 344 | * |
347 | * This macro returns the owning port of this device | 345 | * This macro returns the owning port of this device |
348 | */ | 346 | */ |
349 | #define scic_sds_remote_device_get_port(idev) \ | 347 | #define sci_remote_device_get_port(idev) \ |
350 | ((idev)->owning_port) | 348 | ((idev)->owning_port) |
351 | 349 | ||
352 | /** | 350 | /** |
353 | * scic_sds_remote_device_get_controller_peg() - | 351 | * sci_remote_device_get_controller_peg() - |
354 | * | 352 | * |
355 | * This macro returns the controller's protocol engine group | 353 | * This macro returns the controller's protocol engine group |
356 | */ | 354 | */ |
357 | #define scic_sds_remote_device_get_controller_peg(idev) \ | 355 | #define sci_remote_device_get_controller_peg(idev) \ |
358 | (\ | 356 | (\ |
359 | scic_sds_controller_get_protocol_engine_group(\ | 357 | sci_controller_get_protocol_engine_group(\ |
360 | scic_sds_port_get_controller(\ | 358 | sci_port_get_controller(\ |
361 | scic_sds_remote_device_get_port(idev) \ | 359 | sci_remote_device_get_port(idev) \ |
362 | ) \ | 360 | ) \ |
363 | ) \ | 361 | ) \ |
364 | ) | 362 | ) |
365 | 363 | ||
366 | /** | 364 | /** |
367 | * scic_sds_remote_device_get_index() - | 365 | * sci_remote_device_get_index() - |
368 | * | 366 | * |
369 | * This macro returns the remote node index for this device object | 367 | * This macro returns the remote node index for this device object |
370 | */ | 368 | */ |
371 | #define scic_sds_remote_device_get_index(idev) \ | 369 | #define sci_remote_device_get_index(idev) \ |
372 | ((idev)->rnc.remote_node_index) | 370 | ((idev)->rnc.remote_node_index) |
373 | 371 | ||
374 | /** | 372 | /** |
375 | * scic_sds_remote_device_build_command_context() - | 373 | * sci_remote_device_build_command_context() - |
376 | * | 374 | * |
377 | * This macro builds a remote device context for the SCU post request operation | 375 | * This macro builds a remote device context for the SCU post request operation |
378 | */ | 376 | */ |
379 | #define scic_sds_remote_device_build_command_context(device, command) \ | 377 | #define sci_remote_device_build_command_context(device, command) \ |
380 | ((command) \ | 378 | ((command) \ |
381 | | (scic_sds_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ | 379 | | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \ |
382 | | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \ | 380 | | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \ |
383 | | (scic_sds_remote_device_get_index((device))) \ | 381 | | (sci_remote_device_get_index((device))) \ |
384 | ) | 382 | ) |
385 | 383 | ||
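Expanded, the macro above simply packs three routing fields around the command code; an illustrative expansion (the shift constants come from the SCU hardware definitions and are only referenced here, not defined):

	/* Illustration of sci_remote_device_build_command_context(): peg is the
	 * controller's protocol engine group, port_index the owning port's
	 * physical_port_index, and rni the device's remote node index.
	 */
	u32 context = command
		| (peg        << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT)
		| (port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)
		| rni;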
386 | /** | 384 | /** |
387 | * scic_sds_remote_device_set_working_request() - | 385 | * sci_remote_device_set_working_request() - |
388 | * | 386 | * |
389 | * This macro makes the working request assignment for the remote device | 387 | * This macro makes the working request assignment for the remote device |
390 | * object. To clear the working request use this macro with a NULL request | 388 | * object. To clear the working request use this macro with a NULL request |
391 | * object. | 389 | * object. |
392 | */ | 390 | */ |
393 | #define scic_sds_remote_device_set_working_request(device, request) \ | 391 | #define sci_remote_device_set_working_request(device, request) \ |
394 | ((device)->working_request = (request)) | 392 | ((device)->working_request = (request)) |
395 | 393 | ||
396 | enum sci_status scic_sds_remote_device_frame_handler( | 394 | enum sci_status sci_remote_device_frame_handler( |
397 | struct isci_remote_device *idev, | 395 | struct isci_remote_device *idev, |
398 | u32 frame_index); | 396 | u32 frame_index); |
399 | 397 | ||
400 | enum sci_status scic_sds_remote_device_event_handler( | 398 | enum sci_status sci_remote_device_event_handler( |
401 | struct isci_remote_device *idev, | 399 | struct isci_remote_device *idev, |
402 | u32 event_code); | 400 | u32 event_code); |
403 | 401 | ||
404 | enum sci_status scic_sds_remote_device_start_io( | 402 | enum sci_status sci_remote_device_start_io( |
405 | struct isci_host *ihost, | 403 | struct isci_host *ihost, |
406 | struct isci_remote_device *idev, | 404 | struct isci_remote_device *idev, |
407 | struct isci_request *ireq); | 405 | struct isci_request *ireq); |
408 | 406 | ||
409 | enum sci_status scic_sds_remote_device_start_task( | 407 | enum sci_status sci_remote_device_start_task( |
410 | struct isci_host *ihost, | 408 | struct isci_host *ihost, |
411 | struct isci_remote_device *idev, | 409 | struct isci_remote_device *idev, |
412 | struct isci_request *ireq); | 410 | struct isci_request *ireq); |
413 | 411 | ||
414 | enum sci_status scic_sds_remote_device_complete_io( | 412 | enum sci_status sci_remote_device_complete_io( |
415 | struct isci_host *ihost, | 413 | struct isci_host *ihost, |
416 | struct isci_remote_device *idev, | 414 | struct isci_remote_device *idev, |
417 | struct isci_request *ireq); | 415 | struct isci_request *ireq); |
418 | 416 | ||
419 | enum sci_status scic_sds_remote_device_suspend( | 417 | enum sci_status sci_remote_device_suspend( |
420 | struct isci_remote_device *idev, | 418 | struct isci_remote_device *idev, |
421 | u32 suspend_type); | 419 | u32 suspend_type); |
422 | 420 | ||
423 | void scic_sds_remote_device_post_request( | 421 | void sci_remote_device_post_request( |
424 | struct isci_remote_device *idev, | 422 | struct isci_remote_device *idev, |
425 | u32 request); | 423 | u32 request); |
426 | 424 | ||
427 | #define scic_sds_remote_device_is_atapi(idev) false | ||
428 | |||
429 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ | 425 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ |
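Taken together, the helpers above give each device a simple count of started requests; the completion path in remote_device.c earlier in this diff tears down the remote node context once that count drains. A minimal sketch of the completion-side accounting, reusing names from the hunks above:

	/* Sketch: completion-side accounting.  The decrement helper never goes
	 * below zero; once the count reaches zero the RNC can be destructed,
	 * as sci_remote_device_complete_io() does in the hunks above.
	 */
	sci_remote_device_decrement_request_count(idev);

	if (sci_remote_device_get_request_count(idev) == 0)
		sci_remote_node_context_destruct(&idev->rnc, rnc_destruct_done, idev);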
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 8a5203b6eb09..c2dfd5a72181 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c | |||
@@ -81,8 +81,8 @@ | |||
81 | * otherwise it will return false. Return: true if the remote node context is in | 81 | * otherwise it will return false. Return: true if the remote node context is in |
82 | * the ready state, false if the remote node context is not in the ready state. | 82 | * the ready state, false if the remote node context is not in the ready state. |
83 | */ | 83 | */ |
84 | bool scic_sds_remote_node_context_is_ready( | 84 | bool sci_remote_node_context_is_ready( |
85 | struct scic_sds_remote_node_context *sci_rnc) | 85 | struct sci_remote_node_context *sci_rnc) |
86 | { | 86 | { |
87 | u32 current_state = sci_rnc->sm.current_state_id; | 87 | u32 current_state = sci_rnc->sm.current_state_id; |
88 | 88 | ||
@@ -93,15 +93,16 @@ bool scic_sds_remote_node_context_is_ready( | |||
93 | return false; | 93 | return false; |
94 | } | 94 | } |
95 | 95 | ||
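Earlier in this diff, the STP idle-substate entry uses this predicate to decide whether device-ready processing can finish immediately or must wait for an RNC resume. A condensed sketch of that check (resume_complete_handler is a stand-in for the driver's substate resume-complete handler):

	/* Sketch, mirroring sci_stp_remote_device_ready_idle_substate_enter():
	 * finish now if the RNC is already ready, otherwise resume it and
	 * finish from the resume callback.
	 */
	if (sci_remote_node_context_is_ready(&idev->rnc))
		resume_complete_handler(idev);
	else
		sci_remote_node_context_resume(&idev->rnc, resume_complete_handler, idev);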
96 | /** | 96 | static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) |
97 | * | 97 | { |
98 | * @sci_dev: The remote device to use to construct the RNC buffer. | 98 | if (id < ihost->remote_node_entries && |
99 | * @rnc: The buffer into which the remote device data will be copied. | 99 | ihost->device_table[id]) |
100 | * | 100 | return &ihost->remote_node_context_table[id]; |
101 | * This method will construct the RNC buffer for this remote device object. none | 101 | |
102 | */ | 102 | return NULL; |
103 | static void scic_sds_remote_node_context_construct_buffer( | 103 | } |
104 | struct scic_sds_remote_node_context *sci_rnc) | 104 | |
105 | static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) | ||
105 | { | 106 | { |
106 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | 107 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); |
107 | struct domain_device *dev = idev->domain_dev; | 108 | struct domain_device *dev = idev->domain_dev; |
@@ -110,11 +111,11 @@ static void scic_sds_remote_node_context_construct_buffer( | |||
110 | struct isci_host *ihost; | 111 | struct isci_host *ihost; |
111 | __le64 sas_addr; | 112 | __le64 sas_addr; |
112 | 113 | ||
113 | ihost = scic_sds_remote_device_get_controller(idev); | 114 | ihost = sci_remote_device_get_controller(idev); |
114 | rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni); | 115 | rnc = sci_rnc_by_id(ihost, rni); |
115 | 116 | ||
116 | memset(rnc, 0, sizeof(union scu_remote_node_context) | 117 | memset(rnc, 0, sizeof(union scu_remote_node_context) |
117 | * scic_sds_remote_device_node_count(idev)); | 118 | * sci_remote_device_node_count(idev)); |
118 | 119 | ||
119 | rnc->ssp.remote_node_index = rni; | 120 | rnc->ssp.remote_node_index = rni; |
120 | rnc->ssp.remote_node_port_width = idev->device_port_width; | 121 | rnc->ssp.remote_node_port_width = idev->device_port_width; |
@@ -135,14 +136,14 @@ static void scic_sds_remote_node_context_construct_buffer( | |||
135 | 136 | ||
136 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 137 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
137 | rnc->ssp.connection_occupancy_timeout = | 138 | rnc->ssp.connection_occupancy_timeout = |
138 | ihost->user_parameters.sds1.stp_max_occupancy_timeout; | 139 | ihost->user_parameters.stp_max_occupancy_timeout; |
139 | rnc->ssp.connection_inactivity_timeout = | 140 | rnc->ssp.connection_inactivity_timeout = |
140 | ihost->user_parameters.sds1.stp_inactivity_timeout; | 141 | ihost->user_parameters.stp_inactivity_timeout; |
141 | } else { | 142 | } else { |
142 | rnc->ssp.connection_occupancy_timeout = | 143 | rnc->ssp.connection_occupancy_timeout = |
143 | ihost->user_parameters.sds1.ssp_max_occupancy_timeout; | 144 | ihost->user_parameters.ssp_max_occupancy_timeout; |
144 | rnc->ssp.connection_inactivity_timeout = | 145 | rnc->ssp.connection_inactivity_timeout = |
145 | ihost->user_parameters.sds1.ssp_inactivity_timeout; | 146 | ihost->user_parameters.ssp_inactivity_timeout; |
146 | } | 147 | } |
147 | 148 | ||
148 | rnc->ssp.initial_arbitration_wait_time = 0; | 149 | rnc->ssp.initial_arbitration_wait_time = 0; |
@@ -164,8 +165,8 @@ static void scic_sds_remote_node_context_construct_buffer( | |||
164 | * to its ready state. If the remote node context is already setup to | 165 | * to its ready state. If the remote node context is already setup to |
165 | * transition to its final state then this function does nothing. none | 166 | * transition to its final state then this function does nothing. none |
166 | */ | 167 | */ |
167 | static void scic_sds_remote_node_context_setup_to_resume( | 168 | static void sci_remote_node_context_setup_to_resume( |
168 | struct scic_sds_remote_node_context *sci_rnc, | 169 | struct sci_remote_node_context *sci_rnc, |
169 | scics_sds_remote_node_context_callback callback, | 170 | scics_sds_remote_node_context_callback callback, |
170 | void *callback_parameter) | 171 | void *callback_parameter) |
171 | { | 172 | { |
@@ -176,8 +177,8 @@ static void scic_sds_remote_node_context_setup_to_resume( | |||
176 | } | 177 | } |
177 | } | 178 | } |
178 | 179 | ||
179 | static void scic_sds_remote_node_context_setup_to_destory( | 180 | static void sci_remote_node_context_setup_to_destory( |
180 | struct scic_sds_remote_node_context *sci_rnc, | 181 | struct sci_remote_node_context *sci_rnc, |
181 | scics_sds_remote_node_context_callback callback, | 182 | scics_sds_remote_node_context_callback callback, |
182 | void *callback_parameter) | 183 | void *callback_parameter) |
183 | { | 184 | { |
@@ -192,8 +193,8 @@ static void scic_sds_remote_node_context_setup_to_destory( | |||
192 | * This method just calls the user callback function and then resets the | 193 | * This method just calls the user callback function and then resets the |
193 | * callback. | 194 | * callback. |
194 | */ | 195 | */ |
195 | static void scic_sds_remote_node_context_notify_user( | 196 | static void sci_remote_node_context_notify_user( |
196 | struct scic_sds_remote_node_context *rnc) | 197 | struct sci_remote_node_context *rnc) |
197 | { | 198 | { |
198 | if (rnc->user_callback != NULL) { | 199 | if (rnc->user_callback != NULL) { |
199 | (*rnc->user_callback)(rnc->user_cookie); | 200 | (*rnc->user_callback)(rnc->user_cookie); |
@@ -203,99 +204,80 @@ static void scic_sds_remote_node_context_notify_user( | |||
203 | } | 204 | } |
204 | } | 205 | } |
205 | 206 | ||
206 | static void scic_sds_remote_node_context_continue_state_transitions(struct scic_sds_remote_node_context *rnc) | 207 | static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) |
207 | { | 208 | { |
208 | if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) | 209 | if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) |
209 | scic_sds_remote_node_context_resume(rnc, rnc->user_callback, | 210 | sci_remote_node_context_resume(rnc, rnc->user_callback, |
210 | rnc->user_cookie); | 211 | rnc->user_cookie); |
211 | } | 212 | } |
212 | 213 | ||
213 | /** | 214 | static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) |
214 | * | ||
215 | * @sci_rnc: The remote node context object that is to be validated. | ||
216 | * | ||
217 | * This method will mark the rnc buffer as being valid and post the request to | ||
218 | * the hardware. none | ||
219 | */ | ||
220 | static void scic_sds_remote_node_context_validate_context_buffer( | ||
221 | struct scic_sds_remote_node_context *sci_rnc) | ||
222 | { | 215 | { |
216 | union scu_remote_node_context *rnc_buffer; | ||
223 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | 217 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); |
224 | struct domain_device *dev = idev->domain_dev; | 218 | struct domain_device *dev = idev->domain_dev; |
225 | union scu_remote_node_context *rnc_buffer; | 219 | struct isci_host *ihost = idev->owning_port->owning_controller; |
226 | 220 | ||
227 | rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( | 221 | rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); |
228 | scic_sds_remote_device_get_controller(idev), | ||
229 | sci_rnc->remote_node_index | ||
230 | ); | ||
231 | 222 | ||
232 | rnc_buffer->ssp.is_valid = true; | 223 | rnc_buffer->ssp.is_valid = true; |
233 | 224 | ||
234 | if (!idev->is_direct_attached && | 225 | if (!idev->is_direct_attached && |
235 | (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { | 226 | (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { |
236 | scic_sds_remote_device_post_request(idev, | 227 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); |
237 | SCU_CONTEXT_COMMAND_POST_RNC_96); | ||
238 | } else { | 228 | } else { |
239 | scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); | 229 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); |
240 | 230 | ||
241 | if (idev->is_direct_attached) { | 231 | if (idev->is_direct_attached) |
242 | scic_sds_port_setup_transports(idev->owning_port, | 232 | sci_port_setup_transports(idev->owning_port, |
243 | sci_rnc->remote_node_index); | 233 | sci_rnc->remote_node_index); |
244 | } | ||
245 | } | 234 | } |
246 | } | 235 | } |
247 | 236 | ||
248 | /** | 237 | static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) |
249 | * | ||
250 | * @sci_rnc: The remote node context object that is to be invalidated. | ||
251 | * | ||
252 | * This method will update the RNC buffer and post the invalidate request. none | ||
253 | */ | ||
254 | static void scic_sds_remote_node_context_invalidate_context_buffer( | ||
255 | struct scic_sds_remote_node_context *sci_rnc) | ||
256 | { | 238 | { |
257 | union scu_remote_node_context *rnc_buffer; | 239 | union scu_remote_node_context *rnc_buffer; |
240 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | ||
241 | struct isci_host *ihost = idev->owning_port->owning_controller; | ||
258 | 242 | ||
259 | rnc_buffer = scic_sds_controller_get_remote_node_context_buffer( | 243 | rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); |
260 | scic_sds_remote_device_get_controller(rnc_to_dev(sci_rnc)), | ||
261 | sci_rnc->remote_node_index); | ||
262 | 244 | ||
263 | rnc_buffer->ssp.is_valid = false; | 245 | rnc_buffer->ssp.is_valid = false; |
264 | 246 | ||
265 | scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), | 247 | sci_remote_device_post_request(rnc_to_dev(sci_rnc), |
266 | SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); | 248 | SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); |
267 | } | 249 | } |
268 | 250 | ||
269 | static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) | 251 | static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) |
270 | { | 252 | { |
271 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 253 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
272 | 254 | ||
273 | /* Check to see if we have gotten back to the initial state because | 255 | /* Check to see if we have gotten back to the initial state because |
274 | * someone requested to destroy the remote node context object. | 256 | * someone requested to destroy the remote node context object. |
275 | */ | 257 | */ |
276 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { | 258 | if (sm->previous_state_id == SCI_RNC_INVALIDATING) { |
277 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 259 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
278 | scic_sds_remote_node_context_notify_user(rnc); | 260 | sci_remote_node_context_notify_user(rnc); |
279 | } | 261 | } |
280 | } | 262 | } |
281 | 263 | ||
282 | static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) | 264 | static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) |
283 | { | 265 | { |
284 | struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); | 266 | struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); |
285 | 267 | ||
286 | scic_sds_remote_node_context_validate_context_buffer(sci_rnc); | 268 | sci_remote_node_context_validate_context_buffer(sci_rnc); |
287 | } | 269 | } |
288 | 270 | ||
289 | static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) | 271 | static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) |
290 | { | 272 | { |
291 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 273 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
292 | 274 | ||
293 | scic_sds_remote_node_context_invalidate_context_buffer(rnc); | 275 | sci_remote_node_context_invalidate_context_buffer(rnc); |
294 | } | 276 | } |
295 | 277 | ||
296 | static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) | 278 | static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) |
297 | { | 279 | { |
298 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 280 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
299 | struct isci_remote_device *idev; | 281 | struct isci_remote_device *idev; |
300 | struct domain_device *dev; | 282 | struct domain_device *dev; |
301 | 283 | ||
@@ -310,73 +292,73 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st | |||
310 | */ | 292 | */ |
311 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && | 293 | if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && |
312 | idev->is_direct_attached) | 294 | idev->is_direct_attached) |
313 | scic_sds_port_setup_transports(idev->owning_port, | 295 | sci_port_setup_transports(idev->owning_port, |
314 | rnc->remote_node_index); | 296 | rnc->remote_node_index); |
315 | 297 | ||
316 | scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); | 298 | sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); |
317 | } | 299 | } |
318 | 300 | ||
319 | static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) | 301 | static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) |
320 | { | 302 | { |
321 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 303 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
322 | 304 | ||
323 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 305 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
324 | 306 | ||
325 | if (rnc->user_callback) | 307 | if (rnc->user_callback) |
326 | scic_sds_remote_node_context_notify_user(rnc); | 308 | sci_remote_node_context_notify_user(rnc); |
327 | } | 309 | } |
328 | 310 | ||
329 | static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) | 311 | static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) |
330 | { | 312 | { |
331 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 313 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
332 | 314 | ||
333 | scic_sds_remote_node_context_continue_state_transitions(rnc); | 315 | sci_remote_node_context_continue_state_transitions(rnc); |
334 | } | 316 | } |
335 | 317 | ||
336 | static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) | 318 | static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) |
337 | { | 319 | { |
338 | struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); | 320 | struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); |
339 | 321 | ||
340 | scic_sds_remote_node_context_continue_state_transitions(rnc); | 322 | sci_remote_node_context_continue_state_transitions(rnc); |
341 | } | 323 | } |
342 | 324 | ||
343 | static const struct sci_base_state scic_sds_remote_node_context_state_table[] = { | 325 | static const struct sci_base_state sci_remote_node_context_state_table[] = { |
344 | [SCI_RNC_INITIAL] = { | 326 | [SCI_RNC_INITIAL] = { |
345 | .enter_state = scic_sds_remote_node_context_initial_state_enter, | 327 | .enter_state = sci_remote_node_context_initial_state_enter, |
346 | }, | 328 | }, |
347 | [SCI_RNC_POSTING] = { | 329 | [SCI_RNC_POSTING] = { |
348 | .enter_state = scic_sds_remote_node_context_posting_state_enter, | 330 | .enter_state = sci_remote_node_context_posting_state_enter, |
349 | }, | 331 | }, |
350 | [SCI_RNC_INVALIDATING] = { | 332 | [SCI_RNC_INVALIDATING] = { |
351 | .enter_state = scic_sds_remote_node_context_invalidating_state_enter, | 333 | .enter_state = sci_remote_node_context_invalidating_state_enter, |
352 | }, | 334 | }, |
353 | [SCI_RNC_RESUMING] = { | 335 | [SCI_RNC_RESUMING] = { |
354 | .enter_state = scic_sds_remote_node_context_resuming_state_enter, | 336 | .enter_state = sci_remote_node_context_resuming_state_enter, |
355 | }, | 337 | }, |
356 | [SCI_RNC_READY] = { | 338 | [SCI_RNC_READY] = { |
357 | .enter_state = scic_sds_remote_node_context_ready_state_enter, | 339 | .enter_state = sci_remote_node_context_ready_state_enter, |
358 | }, | 340 | }, |
359 | [SCI_RNC_TX_SUSPENDED] = { | 341 | [SCI_RNC_TX_SUSPENDED] = { |
360 | .enter_state = scic_sds_remote_node_context_tx_suspended_state_enter, | 342 | .enter_state = sci_remote_node_context_tx_suspended_state_enter, |
361 | }, | 343 | }, |
362 | [SCI_RNC_TX_RX_SUSPENDED] = { | 344 | [SCI_RNC_TX_RX_SUSPENDED] = { |
363 | .enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter, | 345 | .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, |
364 | }, | 346 | }, |
365 | [SCI_RNC_AWAIT_SUSPENSION] = { }, | 347 | [SCI_RNC_AWAIT_SUSPENSION] = { }, |
366 | }; | 348 | }; |
367 | 349 | ||
368 | void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, | 350 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, |
369 | u16 remote_node_index) | 351 | u16 remote_node_index) |
370 | { | 352 | { |
371 | memset(rnc, 0, sizeof(struct scic_sds_remote_node_context)); | 353 | memset(rnc, 0, sizeof(struct sci_remote_node_context)); |
372 | 354 | ||
373 | rnc->remote_node_index = remote_node_index; | 355 | rnc->remote_node_index = remote_node_index; |
374 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; | 356 | rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; |
375 | 357 | ||
376 | sci_init_sm(&rnc->sm, scic_sds_remote_node_context_state_table, SCI_RNC_INITIAL); | 358 | sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); |
377 | } | 359 | } |
378 | 360 | ||
379 | enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, | 361 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, |
380 | u32 event_code) | 362 | u32 event_code) |
381 | { | 363 | { |
382 | enum scis_sds_remote_node_context_states state; | 364 | enum scis_sds_remote_node_context_states state; |
@@ -476,7 +458,7 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot | |||
476 | 458 | ||
477 | } | 459 | } |
478 | 460 | ||
479 | enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, | 461 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, |
480 | scics_sds_remote_node_context_callback cb_fn, | 462 | scics_sds_remote_node_context_callback cb_fn, |
481 | void *cb_p) | 463 | void *cb_p) |
482 | { | 464 | { |
@@ -485,7 +467,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod | |||
485 | state = sci_rnc->sm.current_state_id; | 467 | state = sci_rnc->sm.current_state_id; |
486 | switch (state) { | 468 | switch (state) { |
487 | case SCI_RNC_INVALIDATING: | 469 | case SCI_RNC_INVALIDATING: |
488 | scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); | 470 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); |
489 | return SCI_SUCCESS; | 471 | return SCI_SUCCESS; |
490 | case SCI_RNC_POSTING: | 472 | case SCI_RNC_POSTING: |
491 | case SCI_RNC_RESUMING: | 473 | case SCI_RNC_RESUMING: |
@@ -493,7 +475,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod | |||
493 | case SCI_RNC_TX_SUSPENDED: | 475 | case SCI_RNC_TX_SUSPENDED: |
494 | case SCI_RNC_TX_RX_SUSPENDED: | 476 | case SCI_RNC_TX_RX_SUSPENDED: |
495 | case SCI_RNC_AWAIT_SUSPENSION: | 477 | case SCI_RNC_AWAIT_SUSPENSION: |
496 | scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); | 478 | sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); |
497 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); | 479 | sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); |
498 | return SCI_SUCCESS; | 480 | return SCI_SUCCESS; |
499 | case SCI_RNC_INITIAL: | 481 | case SCI_RNC_INITIAL: |
@@ -511,7 +493,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod | |||
511 | } | 493 | } |
512 | } | 494 | } |
513 | 495 | ||
514 | enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, | 496 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, |
515 | u32 suspend_type, | 497 | u32 suspend_type, |
516 | scics_sds_remote_node_context_callback cb_fn, | 498 | scics_sds_remote_node_context_callback cb_fn, |
517 | void *cb_p) | 499 | void *cb_p) |
@@ -530,7 +512,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node | |||
530 | sci_rnc->suspension_code = suspend_type; | 512 | sci_rnc->suspension_code = suspend_type; |
531 | 513 | ||
532 | if (suspend_type == SCI_SOFTWARE_SUSPENSION) { | 514 | if (suspend_type == SCI_SOFTWARE_SUSPENSION) { |
533 | scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc), | 515 | sci_remote_device_post_request(rnc_to_dev(sci_rnc), |
534 | SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); | 516 | SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); |
535 | } | 517 | } |
536 | 518 | ||
@@ -538,7 +520,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node | |||
538 | return SCI_SUCCESS; | 520 | return SCI_SUCCESS; |
539 | } | 521 | } |
540 | 522 | ||
541 | enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, | 523 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, |
542 | scics_sds_remote_node_context_callback cb_fn, | 524 | scics_sds_remote_node_context_callback cb_fn, |
543 | void *cb_p) | 525 | void *cb_p) |
544 | { | 526 | { |
@@ -550,8 +532,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ | |||
550 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) | 532 | if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) |
551 | return SCI_FAILURE_INVALID_STATE; | 533 | return SCI_FAILURE_INVALID_STATE; |
552 | 534 | ||
553 | scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 535 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
554 | scic_sds_remote_node_context_construct_buffer(sci_rnc); | 536 | sci_remote_node_context_construct_buffer(sci_rnc); |
555 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); | 537 | sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); |
556 | return SCI_SUCCESS; | 538 | return SCI_SUCCESS; |
557 | case SCI_RNC_POSTING: | 539 | case SCI_RNC_POSTING: |
@@ -567,7 +549,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ | |||
567 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); | 549 | struct isci_remote_device *idev = rnc_to_dev(sci_rnc); |
568 | struct domain_device *dev = idev->domain_dev; | 550 | struct domain_device *dev = idev->domain_dev; |
569 | 551 | ||
570 | scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 552 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
571 | 553 | ||
572 | /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ | 554 | /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ |
573 | if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) | 555 | if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) |
@@ -584,11 +566,11 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ | |||
584 | return SCI_SUCCESS; | 566 | return SCI_SUCCESS; |
585 | } | 567 | } |
586 | case SCI_RNC_TX_RX_SUSPENDED: | 568 | case SCI_RNC_TX_RX_SUSPENDED: |
587 | scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 569 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
588 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); | 570 | sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); |
589 | return SCI_FAILURE_INVALID_STATE; | 571 | return SCI_FAILURE_INVALID_STATE; |
590 | case SCI_RNC_AWAIT_SUSPENSION: | 572 | case SCI_RNC_AWAIT_SUSPENSION: |
591 | scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); | 573 | sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); |
592 | return SCI_SUCCESS; | 574 | return SCI_SUCCESS; |
593 | default: | 575 | default: |
594 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 576 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
@@ -597,7 +579,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_ | |||
597 | } | 579 | } |
598 | } | 580 | } |
599 | 581 | ||
600 | enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, | 582 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, |
601 | struct isci_request *ireq) | 583 | struct isci_request *ireq) |
602 | { | 584 | { |
603 | enum scis_sds_remote_node_context_states state; | 585 | enum scis_sds_remote_node_context_states state; |
@@ -622,7 +604,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod | |||
622 | return SCI_FAILURE_INVALID_STATE; | 604 | return SCI_FAILURE_INVALID_STATE; |
623 | } | 605 | } |
624 | 606 | ||
625 | enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, | 607 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
626 | struct isci_request *ireq) | 608 | struct isci_request *ireq) |
627 | { | 609 | { |
628 | enum scis_sds_remote_node_context_states state; | 610 | enum scis_sds_remote_node_context_states state; |
@@ -635,7 +617,7 @@ enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_n | |||
635 | return SCI_SUCCESS; | 617 | return SCI_SUCCESS; |
636 | case SCI_RNC_TX_SUSPENDED: | 618 | case SCI_RNC_TX_SUSPENDED: |
637 | case SCI_RNC_TX_RX_SUSPENDED: | 619 | case SCI_RNC_TX_RX_SUSPENDED: |
638 | scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL); | 620 | sci_remote_node_context_resume(sci_rnc, NULL, NULL); |
639 | return SCI_SUCCESS; | 621 | return SCI_SUCCESS; |
640 | default: | 622 | default: |
641 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), | 623 | dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), |
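The state table at the end of remote_node_context.c above pairs each RNC state with an entry handler, and sci_change_state() runs the handler for whatever state is entered next. The following self-contained sketch uses invented names to illustrate that table-driven pattern in isolation; it is a simplified model, not the driver code.

#include <stdio.h>

struct toy_sm;
typedef void (*toy_enter_fn)(struct toy_sm *sm);

enum toy_state { TOY_INITIAL, TOY_POSTING, TOY_READY, TOY_STATE_COUNT };

struct toy_state_desc {
	toy_enter_fn enter_state;	/* may be NULL, like the empty SCI_RNC_AWAIT_SUSPENSION slot */
};

struct toy_sm {
	enum toy_state current_state_id;
	enum toy_state previous_state_id;
	const struct toy_state_desc *table;
};

static void toy_change_state(struct toy_sm *sm, enum toy_state next)
{
	toy_enter_fn handler;

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	handler = sm->table[next].enter_state;
	if (handler)
		handler(sm);		/* entry action runs on each transition */
}

static void toy_posting_enter(struct toy_sm *sm)
{
	printf("posting: validate context buffer (came from state %d)\n",
	       sm->previous_state_id);
}

static void toy_ready_enter(struct toy_sm *sm)
{
	printf("ready: notify the user callback\n");
}

static const struct toy_state_desc toy_state_table[TOY_STATE_COUNT] = {
	[TOY_POSTING] = { .enter_state = toy_posting_enter },
	[TOY_READY]   = { .enter_state = toy_ready_enter },
	/* TOY_INITIAL is left zeroed: no entry action for that state */
};

int main(void)
{
	struct toy_sm sm = { .current_state_id = TOY_INITIAL, .table = toy_state_table };

	toy_change_state(&sm, TOY_POSTING);
	toy_change_state(&sm, TOY_READY);
	return 0;
}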
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h index 7a24c7a12de1..b475c5c26642 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h | |||
@@ -80,7 +80,7 @@ | |||
80 | 80 | ||
81 | struct isci_request; | 81 | struct isci_request; |
82 | struct isci_remote_device; | 82 | struct isci_remote_device; |
83 | struct scic_sds_remote_node_context; | 83 | struct sci_remote_node_context; |
84 | 84 | ||
85 | typedef void (*scics_sds_remote_node_context_callback)(void *); | 85 | typedef void (*scics_sds_remote_node_context_callback)(void *); |
86 | 86 | ||
@@ -147,19 +147,19 @@ enum scis_sds_remote_node_context_states { | |||
147 | * This enumeration is used to define the end destination state for the remote | 147 | * This enumeration is used to define the end destination state for the remote |
148 | * node context. | 148 | * node context. |
149 | */ | 149 | */ |
150 | enum scic_sds_remote_node_context_destination_state { | 150 | enum sci_remote_node_context_destination_state { |
151 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, | 151 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, |
152 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, | 152 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, |
153 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL | 153 | SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL |
154 | }; | 154 | }; |
155 | 155 | ||
156 | /** | 156 | /** |
157 | * struct scic_sds_remote_node_context - This structure contains the data | 157 | * struct sci_remote_node_context - This structure contains the data |
158 | * associated with the remote node context object. The remote node context | 158 | * associated with the remote node context object. The remote node context |
159 | * (RNC) object models the remote device information necessary to manage | 159 | * (RNC) object models the remote device information necessary to manage |
160 | * the silicon RNC. | 160 | * the silicon RNC. |
161 | */ | 161 | */ |
162 | struct scic_sds_remote_node_context { | 162 | struct sci_remote_node_context { |
163 | /** | 163 | /** |
164 | * This field indicates the remote node index (RNI) associated with | 164 | * This field indicates the remote node index (RNI) associated with |
165 | * this RNC. | 165 | * this RNC. |
@@ -177,7 +177,7 @@ struct scic_sds_remote_node_context { | |||
177 | * state. This can cause an automatic resume on receiving a suspension | 177 | * state. This can cause an automatic resume on receiving a suspension |
178 | * notification. | 178 | * notification. |
179 | */ | 179 | */ |
180 | enum scic_sds_remote_node_context_destination_state destination_state; | 180 | enum sci_remote_node_context_destination_state destination_state; |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * This field contains the callback function that the user requested to be | 183 | * This field contains the callback function that the user requested to be |
@@ -197,31 +197,31 @@ struct scic_sds_remote_node_context { | |||
197 | struct sci_base_state_machine sm; | 197 | struct sci_base_state_machine sm; |
198 | }; | 198 | }; |
199 | 199 | ||
200 | void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc, | 200 | void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, |
201 | u16 remote_node_index); | 201 | u16 remote_node_index); |
202 | 202 | ||
203 | 203 | ||
204 | bool scic_sds_remote_node_context_is_ready( | 204 | bool sci_remote_node_context_is_ready( |
205 | struct scic_sds_remote_node_context *sci_rnc); | 205 | struct sci_remote_node_context *sci_rnc); |
206 | 206 | ||
207 | #define scic_sds_remote_node_context_get_remote_node_index(rnc) \ | 207 | #define sci_remote_node_context_get_remote_node_index(rnc) \ |
208 | ((rnc)->remote_node_index) | 208 | ((rnc)->remote_node_index) |
209 | 209 | ||
210 | enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc, | 210 | enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, |
211 | u32 event_code); | 211 | u32 event_code); |
212 | enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc, | 212 | enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, |
213 | scics_sds_remote_node_context_callback callback, | 213 | scics_sds_remote_node_context_callback callback, |
214 | void *callback_parameter); | 214 | void *callback_parameter); |
215 | enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc, | 215 | enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, |
216 | u32 suspend_type, | 216 | u32 suspend_type, |
217 | scics_sds_remote_node_context_callback cb_fn, | 217 | scics_sds_remote_node_context_callback cb_fn, |
218 | void *cb_p); | 218 | void *cb_p); |
219 | enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc, | 219 | enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, |
220 | scics_sds_remote_node_context_callback cb_fn, | 220 | scics_sds_remote_node_context_callback cb_fn, |
221 | void *cb_p); | 221 | void *cb_p); |
222 | enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc, | 222 | enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, |
223 | struct isci_request *ireq); | 223 | struct isci_request *ireq); |
224 | enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc, | 224 | enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, |
225 | struct isci_request *ireq); | 225 | struct isci_request *ireq); |
226 | 226 | ||
227 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ | 227 | #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ |
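The destination_state field documented in this header lets a resume request be latched while the RNC is still finishing a suspension or invalidation: the requested destination and the user callback are stored, and the callback fires once the READY state is actually entered. Below is a minimal model of that latch-and-notify idea, using invented names and none of the driver's types; it is a sketch of the mechanism only.

#include <stdio.h>
#include <stddef.h>

typedef void (*toy_rnc_callback)(void *cookie);

enum toy_dest { TOY_DEST_UNSPECIFIED, TOY_DEST_READY };

struct toy_rnc {
	enum toy_dest destination_state;
	toy_rnc_callback user_callback;
	void *user_cookie;
};

/* Record that the caller ultimately wants the RNC ready, and whom to tell. */
static void toy_rnc_setup_to_resume(struct toy_rnc *rnc,
				    toy_rnc_callback cb, void *cookie)
{
	rnc->destination_state = TOY_DEST_READY;
	rnc->user_callback = cb;
	rnc->user_cookie = cookie;
}

/* Call the stored callback once, then forget it. */
static void toy_rnc_notify_user(struct toy_rnc *rnc)
{
	if (rnc->user_callback) {
		rnc->user_callback(rnc->user_cookie);
		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}

/* What a READY entry action does in this model: clear the destination, notify. */
static void toy_rnc_enter_ready(struct toy_rnc *rnc)
{
	rnc->destination_state = TOY_DEST_UNSPECIFIED;
	toy_rnc_notify_user(rnc);
}

static void resume_done(void *cookie)
{
	printf("resume complete for %s\n", (const char *)cookie);
}

int main(void)
{
	struct toy_rnc rnc = { 0 };

	toy_rnc_setup_to_resume(&rnc, resume_done, "device0");
	toy_rnc_enter_ready(&rnc);	/* pretend the state machine just reached READY */
	return 0;
}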
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c index 6b9465a5a217..301b3141945e 100644 --- a/drivers/scsi/isci/remote_node_table.c +++ b/drivers/scsi/isci/remote_node_table.c | |||
@@ -74,8 +74,8 @@ | |||
74 | * just bit position. u32 This is the absolute bit position for an available | 74 | * just bit position. u32 This is the absolute bit position for an available |
75 | * group. | 75 | * group. |
76 | */ | 76 | */ |
77 | static u32 scic_sds_remote_node_table_get_group_index( | 77 | static u32 sci_remote_node_table_get_group_index( |
78 | struct scic_remote_node_table *remote_node_table, | 78 | struct sci_remote_node_table *remote_node_table, |
79 | u32 group_table_index) | 79 | u32 group_table_index) |
80 | { | 80 | { |
81 | u32 dword_index; | 81 | u32 dword_index; |
@@ -108,8 +108,8 @@ static u32 scic_sds_remote_node_table_get_group_index( | |||
108 | * This method will clear the group index entry in the specified group index | 108 | * This method will clear the group index entry in the specified group index |
109 | * table. none | 109 | * table. none |
110 | */ | 110 | */ |
111 | static void scic_sds_remote_node_table_clear_group_index( | 111 | static void sci_remote_node_table_clear_group_index( |
112 | struct scic_remote_node_table *remote_node_table, | 112 | struct sci_remote_node_table *remote_node_table, |
113 | u32 group_table_index, | 113 | u32 group_table_index, |
114 | u32 group_index) | 114 | u32 group_index) |
115 | { | 115 | { |
@@ -138,8 +138,8 @@ static void scic_sds_remote_node_table_clear_group_index( | |||
138 | * This method will set the group index bit entry in the specified group index | 138 | * This method will set the group index bit entry in the specified group index |
139 | * table. none | 139 | * table. none |
140 | */ | 140 | */ |
141 | static void scic_sds_remote_node_table_set_group_index( | 141 | static void sci_remote_node_table_set_group_index( |
142 | struct scic_remote_node_table *remote_node_table, | 142 | struct sci_remote_node_table *remote_node_table, |
143 | u32 group_table_index, | 143 | u32 group_table_index, |
144 | u32 group_index) | 144 | u32 group_index) |
145 | { | 145 | { |
@@ -167,8 +167,8 @@ static void scic_sds_remote_node_table_set_group_index( | |||
167 | * This method will set the remote to available in the remote node allocation | 167 | * This method will set the remote to available in the remote node allocation |
168 | * table. none | 168 | * table. none |
169 | */ | 169 | */ |
170 | static void scic_sds_remote_node_table_set_node_index( | 170 | static void sci_remote_node_table_set_node_index( |
171 | struct scic_remote_node_table *remote_node_table, | 171 | struct sci_remote_node_table *remote_node_table, |
172 | u32 remote_node_index) | 172 | u32 remote_node_index) |
173 | { | 173 | { |
174 | u32 dword_location; | 174 | u32 dword_location; |
@@ -200,8 +200,8 @@ static void scic_sds_remote_node_table_set_node_index( | |||
200 | * This method clears the remote node index from the table of available remote | 200 | * This method clears the remote node index from the table of available remote |
201 | * nodes. none | 201 | * nodes. none |
202 | */ | 202 | */ |
203 | static void scic_sds_remote_node_table_clear_node_index( | 203 | static void sci_remote_node_table_clear_node_index( |
204 | struct scic_remote_node_table *remote_node_table, | 204 | struct sci_remote_node_table *remote_node_table, |
205 | u32 remote_node_index) | 205 | u32 remote_node_index) |
206 | { | 206 | { |
207 | u32 dword_location; | 207 | u32 dword_location; |
@@ -231,8 +231,8 @@ static void scic_sds_remote_node_table_clear_node_index( | |||
231 | * | 231 | * |
232 | * This method clears the entire table slot at the specified slot index. none | 232 | * This method clears the entire table slot at the specified slot index. none |
233 | */ | 233 | */ |
234 | static void scic_sds_remote_node_table_clear_group( | 234 | static void sci_remote_node_table_clear_group( |
235 | struct scic_remote_node_table *remote_node_table, | 235 | struct sci_remote_node_table *remote_node_table, |
236 | u32 group_index) | 236 | u32 group_index) |
237 | { | 237 | { |
238 | u32 dword_location; | 238 | u32 dword_location; |
@@ -258,8 +258,8 @@ static void scic_sds_remote_node_table_clear_group( | |||
258 | * | 258 | * |
259 | * This method sets an entire remote node group in the remote node table. | 259 | * This method sets an entire remote node group in the remote node table. |
260 | */ | 260 | */ |
261 | static void scic_sds_remote_node_table_set_group( | 261 | static void sci_remote_node_table_set_group( |
262 | struct scic_remote_node_table *remote_node_table, | 262 | struct sci_remote_node_table *remote_node_table, |
263 | u32 group_index) | 263 | u32 group_index) |
264 | { | 264 | { |
265 | u32 dword_location; | 265 | u32 dword_location; |
@@ -288,8 +288,8 @@ static void scic_sds_remote_node_table_set_group( | |||
288 | * This method will return the group value for the specified group index. The | 288 | * This method will return the group value for the specified group index. The |
289 | * bit values at the specified remote node group index. | 289 | * bit values at the specified remote node group index. |
290 | */ | 290 | */ |
291 | static u8 scic_sds_remote_node_table_get_group_value( | 291 | static u8 sci_remote_node_table_get_group_value( |
292 | struct scic_remote_node_table *remote_node_table, | 292 | struct sci_remote_node_table *remote_node_table, |
293 | u32 group_index) | 293 | u32 group_index) |
294 | { | 294 | { |
295 | u32 dword_location; | 295 | u32 dword_location; |
@@ -313,8 +313,8 @@ static u8 scic_sds_remote_node_table_get_group_value( | |||
313 | * | 313 | * |
314 | * This method will initialize the remote node table for use. none | 314 | * This method will initialize the remote node table for use. none |
315 | */ | 315 | */ |
316 | void scic_sds_remote_node_table_initialize( | 316 | void sci_remote_node_table_initialize( |
317 | struct scic_remote_node_table *remote_node_table, | 317 | struct sci_remote_node_table *remote_node_table, |
318 | u32 remote_node_entries) | 318 | u32 remote_node_entries) |
319 | { | 319 | { |
320 | u32 index; | 320 | u32 index; |
@@ -342,7 +342,7 @@ void scic_sds_remote_node_table_initialize( | |||
342 | 342 | ||
343 | /* Initialize each full DWORD to a FULL SET of remote nodes */ | 343 | /* Initialize each full DWORD to a FULL SET of remote nodes */ |
344 | for (index = 0; index < remote_node_entries; index++) { | 344 | for (index = 0; index < remote_node_entries; index++) { |
345 | scic_sds_remote_node_table_set_node_index(remote_node_table, index); | 345 | sci_remote_node_table_set_node_index(remote_node_table, index); |
346 | } | 346 | } |
347 | 347 | ||
348 | remote_node_table->group_array_size = (u16) | 348 | remote_node_table->group_array_size = (u16) |
@@ -353,14 +353,14 @@ void scic_sds_remote_node_table_initialize( | |||
353 | /* | 353 | /* |
354 | * These are all guaranteed to be full slot values so fill them in the | 354 | * These are all guaranteed to be full slot values so fill them in the |
355 | * available sets of 3 remote nodes */ | 355 | * available sets of 3 remote nodes */ |
356 | scic_sds_remote_node_table_set_group_index(remote_node_table, 2, index); | 356 | sci_remote_node_table_set_group_index(remote_node_table, 2, index); |
357 | } | 357 | } |
358 | 358 | ||
359 | /* Now fill in any remainders that we may find */ | 359 | /* Now fill in any remainders that we may find */ |
360 | if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { | 360 | if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) { |
361 | scic_sds_remote_node_table_set_group_index(remote_node_table, 1, index); | 361 | sci_remote_node_table_set_group_index(remote_node_table, 1, index); |
362 | } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { | 362 | } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) { |
363 | scic_sds_remote_node_table_set_group_index(remote_node_table, 0, index); | 363 | sci_remote_node_table_set_group_index(remote_node_table, 0, index); |
364 | } | 364 | } |
365 | } | 365 | } |
366 | 366 | ||
@@ -379,8 +379,8 @@ void scic_sds_remote_node_table_initialize( | |||
379 | * updated. The RNi value or an invalid remote node context if an RNi can not | 379 | * updated. The RNi value or an invalid remote node context if an RNi can not |
380 | * be found. | 380 | * be found. |
381 | */ | 381 | */ |
382 | static u16 scic_sds_remote_node_table_allocate_single_remote_node( | 382 | static u16 sci_remote_node_table_allocate_single_remote_node( |
383 | struct scic_remote_node_table *remote_node_table, | 383 | struct sci_remote_node_table *remote_node_table, |
384 | u32 group_table_index) | 384 | u32 group_table_index) |
385 | { | 385 | { |
386 | u8 index; | 386 | u8 index; |
@@ -388,12 +388,12 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( | |||
388 | u32 group_index; | 388 | u32 group_index; |
389 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 389 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
390 | 390 | ||
391 | group_index = scic_sds_remote_node_table_get_group_index( | 391 | group_index = sci_remote_node_table_get_group_index( |
392 | remote_node_table, group_table_index); | 392 | remote_node_table, group_table_index); |
393 | 393 | ||
394 | /* We could not find an available slot in the table selector 0 */ | 394 | /* We could not find an available slot in the table selector 0 */ |
395 | if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { | 395 | if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { |
396 | group_value = scic_sds_remote_node_table_get_group_value( | 396 | group_value = sci_remote_node_table_get_group_value( |
397 | remote_node_table, group_index); | 397 | remote_node_table, group_index); |
398 | 398 | ||
399 | for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { | 399 | for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) { |
@@ -402,16 +402,16 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( | |||
402 | remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT | 402 | remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT |
403 | + index); | 403 | + index); |
404 | 404 | ||
405 | scic_sds_remote_node_table_clear_group_index( | 405 | sci_remote_node_table_clear_group_index( |
406 | remote_node_table, group_table_index, group_index | 406 | remote_node_table, group_table_index, group_index |
407 | ); | 407 | ); |
408 | 408 | ||
409 | scic_sds_remote_node_table_clear_node_index( | 409 | sci_remote_node_table_clear_node_index( |
410 | remote_node_table, remote_node_index | 410 | remote_node_table, remote_node_index |
411 | ); | 411 | ); |
412 | 412 | ||
413 | if (group_table_index > 0) { | 413 | if (group_table_index > 0) { |
414 | scic_sds_remote_node_table_set_group_index( | 414 | sci_remote_node_table_set_group_index( |
415 | remote_node_table, group_table_index - 1, group_index | 415 | remote_node_table, group_table_index - 1, group_index |
416 | ); | 416 | ); |
417 | } | 417 | } |
@@ -436,24 +436,24 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node( | |||
436 | * The remote node index that represents three consecutive remote node entries | 436 | * The remote node index that represents three consecutive remote node entries |
437 | * or an invalid remote node context if none can be found. | 437 | * or an invalid remote node context if none can be found. |
438 | */ | 438 | */ |
439 | static u16 scic_sds_remote_node_table_allocate_triple_remote_node( | 439 | static u16 sci_remote_node_table_allocate_triple_remote_node( |
440 | struct scic_remote_node_table *remote_node_table, | 440 | struct sci_remote_node_table *remote_node_table, |
441 | u32 group_table_index) | 441 | u32 group_table_index) |
442 | { | 442 | { |
443 | u32 group_index; | 443 | u32 group_index; |
444 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 444 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
445 | 445 | ||
446 | group_index = scic_sds_remote_node_table_get_group_index( | 446 | group_index = sci_remote_node_table_get_group_index( |
447 | remote_node_table, group_table_index); | 447 | remote_node_table, group_table_index); |
448 | 448 | ||
449 | if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { | 449 | if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) { |
450 | remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; | 450 | remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT; |
451 | 451 | ||
452 | scic_sds_remote_node_table_clear_group_index( | 452 | sci_remote_node_table_clear_group_index( |
453 | remote_node_table, group_table_index, group_index | 453 | remote_node_table, group_table_index, group_index |
454 | ); | 454 | ); |
455 | 455 | ||
456 | scic_sds_remote_node_table_clear_group( | 456 | sci_remote_node_table_clear_group( |
457 | remote_node_table, group_index | 457 | remote_node_table, group_index |
458 | ); | 458 | ); |
459 | } | 459 | } |
@@ -473,31 +473,31 @@ static u16 scic_sds_remote_node_table_allocate_triple_remote_node( | |||
473 | * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is | 473 | * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). u16 This is |
474 | * the remote node index that is returned or an invalid remote node context. | 474 | * the remote node index that is returned or an invalid remote node context. |
475 | */ | 475 | */ |
476 | u16 scic_sds_remote_node_table_allocate_remote_node( | 476 | u16 sci_remote_node_table_allocate_remote_node( |
477 | struct scic_remote_node_table *remote_node_table, | 477 | struct sci_remote_node_table *remote_node_table, |
478 | u32 remote_node_count) | 478 | u32 remote_node_count) |
479 | { | 479 | { |
480 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 480 | u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
481 | 481 | ||
482 | if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { | 482 | if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { |
483 | remote_node_index = | 483 | remote_node_index = |
484 | scic_sds_remote_node_table_allocate_single_remote_node( | 484 | sci_remote_node_table_allocate_single_remote_node( |
485 | remote_node_table, 0); | 485 | remote_node_table, 0); |
486 | 486 | ||
487 | if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { | 487 | if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { |
488 | remote_node_index = | 488 | remote_node_index = |
489 | scic_sds_remote_node_table_allocate_single_remote_node( | 489 | sci_remote_node_table_allocate_single_remote_node( |
490 | remote_node_table, 1); | 490 | remote_node_table, 1); |
491 | } | 491 | } |
492 | 492 | ||
493 | if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { | 493 | if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { |
494 | remote_node_index = | 494 | remote_node_index = |
495 | scic_sds_remote_node_table_allocate_single_remote_node( | 495 | sci_remote_node_table_allocate_single_remote_node( |
496 | remote_node_table, 2); | 496 | remote_node_table, 2); |
497 | } | 497 | } |
498 | } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { | 498 | } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { |
499 | remote_node_index = | 499 | remote_node_index = |
500 | scic_sds_remote_node_table_allocate_triple_remote_node( | 500 | sci_remote_node_table_allocate_triple_remote_node( |
501 | remote_node_table, 2); | 501 | remote_node_table, 2); |
502 | } | 502 | } |
503 | 503 | ||
@@ -511,8 +511,8 @@ u16 scic_sds_remote_node_table_allocate_remote_node( | |||
511 | * This method will free a single remote node index back to the remote node | 511 | * This method will free a single remote node index back to the remote node |
512 | * table. This routine will update the remote node groups | 512 | * table. This routine will update the remote node groups |
513 | */ | 513 | */ |
514 | static void scic_sds_remote_node_table_release_single_remote_node( | 514 | static void sci_remote_node_table_release_single_remote_node( |
515 | struct scic_remote_node_table *remote_node_table, | 515 | struct sci_remote_node_table *remote_node_table, |
516 | u16 remote_node_index) | 516 | u16 remote_node_index) |
517 | { | 517 | { |
518 | u32 group_index; | 518 | u32 group_index; |
@@ -520,7 +520,7 @@ static void scic_sds_remote_node_table_release_single_remote_node( | |||
520 | 520 | ||
521 | group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; | 521 | group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; |
522 | 522 | ||
523 | group_value = scic_sds_remote_node_table_get_group_value(remote_node_table, group_index); | 523 | group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index); |
524 | 524 | ||
525 | /* | 525 | /* |
526 | * Assert that we are not trying to add an entry to a slot that is already | 526 | * Assert that we are not trying to add an entry to a slot that is already |
@@ -531,22 +531,22 @@ static void scic_sds_remote_node_table_release_single_remote_node( | |||
531 | /* | 531 | /* |
532 | * There are no entries in this slot so it must be added to the single | 532 | * There are no entries in this slot so it must be added to the single |
533 | * slot table. */ | 533 | * slot table. */ |
534 | scic_sds_remote_node_table_set_group_index(remote_node_table, 0, group_index); | 534 | sci_remote_node_table_set_group_index(remote_node_table, 0, group_index); |
535 | } else if ((group_value & (group_value - 1)) == 0) { | 535 | } else if ((group_value & (group_value - 1)) == 0) { |
536 | /* | 536 | /* |
537 | * There is only one entry in this slot so it must be moved from the | 537 | * There is only one entry in this slot so it must be moved from the |
538 | * single slot table to the dual slot table */ | 538 | * single slot table to the dual slot table */ |
539 | scic_sds_remote_node_table_clear_group_index(remote_node_table, 0, group_index); | 539 | sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index); |
540 | scic_sds_remote_node_table_set_group_index(remote_node_table, 1, group_index); | 540 | sci_remote_node_table_set_group_index(remote_node_table, 1, group_index); |
541 | } else { | 541 | } else { |
542 | /* | 542 | /* |
543 | * There are two entries in the slot so it must be moved from the dual | 543 | * There are two entries in the slot so it must be moved from the dual |
544 | * slot table to the triple slot table. */ | 544 | * slot table to the triple slot table. */ |
545 | scic_sds_remote_node_table_clear_group_index(remote_node_table, 1, group_index); | 545 | sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index); |
546 | scic_sds_remote_node_table_set_group_index(remote_node_table, 2, group_index); | 546 | sci_remote_node_table_set_group_index(remote_node_table, 2, group_index); |
547 | } | 547 | } |
548 | 548 | ||
549 | scic_sds_remote_node_table_set_node_index(remote_node_table, remote_node_index); | 549 | sci_remote_node_table_set_node_index(remote_node_table, remote_node_index); |
550 | } | 550 | } |
551 | 551 | ||
552 | /** | 552 | /** |
@@ -557,19 +557,19 @@ static void scic_sds_remote_node_table_release_single_remote_node( | |||
557 | * This method will release a group of three consecutive remote nodes back to | 557 | * This method will release a group of three consecutive remote nodes back to |
558 | * the free remote nodes. | 558 | * the free remote nodes. |
559 | */ | 559 | */ |
560 | static void scic_sds_remote_node_table_release_triple_remote_node( | 560 | static void sci_remote_node_table_release_triple_remote_node( |
561 | struct scic_remote_node_table *remote_node_table, | 561 | struct sci_remote_node_table *remote_node_table, |
562 | u16 remote_node_index) | 562 | u16 remote_node_index) |
563 | { | 563 | { |
564 | u32 group_index; | 564 | u32 group_index; |
565 | 565 | ||
566 | group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; | 566 | group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT; |
567 | 567 | ||
568 | scic_sds_remote_node_table_set_group_index( | 568 | sci_remote_node_table_set_group_index( |
569 | remote_node_table, 2, group_index | 569 | remote_node_table, 2, group_index |
570 | ); | 570 | ); |
571 | 571 | ||
572 | scic_sds_remote_node_table_set_group(remote_node_table, group_index); | 572 | sci_remote_node_table_set_group(remote_node_table, group_index); |
573 | } | 573 | } |
574 | 574 | ||
575 | /** | 575 | /** |
@@ -582,16 +582,16 @@ static void scic_sds_remote_node_table_release_triple_remote_node( | |||
582 | * This method will release the remote node index back into the remote node | 582 | * This method will release the remote node index back into the remote node |
583 | * table free pool. | 583 | * table free pool. |
584 | */ | 584 | */ |
585 | void scic_sds_remote_node_table_release_remote_node_index( | 585 | void sci_remote_node_table_release_remote_node_index( |
586 | struct scic_remote_node_table *remote_node_table, | 586 | struct sci_remote_node_table *remote_node_table, |
587 | u32 remote_node_count, | 587 | u32 remote_node_count, |
588 | u16 remote_node_index) | 588 | u16 remote_node_index) |
589 | { | 589 | { |
590 | if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { | 590 | if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) { |
591 | scic_sds_remote_node_table_release_single_remote_node( | 591 | sci_remote_node_table_release_single_remote_node( |
592 | remote_node_table, remote_node_index); | 592 | remote_node_table, remote_node_index); |
593 | } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { | 593 | } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) { |
594 | scic_sds_remote_node_table_release_triple_remote_node( | 594 | sci_remote_node_table_release_triple_remote_node( |
595 | remote_node_table, remote_node_index); | 595 | remote_node_table, remote_node_index); |
596 | } | 596 | } |
597 | } | 597 | } |
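The allocation and release routines above keep each three-node group listed in one of three selector sets according to how many of its entries are currently free, so a fully free group stays reserved for STP triples while SSP singles are carved out of the most fragmented groups first. The sketch below models that bookkeeping with simple per-group free counts instead of the driver's DWORD bitmaps; all names and the count-based representation are illustrative assumptions, not the driver's implementation.

#include <stdio.h>

#define TOY_GROUPS		4
#define TOY_NODES_PER_GROUP	3	/* analogous to SCU_STP_REMOTE_NODE_COUNT */

static int free_count[TOY_GROUPS];	/* free entries per group: 0..3 */

/* Which selector set (0, 1 or 2) a group with 'free' entries belongs to. */
static int selector_of(int free)
{
	return free - 1;	/* 1 free -> set 0, 2 free -> set 1, 3 free -> set 2 */
}

static int toy_alloc_single(void)
{
	int sel, group;

	/* Prefer selector 0 (most fragmented groups), then 1, then 2. */
	for (sel = 0; sel < 3; sel++)
		for (group = 0; group < TOY_GROUPS; group++)
			if (free_count[group] &&
			    selector_of(free_count[group]) == sel) {
				free_count[group]--;
				return group * TOY_NODES_PER_GROUP +
				       (TOY_NODES_PER_GROUP - 1 - free_count[group]);
			}
	return -1;		/* no remote node index available */
}

static int toy_alloc_triple(void)
{
	int group;

	/* A triple needs a group from selector 2, i.e. all three entries free. */
	for (group = 0; group < TOY_GROUPS; group++)
		if (free_count[group] == TOY_NODES_PER_GROUP) {
			free_count[group] = 0;
			return group * TOY_NODES_PER_GROUP;
		}
	return -1;
}

static void toy_release_single(int rni)
{
	/*
	 * Releasing one node moves its group to the next selector set up.
	 * The toy does not remember which specific slot was freed; the
	 * driver keeps a per-node availability bitmap for that.
	 */
	free_count[rni / TOY_NODES_PER_GROUP]++;
}

int main(void)
{
	int group, a, b, c;

	for (group = 0; group < TOY_GROUPS; group++)
		free_count[group] = TOY_NODES_PER_GROUP;

	a = toy_alloc_triple();		/* consumes a whole group, e.g. for STP */
	b = toy_alloc_single();		/* e.g. an SSP device */
	c = toy_alloc_single();
	printf("triple at %d, singles at %d and %d\n", a, b, c);

	toy_release_single(b);
	toy_release_single(c);
	return 0;
}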
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h index 5737d9a30cca..721ab982d2ac 100644 --- a/drivers/scsi/isci/remote_node_table.h +++ b/drivers/scsi/isci/remote_node_table.h | |||
@@ -130,11 +130,11 @@ | |||
130 | #define SCU_SATA_REMOTE_NODE_COUNT 1 | 130 | #define SCU_SATA_REMOTE_NODE_COUNT 1 |
131 | 131 | ||
132 | /** | 132 | /** |
133 | * struct scic_remote_node_table - | 133 | * struct sci_remote_node_table - |
134 | * | 134 | * |
135 | * | 135 | * |
136 | */ | 136 | */ |
137 | struct scic_remote_node_table { | 137 | struct sci_remote_node_table { |
138 | /** | 138 | /** |
139 | * This field contains the array size in dwords | 139 | * This field contains the array size in dwords |
140 | */ | 140 | */ |
@@ -172,16 +172,16 @@ struct scic_remote_node_table { | |||
172 | 172 | ||
173 | /* --------------------------------------------------------------------------- */ | 173 | /* --------------------------------------------------------------------------- */ |
174 | 174 | ||
175 | void scic_sds_remote_node_table_initialize( | 175 | void sci_remote_node_table_initialize( |
176 | struct scic_remote_node_table *remote_node_table, | 176 | struct sci_remote_node_table *remote_node_table, |
177 | u32 remote_node_entries); | 177 | u32 remote_node_entries); |
178 | 178 | ||
179 | u16 scic_sds_remote_node_table_allocate_remote_node( | 179 | u16 sci_remote_node_table_allocate_remote_node( |
180 | struct scic_remote_node_table *remote_node_table, | 180 | struct sci_remote_node_table *remote_node_table, |
181 | u32 remote_node_count); | 181 | u32 remote_node_count); |
182 | 182 | ||
183 | void scic_sds_remote_node_table_release_remote_node_index( | 183 | void sci_remote_node_table_release_remote_node_index( |
184 | struct scic_remote_node_table *remote_node_table, | 184 | struct sci_remote_node_table *remote_node_table, |
185 | u32 remote_node_count, | 185 | u32 remote_node_count, |
186 | u16 remote_node_index); | 186 | u16 remote_node_index); |
187 | 187 | ||
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 36e674896bc5..bcb3c08c19a7 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -89,7 +89,7 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, | |||
89 | return ihost->task_context_dma + offset; | 89 | return ihost->task_context_dma + offset; |
90 | } | 90 | } |
91 | 91 | ||
92 | return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); | 92 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); |
93 | } | 93 | } |
94 | 94 | ||
95 | static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) | 95 | static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) |
@@ -100,7 +100,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) | |||
100 | e->address_modifier = 0; | 100 | e->address_modifier = 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static void scic_sds_request_build_sgl(struct isci_request *ireq) | 103 | static void sci_request_build_sgl(struct isci_request *ireq) |
104 | { | 104 | { |
105 | struct isci_host *ihost = ireq->isci_host; | 105 | struct isci_host *ihost = ireq->isci_host; |
106 | struct sas_task *task = isci_request_access_task(ireq); | 106 | struct sas_task *task = isci_request_access_task(ireq); |
@@ -158,7 +158,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) | |||
158 | } | 158 | } |
159 | } | 159 | } |
160 | 160 | ||
161 | static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) | 161 | static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) |
162 | { | 162 | { |
163 | struct ssp_cmd_iu *cmd_iu; | 163 | struct ssp_cmd_iu *cmd_iu; |
164 | struct sas_task *task = isci_request_access_task(ireq); | 164 | struct sas_task *task = isci_request_access_task(ireq); |
@@ -178,7 +178,7 @@ static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) | |||
178 | sizeof(task->ssp_task.cdb) / sizeof(u32)); | 178 | sizeof(task->ssp_task.cdb) / sizeof(u32)); |
179 | } | 179 | } |
180 | 180 | ||
181 | static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) | 181 | static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) |
182 | { | 182 | { |
183 | struct ssp_task_iu *task_iu; | 183 | struct ssp_task_iu *task_iu; |
184 | struct sas_task *task = isci_request_access_task(ireq); | 184 | struct sas_task *task = isci_request_access_task(ireq); |
@@ -211,8 +211,8 @@ static void scu_ssp_reqeust_construct_task_context( | |||
211 | struct isci_remote_device *idev; | 211 | struct isci_remote_device *idev; |
212 | struct isci_port *iport; | 212 | struct isci_port *iport; |
213 | 213 | ||
214 | idev = scic_sds_request_get_device(ireq); | 214 | idev = sci_request_get_device(ireq); |
215 | iport = scic_sds_request_get_port(ireq); | 215 | iport = sci_request_get_port(ireq); |
216 | 216 | ||
217 | /* Fill in the TC with its required data */ | 217 | /* Fill in the TC with its required data */ |
218 | task_context->abort = 0; | 218 | task_context->abort = 0; |
@@ -220,13 +220,13 @@ static void scu_ssp_reqeust_construct_task_context( | |||
220 | task_context->initiator_request = 1; | 220 | task_context->initiator_request = 1; |
221 | task_context->connection_rate = idev->connection_rate; | 221 | task_context->connection_rate = idev->connection_rate; |
222 | task_context->protocol_engine_index = | 222 | task_context->protocol_engine_index = |
223 | scic_sds_controller_get_protocol_engine_group(controller); | 223 | sci_controller_get_protocol_engine_group(controller); |
224 | task_context->logical_port_index = scic_sds_port_get_index(iport); | 224 | task_context->logical_port_index = sci_port_get_index(iport); |
225 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; | 225 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; |
226 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 226 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
227 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 227 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
228 | 228 | ||
229 | task_context->remote_node_index = scic_sds_remote_device_get_index(idev); | 229 | task_context->remote_node_index = sci_remote_device_get_index(idev); |
230 | task_context->command_code = 0; | 230 | task_context->command_code = 0; |
231 | 231 | ||
232 | task_context->link_layer_control = 0; | 232 | task_context->link_layer_control = 0; |
@@ -242,9 +242,9 @@ static void scu_ssp_reqeust_construct_task_context( | |||
242 | task_context->task_phase = 0x01; | 242 | task_context->task_phase = 0x01; |
243 | 243 | ||
244 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 244 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
245 | (scic_sds_controller_get_protocol_engine_group(controller) << | 245 | (sci_controller_get_protocol_engine_group(controller) << |
246 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 246 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
247 | (scic_sds_port_get_index(iport) << | 247 | (sci_port_get_index(iport) << |
248 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 248 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
249 | ISCI_TAG_TCI(ireq->io_tag)); | 249 | ISCI_TAG_TCI(ireq->io_tag)); |
250 | 250 | ||
@@ -252,7 +252,7 @@ static void scu_ssp_reqeust_construct_task_context( | |||
252 | * Copy the physical address for the command buffer to the | 252 | * Copy the physical address for the command buffer to the |
253 | * SCU Task Context | 253 | * SCU Task Context |
254 | */ | 254 | */ |
255 | dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); | 255 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); |
256 | 256 | ||
257 | task_context->command_iu_upper = upper_32_bits(dma_addr); | 257 | task_context->command_iu_upper = upper_32_bits(dma_addr); |
258 | task_context->command_iu_lower = lower_32_bits(dma_addr); | 258 | task_context->command_iu_lower = lower_32_bits(dma_addr); |
@@ -261,7 +261,7 @@ static void scu_ssp_reqeust_construct_task_context( | |||
261 | * Copy the physical address for the response buffer to the | 261 | * Copy the physical address for the response buffer to the |
262 | * SCU Task Context | 262 | * SCU Task Context |
263 | */ | 263 | */ |
264 | dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); | 264 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); |
265 | 265 | ||
266 | task_context->response_iu_upper = upper_32_bits(dma_addr); | 266 | task_context->response_iu_upper = upper_32_bits(dma_addr); |
267 | task_context->response_iu_lower = lower_32_bits(dma_addr); | 267 | task_context->response_iu_lower = lower_32_bits(dma_addr); |
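Review note: the command and response IU pointers are 64-bit bus addresses stored as two 32-bit task-context fields, which is why each dma_addr goes through upper_32_bits()/lower_32_bits(). A minimal sketch of that split, assuming only the standard helpers from <linux/kernel.h> (the function and field names below are illustrative, not the driver's):

    #include <linux/kernel.h>   /* upper_32_bits(), lower_32_bits() */
    #include <linux/types.h>    /* dma_addr_t, u32 */

    /* Illustrative only: split a 64-bit bus address into the two
     * 32-bit halves a hardware descriptor typically expects. */
    static void fill_iu_address(u32 *upper, u32 *lower, dma_addr_t addr)
    {
            *upper = upper_32_bits(addr);   /* bits 63:32 (zero for 32-bit DMA) */
            *lower = lower_32_bits(addr);   /* bits 31:0 */
    }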
@@ -298,7 +298,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, | |||
298 | task_context->transfer_length_bytes = len; | 298 | task_context->transfer_length_bytes = len; |
299 | 299 | ||
300 | if (task_context->transfer_length_bytes > 0) | 300 | if (task_context->transfer_length_bytes > 0) |
301 | scic_sds_request_build_sgl(ireq); | 301 | sci_request_build_sgl(ireq); |
302 | } | 302 | } |
303 | 303 | ||
304 | /** | 304 | /** |
@@ -349,8 +349,8 @@ static void scu_sata_reqeust_construct_task_context( | |||
349 | struct isci_remote_device *idev; | 349 | struct isci_remote_device *idev; |
350 | struct isci_port *iport; | 350 | struct isci_port *iport; |
351 | 351 | ||
352 | idev = scic_sds_request_get_device(ireq); | 352 | idev = sci_request_get_device(ireq); |
353 | iport = scic_sds_request_get_port(ireq); | 353 | iport = sci_request_get_port(ireq); |
354 | 354 | ||
355 | /* Fill in the TC with its required data */ | 355 | /* Fill in the TC with its required data */ |
356 | task_context->abort = 0; | 356 | task_context->abort = 0; |
@@ -358,14 +358,14 @@ static void scu_sata_reqeust_construct_task_context( | |||
358 | task_context->initiator_request = 1; | 358 | task_context->initiator_request = 1; |
359 | task_context->connection_rate = idev->connection_rate; | 359 | task_context->connection_rate = idev->connection_rate; |
360 | task_context->protocol_engine_index = | 360 | task_context->protocol_engine_index = |
361 | scic_sds_controller_get_protocol_engine_group(controller); | 361 | sci_controller_get_protocol_engine_group(controller); |
362 | task_context->logical_port_index = | 362 | task_context->logical_port_index = |
363 | scic_sds_port_get_index(iport); | 363 | sci_port_get_index(iport); |
364 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; | 364 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; |
365 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 365 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
366 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 366 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
367 | 367 | ||
368 | task_context->remote_node_index = scic_sds_remote_device_get_index(idev); | 368 | task_context->remote_node_index = sci_remote_device_get_index(idev); |
369 | task_context->command_code = 0; | 369 | task_context->command_code = 0; |
370 | 370 | ||
371 | task_context->link_layer_control = 0; | 371 | task_context->link_layer_control = 0; |
@@ -385,9 +385,9 @@ static void scu_sata_reqeust_construct_task_context( | |||
385 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; | 385 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; |
386 | 386 | ||
387 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 387 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
388 | (scic_sds_controller_get_protocol_engine_group(controller) << | 388 | (sci_controller_get_protocol_engine_group(controller) << |
389 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 389 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
390 | (scic_sds_port_get_index(iport) << | 390 | (sci_port_get_index(iport) << |
391 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 391 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
392 | ISCI_TAG_TCI(ireq->io_tag)); | 392 | ISCI_TAG_TCI(ireq->io_tag)); |
393 | /* | 393 | /* |
@@ -395,7 +395,7 @@ static void scu_sata_reqeust_construct_task_context( | |||
395 | * Context. We must offset the command buffer by 4 bytes because the | 395 | * Context. We must offset the command buffer by 4 bytes because the |
396 | * first 4 bytes are transferred in the body of the TC. | 396 | * first 4 bytes are transferred in the body of the TC. |
397 | */ | 397 | */ |
398 | dma_addr = scic_io_request_get_dma_addr(ireq, | 398 | dma_addr = sci_io_request_get_dma_addr(ireq, |
399 | ((char *) &ireq->stp.cmd) + | 399 | ((char *) &ireq->stp.cmd) + |
400 | sizeof(u32)); | 400 | sizeof(u32)); |
401 | 401 | ||
@@ -420,7 +420,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq | |||
420 | task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); | 420 | task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); |
421 | } | 421 | } |
422 | 422 | ||
423 | static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, | 423 | static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, |
424 | bool copy_rx_frame) | 424 | bool copy_rx_frame) |
425 | { | 425 | { |
426 | struct isci_stp_request *stp_req = &ireq->stp.req; | 426 | struct isci_stp_request *stp_req = &ireq->stp.req; |
@@ -432,7 +432,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i | |||
432 | stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; | 432 | stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; |
433 | 433 | ||
434 | if (copy_rx_frame) { | 434 | if (copy_rx_frame) { |
435 | scic_sds_request_build_sgl(ireq); | 435 | sci_request_build_sgl(ireq); |
436 | stp_req->sgl.index = 0; | 436 | stp_req->sgl.index = 0; |
437 | } else { | 437 | } else { |
438 | /* The user does not want the data copied to the SGL buffer location */ | 438 | /* The user does not want the data copied to the SGL buffer location */ |
@@ -454,7 +454,7 @@ static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *i | |||
454 | * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method | 454 | * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method |
455 | * returns an indication as to whether the construction was successful. | 455 | * returns an indication as to whether the construction was successful. |
456 | */ | 456 | */ |
457 | static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, | 457 | static void sci_stp_optimized_request_construct(struct isci_request *ireq, |
458 | u8 optimized_task_type, | 458 | u8 optimized_task_type, |
459 | u32 len, | 459 | u32 len, |
460 | enum dma_data_direction dir) | 460 | enum dma_data_direction dir) |
@@ -465,7 +465,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, | |||
465 | scu_sata_reqeust_construct_task_context(ireq, task_context); | 465 | scu_sata_reqeust_construct_task_context(ireq, task_context); |
466 | 466 | ||
467 | /* Copy over the SGL elements */ | 467 | /* Copy over the SGL elements */ |
468 | scic_sds_request_build_sgl(ireq); | 468 | sci_request_build_sgl(ireq); |
469 | 469 | ||
470 | /* Copy over the number of bytes to be transferred */ | 470 | /* Copy over the number of bytes to be transferred */ |
471 | task_context->transfer_length_bytes = len; | 471 | task_context->transfer_length_bytes = len; |
@@ -490,7 +490,7 @@ static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, | |||
490 | 490 | ||
491 | 491 | ||
492 | static enum sci_status | 492 | static enum sci_status |
493 | scic_io_request_construct_sata(struct isci_request *ireq, | 493 | sci_io_request_construct_sata(struct isci_request *ireq, |
494 | u32 len, | 494 | u32 len, |
495 | enum dma_data_direction dir, | 495 | enum dma_data_direction dir, |
496 | bool copy) | 496 | bool copy) |
@@ -533,7 +533,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, | |||
533 | 533 | ||
534 | /* NCQ */ | 534 | /* NCQ */ |
535 | if (task->ata_task.use_ncq) { | 535 | if (task->ata_task.use_ncq) { |
536 | scic_sds_stp_optimized_request_construct(ireq, | 536 | sci_stp_optimized_request_construct(ireq, |
537 | SCU_TASK_TYPE_FPDMAQ_READ, | 537 | SCU_TASK_TYPE_FPDMAQ_READ, |
538 | len, dir); | 538 | len, dir); |
539 | return SCI_SUCCESS; | 539 | return SCI_SUCCESS; |
@@ -541,17 +541,17 @@ scic_io_request_construct_sata(struct isci_request *ireq, | |||
541 | 541 | ||
542 | /* DMA */ | 542 | /* DMA */ |
543 | if (task->ata_task.dma_xfer) { | 543 | if (task->ata_task.dma_xfer) { |
544 | scic_sds_stp_optimized_request_construct(ireq, | 544 | sci_stp_optimized_request_construct(ireq, |
545 | SCU_TASK_TYPE_DMA_IN, | 545 | SCU_TASK_TYPE_DMA_IN, |
546 | len, dir); | 546 | len, dir); |
547 | return SCI_SUCCESS; | 547 | return SCI_SUCCESS; |
548 | } else /* PIO */ | 548 | } else /* PIO */ |
549 | return scic_sds_stp_pio_request_construct(ireq, copy); | 549 | return sci_stp_pio_request_construct(ireq, copy); |
550 | 550 | ||
551 | return status; | 551 | return status; |
552 | } | 552 | } |
553 | 553 | ||
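Review note: the SATA construction path above picks one of three task types in priority order, NCQ (FPDMA queued) first, then plain DMA, otherwise PIO, and each branch returns immediately, so the trailing return status; can never be reached as written. A compressed sketch of that selection order (names are illustrative, not the driver's):

    /* Illustrative condensation of the dispatch above: NCQ wins, then DMA,
     * and everything else falls back to the PIO construction path. */
    enum stp_xfer_mode { STP_MODE_NCQ, STP_MODE_DMA, STP_MODE_PIO };

    static enum stp_xfer_mode pick_stp_mode(int use_ncq, int dma_xfer)
    {
            if (use_ncq)
                    return STP_MODE_NCQ;    /* optimized path, SCU_TASK_TYPE_FPDMAQ_READ */
            if (dma_xfer)
                    return STP_MODE_DMA;    /* optimized path, SCU_TASK_TYPE_DMA_IN */
            return STP_MODE_PIO;            /* everything else is constructed for PIO */
    }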
554 | static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) | 554 | static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) |
555 | { | 555 | { |
556 | struct sas_task *task = isci_request_access_task(ireq); | 556 | struct sas_task *task = isci_request_access_task(ireq); |
557 | 557 | ||
@@ -561,28 +561,28 @@ static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request * | |||
561 | task->data_dir, | 561 | task->data_dir, |
562 | task->total_xfer_len); | 562 | task->total_xfer_len); |
563 | 563 | ||
564 | scic_sds_io_request_build_ssp_command_iu(ireq); | 564 | sci_io_request_build_ssp_command_iu(ireq); |
565 | 565 | ||
566 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 566 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
567 | 567 | ||
568 | return SCI_SUCCESS; | 568 | return SCI_SUCCESS; |
569 | } | 569 | } |
570 | 570 | ||
571 | enum sci_status scic_task_request_construct_ssp( | 571 | enum sci_status sci_task_request_construct_ssp( |
572 | struct isci_request *ireq) | 572 | struct isci_request *ireq) |
573 | { | 573 | { |
574 | /* Construct the SSP Task SCU Task Context */ | 574 | /* Construct the SSP Task SCU Task Context */ |
575 | scu_ssp_task_request_construct_task_context(ireq); | 575 | scu_ssp_task_request_construct_task_context(ireq); |
576 | 576 | ||
577 | /* Fill in the SSP Task IU */ | 577 | /* Fill in the SSP Task IU */ |
578 | scic_sds_task_request_build_ssp_task_iu(ireq); | 578 | sci_task_request_build_ssp_task_iu(ireq); |
579 | 579 | ||
580 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 580 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
581 | 581 | ||
582 | return SCI_SUCCESS; | 582 | return SCI_SUCCESS; |
583 | } | 583 | } |
584 | 584 | ||
585 | static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) | 585 | static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) |
586 | { | 586 | { |
587 | enum sci_status status; | 587 | enum sci_status status; |
588 | bool copy = false; | 588 | bool copy = false; |
@@ -592,7 +592,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request | |||
592 | 592 | ||
593 | copy = (task->data_dir == DMA_NONE) ? false : true; | 593 | copy = (task->data_dir == DMA_NONE) ? false : true; |
594 | 594 | ||
595 | status = scic_io_request_construct_sata(ireq, | 595 | status = sci_io_request_construct_sata(ireq, |
596 | task->total_xfer_len, | 596 | task->total_xfer_len, |
597 | task->data_dir, | 597 | task->data_dir, |
598 | copy); | 598 | copy); |
@@ -603,7 +603,7 @@ static enum sci_status scic_io_request_construct_basic_sata(struct isci_request | |||
603 | return status; | 603 | return status; |
604 | } | 604 | } |
605 | 605 | ||
606 | enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) | 606 | enum sci_status sci_task_request_construct_sata(struct isci_request *ireq) |
607 | { | 607 | { |
608 | enum sci_status status = SCI_SUCCESS; | 608 | enum sci_status status = SCI_SUCCESS; |
609 | 609 | ||
@@ -648,7 +648,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) | |||
648 | * BAR1 is the scu_registers | 648 | * BAR1 is the scu_registers |
649 | * 0x20002C = 0x200000 + 0x2c | 649 | * 0x20002C = 0x200000 + 0x2c |
650 | * = start of task context SRAM + offset of (type.ssp.data_offset) | 650 | * = start of task context SRAM + offset of (type.ssp.data_offset) |
651 | * TCi is the io_tag of struct scic_sds_request | 651 | * TCi is the io_tag of struct sci_request |
652 | */ | 652 | */ |
653 | ret_val = readl(scu_reg_base + | 653 | ret_val = readl(scu_reg_base + |
654 | (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + | 654 | (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + |
@@ -658,7 +658,7 @@ static u32 sci_req_tx_bytes(struct isci_request *ireq) | |||
658 | return ret_val; | 658 | return ret_val; |
659 | } | 659 | } |
660 | 660 | ||
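Review note on the address arithmetic spelled out in the comment above: the value is read from task context SRAM behind BAR1, at a base of 0x200000 plus the 0x2c offset of type.ssp.data_offset; the TCi mentioned in the comment presumably selects which task context's copy is read, but the continuation of the readl() falls outside the displayed context. A hedged, illustrative reconstruction (the struct layout and per-TCi stride are assumptions, chosen only so the numbers line up with the 0x20002C figure in the comment):

    #include <stddef.h>                     /* offsetof */

    #define TC_SRAM_BASE    0x200000u       /* start of task context SRAM in BAR1 */

    struct tc_example {                     /* stand-in, not the real scu_task_context */
            unsigned int pad[11];           /* padding so the field lands at 0x2c */
            unsigned int data_offset;       /* stand-in for type.ssp.data_offset */
    };

    static unsigned long tc_data_offset_addr(unsigned int tci)
    {
            /* 0x200000 + 0x2c + TCi * entry size, per the comment above */
            return TC_SRAM_BASE + offsetof(struct tc_example, data_offset) +
                   (unsigned long)tci * sizeof(struct tc_example);
    }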
661 | enum sci_status scic_sds_request_start(struct isci_request *ireq) | 661 | enum sci_status sci_request_start(struct isci_request *ireq) |
662 | { | 662 | { |
663 | enum sci_base_request_states state; | 663 | enum sci_base_request_states state; |
664 | struct scu_task_context *tc = ireq->tc; | 664 | struct scu_task_context *tc = ireq->tc; |
@@ -708,7 +708,7 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq) | |||
708 | } | 708 | } |
709 | 709 | ||
710 | enum sci_status | 710 | enum sci_status |
711 | scic_sds_io_request_terminate(struct isci_request *ireq) | 711 | sci_io_request_terminate(struct isci_request *ireq) |
712 | { | 712 | { |
713 | enum sci_base_request_states state; | 713 | enum sci_base_request_states state; |
714 | 714 | ||
@@ -716,7 +716,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) | |||
716 | 716 | ||
717 | switch (state) { | 717 | switch (state) { |
718 | case SCI_REQ_CONSTRUCTED: | 718 | case SCI_REQ_CONSTRUCTED: |
719 | scic_sds_request_set_status(ireq, | 719 | sci_request_set_status(ireq, |
720 | SCU_TASK_DONE_TASK_ABORT, | 720 | SCU_TASK_DONE_TASK_ABORT, |
721 | SCI_FAILURE_IO_TERMINATED); | 721 | SCI_FAILURE_IO_TERMINATED); |
722 | 722 | ||
@@ -759,7 +759,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) | |||
759 | return SCI_FAILURE_INVALID_STATE; | 759 | return SCI_FAILURE_INVALID_STATE; |
760 | } | 760 | } |
761 | 761 | ||
762 | enum sci_status scic_sds_request_complete(struct isci_request *ireq) | 762 | enum sci_status sci_request_complete(struct isci_request *ireq) |
763 | { | 763 | { |
764 | enum sci_base_request_states state; | 764 | enum sci_base_request_states state; |
765 | struct isci_host *ihost = ireq->owning_controller; | 765 | struct isci_host *ihost = ireq->owning_controller; |
@@ -770,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) | |||
770 | return SCI_FAILURE_INVALID_STATE; | 770 | return SCI_FAILURE_INVALID_STATE; |
771 | 771 | ||
772 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) | 772 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) |
773 | scic_sds_controller_release_frame(ihost, | 773 | sci_controller_release_frame(ihost, |
774 | ireq->saved_rx_frame_index); | 774 | ireq->saved_rx_frame_index); |
775 | 775 | ||
776 | /* XXX can we just stop the machine and remove the 'final' state? */ | 776 | /* XXX can we just stop the machine and remove the 'final' state? */ |
@@ -778,7 +778,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) | |||
778 | return SCI_SUCCESS; | 778 | return SCI_SUCCESS; |
779 | } | 779 | } |
780 | 780 | ||
781 | enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, | 781 | enum sci_status sci_io_request_event_handler(struct isci_request *ireq, |
782 | u32 event_code) | 782 | u32 event_code) |
783 | { | 783 | { |
784 | enum sci_base_request_states state; | 784 | enum sci_base_request_states state; |
@@ -818,7 +818,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, | |||
818 | * @sci_req: This parameter specifies the request object for which to copy | 818 | * @sci_req: This parameter specifies the request object for which to copy |
819 | * the response data. | 819 | * the response data. |
820 | */ | 820 | */ |
821 | static void scic_sds_io_request_copy_response(struct isci_request *ireq) | 821 | static void sci_io_request_copy_response(struct isci_request *ireq) |
822 | { | 822 | { |
823 | void *resp_buf; | 823 | void *resp_buf; |
824 | u32 len; | 824 | u32 len; |
@@ -848,7 +848,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
848 | */ | 848 | */ |
849 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 849 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
850 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 850 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
851 | scic_sds_request_set_status(ireq, | 851 | sci_request_set_status(ireq, |
852 | SCU_TASK_DONE_GOOD, | 852 | SCU_TASK_DONE_GOOD, |
853 | SCI_SUCCESS); | 853 | SCI_SUCCESS); |
854 | break; | 854 | break; |
@@ -868,11 +868,11 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
868 | word_cnt); | 868 | word_cnt); |
869 | 869 | ||
870 | if (resp->status == 0) { | 870 | if (resp->status == 0) { |
871 | scic_sds_request_set_status(ireq, | 871 | sci_request_set_status(ireq, |
872 | SCU_TASK_DONE_GOOD, | 872 | SCU_TASK_DONE_GOOD, |
873 | SCI_SUCCESS_IO_DONE_EARLY); | 873 | SCI_SUCCESS_IO_DONE_EARLY); |
874 | } else { | 874 | } else { |
875 | scic_sds_request_set_status(ireq, | 875 | sci_request_set_status(ireq, |
876 | SCU_TASK_DONE_CHECK_RESPONSE, | 876 | SCU_TASK_DONE_CHECK_RESPONSE, |
877 | SCI_FAILURE_IO_RESPONSE_VALID); | 877 | SCI_FAILURE_IO_RESPONSE_VALID); |
878 | } | 878 | } |
@@ -885,7 +885,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
885 | &ireq->ssp.rsp, | 885 | &ireq->ssp.rsp, |
886 | word_cnt); | 886 | word_cnt); |
887 | 887 | ||
888 | scic_sds_request_set_status(ireq, | 888 | sci_request_set_status(ireq, |
889 | SCU_TASK_DONE_CHECK_RESPONSE, | 889 | SCU_TASK_DONE_CHECK_RESPONSE, |
890 | SCI_FAILURE_IO_RESPONSE_VALID); | 890 | SCI_FAILURE_IO_RESPONSE_VALID); |
891 | break; | 891 | break; |
@@ -900,11 +900,11 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
900 | datapres = resp_iu->datapres; | 900 | datapres = resp_iu->datapres; |
901 | 901 | ||
902 | if (datapres == 1 || datapres == 2) { | 902 | if (datapres == 1 || datapres == 2) { |
903 | scic_sds_request_set_status(ireq, | 903 | sci_request_set_status(ireq, |
904 | SCU_TASK_DONE_CHECK_RESPONSE, | 904 | SCU_TASK_DONE_CHECK_RESPONSE, |
905 | SCI_FAILURE_IO_RESPONSE_VALID); | 905 | SCI_FAILURE_IO_RESPONSE_VALID); |
906 | } else | 906 | } else |
907 | scic_sds_request_set_status(ireq, | 907 | sci_request_set_status(ireq, |
908 | SCU_TASK_DONE_GOOD, | 908 | SCU_TASK_DONE_GOOD, |
909 | SCI_SUCCESS); | 909 | SCI_SUCCESS); |
910 | break; | 910 | break; |
@@ -921,12 +921,12 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
921 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 921 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
922 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): | 922 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): |
923 | if (ireq->protocol == SCIC_STP_PROTOCOL) { | 923 | if (ireq->protocol == SCIC_STP_PROTOCOL) { |
924 | scic_sds_request_set_status(ireq, | 924 | sci_request_set_status(ireq, |
925 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 925 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
926 | SCU_COMPLETION_TL_STATUS_SHIFT, | 926 | SCU_COMPLETION_TL_STATUS_SHIFT, |
927 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); | 927 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); |
928 | } else { | 928 | } else { |
929 | scic_sds_request_set_status(ireq, | 929 | sci_request_set_status(ireq, |
930 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 930 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
931 | SCU_COMPLETION_TL_STATUS_SHIFT, | 931 | SCU_COMPLETION_TL_STATUS_SHIFT, |
932 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 932 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
@@ -944,7 +944,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
944 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): | 944 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): |
945 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): | 945 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): |
946 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): | 946 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): |
947 | scic_sds_request_set_status(ireq, | 947 | sci_request_set_status(ireq, |
948 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 948 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
949 | SCU_COMPLETION_TL_STATUS_SHIFT, | 949 | SCU_COMPLETION_TL_STATUS_SHIFT, |
950 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); | 950 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); |
@@ -967,7 +967,7 @@ request_started_state_tc_event(struct isci_request *ireq, | |||
967 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): | 967 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): |
968 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): | 968 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): |
969 | default: | 969 | default: |
970 | scic_sds_request_set_status( | 970 | sci_request_set_status( |
971 | ireq, | 971 | ireq, |
972 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> | 972 | SCU_GET_COMPLETION_TL_STATUS(completion_code) >> |
973 | SCU_COMPLETION_TL_STATUS_SHIFT, | 973 | SCU_COMPLETION_TL_STATUS_SHIFT, |
@@ -991,7 +991,7 @@ request_aborting_state_tc_event(struct isci_request *ireq, | |||
991 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 991 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
992 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): | 992 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): |
993 | case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): | 993 | case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): |
994 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, | 994 | sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, |
995 | SCI_FAILURE_IO_TERMINATED); | 995 | SCI_FAILURE_IO_TERMINATED); |
996 | 996 | ||
997 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 997 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -1012,7 +1012,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq | |||
1012 | { | 1012 | { |
1013 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1013 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1014 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1014 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1015 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1015 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1016 | SCI_SUCCESS); | 1016 | SCI_SUCCESS); |
1017 | 1017 | ||
1018 | sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); | 1018 | sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); |
@@ -1036,7 +1036,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq | |||
1036 | * If a NAK was received, then it is up to the user to retry | 1036 | * If a NAK was received, then it is up to the user to retry |
1037 | * the request. | 1037 | * the request. |
1038 | */ | 1038 | */ |
1039 | scic_sds_request_set_status(ireq, | 1039 | sci_request_set_status(ireq, |
1040 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1040 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1041 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1041 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1042 | 1042 | ||
@@ -1057,7 +1057,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
1057 | * unexpected. But if the TC has success status, we | 1057 | * unexpected. But if the TC has success status, we |
1058 | * complete the IO anyway. | 1058 | * complete the IO anyway. |
1059 | */ | 1059 | */ |
1060 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1060 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1061 | SCI_SUCCESS); | 1061 | SCI_SUCCESS); |
1062 | 1062 | ||
1063 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1063 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -1074,7 +1074,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
1074 | * these SMP_XXX_XX_ERR status. For this type of error, | 1074 | * these SMP_XXX_XX_ERR status. For this type of error, |
1075 | * we ask ihost user to retry the request. | 1075 | * we ask ihost user to retry the request. |
1076 | */ | 1076 | */ |
1077 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, | 1077 | sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, |
1078 | SCI_FAILURE_RETRY_REQUIRED); | 1078 | SCI_FAILURE_RETRY_REQUIRED); |
1079 | 1079 | ||
1080 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1080 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -1084,7 +1084,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
1084 | /* All other completion status cause the IO to be complete. If a NAK | 1084 | /* All other completion status cause the IO to be complete. If a NAK |
1085 | * was received, then it is up to the user to retry the request | 1085 | * was received, then it is up to the user to retry the request |
1086 | */ | 1086 | */ |
1087 | scic_sds_request_set_status(ireq, | 1087 | sci_request_set_status(ireq, |
1088 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1088 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1089 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1089 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1090 | 1090 | ||
@@ -1101,7 +1101,7 @@ smp_request_await_tc_event(struct isci_request *ireq, | |||
1101 | { | 1101 | { |
1102 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1102 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1103 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1103 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1104 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1104 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1105 | SCI_SUCCESS); | 1105 | SCI_SUCCESS); |
1106 | 1106 | ||
1107 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1107 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
@@ -1111,7 +1111,7 @@ smp_request_await_tc_event(struct isci_request *ireq, | |||
1111 | * complete. If a NAK was received, then it is up to | 1111 | * complete. If a NAK was received, then it is up to |
1112 | * the user to retry the request. | 1112 | * the user to retry the request. |
1113 | */ | 1113 | */ |
1114 | scic_sds_request_set_status(ireq, | 1114 | sci_request_set_status(ireq, |
1115 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1115 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1116 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1116 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1117 | 1117 | ||
@@ -1122,7 +1122,7 @@ smp_request_await_tc_event(struct isci_request *ireq, | |||
1122 | return SCI_SUCCESS; | 1122 | return SCI_SUCCESS; |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, | 1125 | void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, |
1126 | u16 ncq_tag) | 1126 | u16 ncq_tag) |
1127 | { | 1127 | { |
1128 | /** | 1128 | /** |
@@ -1171,7 +1171,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, | |||
1171 | { | 1171 | { |
1172 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1172 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1173 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1173 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1174 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1174 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1175 | SCI_SUCCESS); | 1175 | SCI_SUCCESS); |
1176 | 1176 | ||
1177 | sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); | 1177 | sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); |
@@ -1182,7 +1182,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, | |||
1182 | * complete. If a NAK was received, then it is up to | 1182 | * complete. If a NAK was received, then it is up to |
1183 | * the user to retry the request. | 1183 | * the user to retry the request. |
1184 | */ | 1184 | */ |
1185 | scic_sds_request_set_status(ireq, | 1185 | sci_request_set_status(ireq, |
1186 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1186 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1187 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1187 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1188 | 1188 | ||
@@ -1198,7 +1198,7 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, | |||
1198 | /* transmit DATA_FIS from (current sgl + offset) for input | 1198 | /* transmit DATA_FIS from (current sgl + offset) for input |
1199 | * parameter length. current sgl and offset are already stored in the IO request | 1199 | * parameter length. current sgl and offset are already stored in the IO request |
1200 | */ | 1200 | */ |
1201 | static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( | 1201 | static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( |
1202 | struct isci_request *ireq, | 1202 | struct isci_request *ireq, |
1203 | u32 length) | 1203 | u32 length) |
1204 | { | 1204 | { |
@@ -1223,10 +1223,10 @@ static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame( | |||
1223 | task_context->type.stp.fis_type = FIS_DATA; | 1223 | task_context->type.stp.fis_type = FIS_DATA; |
1224 | 1224 | ||
1225 | /* send the new TC out. */ | 1225 | /* send the new TC out. */ |
1226 | return scic_controller_continue_io(ireq); | 1226 | return sci_controller_continue_io(ireq); |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) | 1229 | static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) |
1230 | { | 1230 | { |
1231 | struct isci_stp_request *stp_req = &ireq->stp.req; | 1231 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1232 | struct scu_sgl_element_pair *sgl_pair; | 1232 | struct scu_sgl_element_pair *sgl_pair; |
@@ -1252,7 +1252,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is | |||
1252 | return SCI_SUCCESS; | 1252 | return SCI_SUCCESS; |
1253 | 1253 | ||
1254 | if (stp_req->pio_len >= len) { | 1254 | if (stp_req->pio_len >= len) { |
1255 | status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len); | 1255 | status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); |
1256 | if (status != SCI_SUCCESS) | 1256 | if (status != SCI_SUCCESS) |
1257 | return status; | 1257 | return status; |
1258 | stp_req->pio_len -= len; | 1258 | stp_req->pio_len -= len; |
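Review note: the PIO data-out path transmits at most one SGL element per DATA FIS. If the remaining PIO byte count still covers the current element, the whole element goes out and the cursor advances to the next element; otherwise only the remaining bytes go out and the offset into the element is saved for the next pass. A minimal sketch of that bookkeeping (names and the exact update of the residual count are illustrative):

    /* Illustrative cursor update for the chunking rule above. */
    struct pio_pos {
            unsigned int remaining;         /* bytes left in the PIO transfer */
            unsigned int offset;            /* offset into the current SGL element */
    };

    static unsigned int pio_advance(struct pio_pos *p, unsigned int elem_len)
    {
            unsigned int chunk = (p->remaining >= elem_len) ? elem_len : p->remaining;

            p->remaining -= chunk;
            if (chunk == elem_len)
                    p->offset = 0;          /* element consumed; next one starts at 0 */
            else
                    p->offset += chunk;     /* partial element; resume here next time */
            return chunk;                   /* length of the DATA FIS to transmit */
    }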
@@ -1261,7 +1261,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is | |||
1261 | sgl = pio_sgl_next(stp_req); | 1261 | sgl = pio_sgl_next(stp_req); |
1262 | offset = 0; | 1262 | offset = 0; |
1263 | } else if (stp_req->pio_len < len) { | 1263 | } else if (stp_req->pio_len < len) { |
1264 | scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); | 1264 | sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); |
1265 | 1265 | ||
1266 | /* Sgl offset will be adjusted and saved for future */ | 1266 | /* Sgl offset will be adjusted and saved for future */ |
1267 | offset += stp_req->pio_len; | 1267 | offset += stp_req->pio_len; |
@@ -1284,7 +1284,7 @@ static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct is | |||
1284 | * specified data region. enum sci_status | 1284 | * specified data region. enum sci_status |
1285 | */ | 1285 | */ |
1286 | static enum sci_status | 1286 | static enum sci_status |
1287 | scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, | 1287 | sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, |
1288 | u8 *data_buf, u32 len) | 1288 | u8 *data_buf, u32 len) |
1289 | { | 1289 | { |
1290 | struct isci_request *ireq; | 1290 | struct isci_request *ireq; |
@@ -1328,7 +1328,7 @@ scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_r | |||
1328 | * | 1328 | * |
1329 | * Copy the data buffer to the io request data region. enum sci_status | 1329 | * Copy the data buffer to the io request data region. enum sci_status |
1330 | */ | 1330 | */ |
1331 | static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( | 1331 | static enum sci_status sci_stp_request_pio_data_in_copy_data( |
1332 | struct isci_stp_request *stp_req, | 1332 | struct isci_stp_request *stp_req, |
1333 | u8 *data_buffer) | 1333 | u8 *data_buffer) |
1334 | { | 1334 | { |
@@ -1338,14 +1338,14 @@ static enum sci_status scic_sds_stp_request_pio_data_in_copy_data( | |||
1338 | * If there is less than 1K remaining in the transfer request | 1338 | * If there is less than 1K remaining in the transfer request |
1339 | * copy just the data for the transfer */ | 1339 | * copy just the data for the transfer */ |
1340 | if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { | 1340 | if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { |
1341 | status = scic_sds_stp_request_pio_data_in_copy_data_buffer( | 1341 | status = sci_stp_request_pio_data_in_copy_data_buffer( |
1342 | stp_req, data_buffer, stp_req->pio_len); | 1342 | stp_req, data_buffer, stp_req->pio_len); |
1343 | 1343 | ||
1344 | if (status == SCI_SUCCESS) | 1344 | if (status == SCI_SUCCESS) |
1345 | stp_req->pio_len = 0; | 1345 | stp_req->pio_len = 0; |
1346 | } else { | 1346 | } else { |
1347 | /* We are transferring the whole frame so copy */ | 1347 | /* We are transferring the whole frame so copy */ |
1348 | status = scic_sds_stp_request_pio_data_in_copy_data_buffer( | 1348 | status = sci_stp_request_pio_data_in_copy_data_buffer( |
1349 | stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); | 1349 | stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); |
1350 | 1350 | ||
1351 | if (status == SCI_SUCCESS) | 1351 | if (status == SCI_SUCCESS) |
@@ -1363,7 +1363,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, | |||
1363 | 1363 | ||
1364 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1364 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1365 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1365 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1366 | scic_sds_request_set_status(ireq, | 1366 | sci_request_set_status(ireq, |
1367 | SCU_TASK_DONE_GOOD, | 1367 | SCU_TASK_DONE_GOOD, |
1368 | SCI_SUCCESS); | 1368 | SCI_SUCCESS); |
1369 | 1369 | ||
@@ -1375,7 +1375,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, | |||
1375 | * complete. If a NAK was received, then it is up to | 1375 | * complete. If a NAK was received, then it is up to |
1376 | * the user to retry the request. | 1376 | * the user to retry the request. |
1377 | */ | 1377 | */ |
1378 | scic_sds_request_set_status(ireq, | 1378 | sci_request_set_status(ireq, |
1379 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1379 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1380 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1380 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1381 | 1381 | ||
@@ -1398,7 +1398,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, | |||
1398 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1398 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1399 | /* Transmit data */ | 1399 | /* Transmit data */ |
1400 | if (stp_req->pio_len != 0) { | 1400 | if (stp_req->pio_len != 0) { |
1401 | status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); | 1401 | status = sci_stp_request_pio_data_out_transmit_data(ireq); |
1402 | if (status == SCI_SUCCESS) { | 1402 | if (status == SCI_SUCCESS) { |
1403 | if (stp_req->pio_len == 0) | 1403 | if (stp_req->pio_len == 0) |
1404 | all_frames_transferred = true; | 1404 | all_frames_transferred = true; |
@@ -1426,7 +1426,7 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, | |||
1426 | * If a NAK was received, then it is up to the user to retry | 1426 | * If a NAK was received, then it is up to the user to retry |
1427 | * the request. | 1427 | * the request. |
1428 | */ | 1428 | */ |
1429 | scic_sds_request_set_status( | 1429 | sci_request_set_status( |
1430 | ireq, | 1430 | ireq, |
1431 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1431 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1432 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1432 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
@@ -1438,16 +1438,16 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq, | |||
1438 | return status; | 1438 | return status; |
1439 | } | 1439 | } |
1440 | 1440 | ||
1441 | static void scic_sds_stp_request_udma_complete_request( | 1441 | static void sci_stp_request_udma_complete_request( |
1442 | struct isci_request *ireq, | 1442 | struct isci_request *ireq, |
1443 | u32 scu_status, | 1443 | u32 scu_status, |
1444 | enum sci_status sci_status) | 1444 | enum sci_status sci_status) |
1445 | { | 1445 | { |
1446 | scic_sds_request_set_status(ireq, scu_status, sci_status); | 1446 | sci_request_set_status(ireq, scu_status, sci_status); |
1447 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1447 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1448 | } | 1448 | } |
1449 | 1449 | ||
1450 | static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, | 1450 | static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, |
1451 | u32 frame_index) | 1451 | u32 frame_index) |
1452 | { | 1452 | { |
1453 | struct isci_host *ihost = ireq->owning_controller; | 1453 | struct isci_host *ihost = ireq->owning_controller; |
@@ -1455,28 +1455,28 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is | |||
1455 | enum sci_status status; | 1455 | enum sci_status status; |
1456 | u32 *frame_buffer; | 1456 | u32 *frame_buffer; |
1457 | 1457 | ||
1458 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1458 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1459 | frame_index, | 1459 | frame_index, |
1460 | (void **)&frame_header); | 1460 | (void **)&frame_header); |
1461 | 1461 | ||
1462 | if ((status == SCI_SUCCESS) && | 1462 | if ((status == SCI_SUCCESS) && |
1463 | (frame_header->fis_type == FIS_REGD2H)) { | 1463 | (frame_header->fis_type == FIS_REGD2H)) { |
1464 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1464 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1465 | frame_index, | 1465 | frame_index, |
1466 | (void **)&frame_buffer); | 1466 | (void **)&frame_buffer); |
1467 | 1467 | ||
1468 | scic_sds_controller_copy_sata_response(&ireq->stp.rsp, | 1468 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1469 | frame_header, | 1469 | frame_header, |
1470 | frame_buffer); | 1470 | frame_buffer); |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | scic_sds_controller_release_frame(ihost, frame_index); | 1473 | sci_controller_release_frame(ihost, frame_index); |
1474 | 1474 | ||
1475 | return status; | 1475 | return status; |
1476 | } | 1476 | } |
1477 | 1477 | ||
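Review note: in the UDMA general frame handler above, sci_controller_release_frame() runs whether or not the header lookup succeeded or the FIS type matched, and the same inspect-then-release pattern recurs through most branches of the big frame handler that follows, so unsolicited frame buffers are handed back to the controller on success and failure alike. A hedged sketch of that shape (both callbacks are illustrative stand-ins, not driver APIs):

    /* Illustrative shape only: examine the frame, then release it
     * regardless of whether the payload was usable. */
    static int consume_frame(void *frame,
                             int (*inspect)(void *frame),
                             void (*release)(void *frame))
    {
            int status = inspect(frame);    /* may fail; the frame is still ours */

            release(frame);                 /* returned on every path */
            return status;
    }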
1478 | enum sci_status | 1478 | enum sci_status |
1479 | scic_sds_io_request_frame_handler(struct isci_request *ireq, | 1479 | sci_io_request_frame_handler(struct isci_request *ireq, |
1480 | u32 frame_index) | 1480 | u32 frame_index) |
1481 | { | 1481 | { |
1482 | struct isci_host *ihost = ireq->owning_controller; | 1482 | struct isci_host *ihost = ireq->owning_controller; |
@@ -1491,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1491 | struct ssp_frame_hdr ssp_hdr; | 1491 | struct ssp_frame_hdr ssp_hdr; |
1492 | void *frame_header; | 1492 | void *frame_header; |
1493 | 1493 | ||
1494 | scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1494 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1495 | frame_index, | 1495 | frame_index, |
1496 | &frame_header); | 1496 | &frame_header); |
1497 | 1497 | ||
@@ -1502,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1502 | struct ssp_response_iu *resp_iu; | 1502 | struct ssp_response_iu *resp_iu; |
1503 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); | 1503 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); |
1504 | 1504 | ||
1505 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1505 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1506 | frame_index, | 1506 | frame_index, |
1507 | (void **)&resp_iu); | 1507 | (void **)&resp_iu); |
1508 | 1508 | ||
@@ -1512,11 +1512,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1512 | 1512 | ||
1513 | if (resp_iu->datapres == 0x01 || | 1513 | if (resp_iu->datapres == 0x01 || |
1514 | resp_iu->datapres == 0x02) { | 1514 | resp_iu->datapres == 0x02) { |
1515 | scic_sds_request_set_status(ireq, | 1515 | sci_request_set_status(ireq, |
1516 | SCU_TASK_DONE_CHECK_RESPONSE, | 1516 | SCU_TASK_DONE_CHECK_RESPONSE, |
1517 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1517 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1518 | } else | 1518 | } else |
1519 | scic_sds_request_set_status(ireq, | 1519 | sci_request_set_status(ireq, |
1520 | SCU_TASK_DONE_GOOD, | 1520 | SCU_TASK_DONE_GOOD, |
1521 | SCI_SUCCESS); | 1521 | SCI_SUCCESS); |
1522 | } else { | 1522 | } else { |
@@ -1531,22 +1531,22 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1531 | * In any case we are done with this frame buffer return it to | 1531 | * In any case we are done with this frame buffer return it to |
1532 | * the controller | 1532 | * the controller |
1533 | */ | 1533 | */ |
1534 | scic_sds_controller_release_frame(ihost, frame_index); | 1534 | sci_controller_release_frame(ihost, frame_index); |
1535 | 1535 | ||
1536 | return SCI_SUCCESS; | 1536 | return SCI_SUCCESS; |
1537 | } | 1537 | } |
1538 | 1538 | ||
1539 | case SCI_REQ_TASK_WAIT_TC_RESP: | 1539 | case SCI_REQ_TASK_WAIT_TC_RESP: |
1540 | scic_sds_io_request_copy_response(ireq); | 1540 | sci_io_request_copy_response(ireq); |
1541 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1541 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1542 | scic_sds_controller_release_frame(ihost,frame_index); | 1542 | sci_controller_release_frame(ihost, frame_index); |
1543 | return SCI_SUCCESS; | 1543 | return SCI_SUCCESS; |
1544 | 1544 | ||
1545 | case SCI_REQ_SMP_WAIT_RESP: { | 1545 | case SCI_REQ_SMP_WAIT_RESP: { |
1546 | struct smp_resp *rsp_hdr = &ireq->smp.rsp; | 1546 | struct smp_resp *rsp_hdr = &ireq->smp.rsp; |
1547 | void *frame_header; | 1547 | void *frame_header; |
1548 | 1548 | ||
1549 | scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1549 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1550 | frame_index, | 1550 | frame_index, |
1551 | &frame_header); | 1551 | &frame_header); |
1552 | 1552 | ||
@@ -1557,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1557 | if (rsp_hdr->frame_type == SMP_RESPONSE) { | 1557 | if (rsp_hdr->frame_type == SMP_RESPONSE) { |
1558 | void *smp_resp; | 1558 | void *smp_resp; |
1559 | 1559 | ||
1560 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1560 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1561 | frame_index, | 1561 | frame_index, |
1562 | &smp_resp); | 1562 | &smp_resp); |
1563 | 1563 | ||
@@ -1567,7 +1567,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1567 | sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, | 1567 | sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, |
1568 | smp_resp, word_cnt); | 1568 | smp_resp, word_cnt); |
1569 | 1569 | ||
1570 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1570 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1571 | SCI_SUCCESS); | 1571 | SCI_SUCCESS); |
1572 | 1572 | ||
1573 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); | 1573 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); |
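Review note: the SMP response body is copied out of the unsolicited frame a dword at a time with a byte swap (sci_swab32_cpy), landing just past the already-parsed response header. A standalone sketch of such a 32-bit byte-swapping copy (the kernel has helpers for this; the open-coded swap below is only for illustration):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative 32-bit byte-swapping copy: 'words' counts dwords. */
    static void swab32_cpy_example(void *dst, const void *src, size_t words)
    {
            const uint32_t *s = src;
            uint32_t *d = dst;
            size_t i;

            for (i = 0; i < words; i++) {
                    uint32_t v = s[i];

                    d[i] = (v >> 24) | ((v >> 8) & 0x0000ff00) |
                           ((v << 8) & 0x00ff0000) | (v << 24);
            }
    }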
@@ -1584,31 +1584,31 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1584 | frame_index, | 1584 | frame_index, |
1585 | rsp_hdr->frame_type); | 1585 | rsp_hdr->frame_type); |
1586 | 1586 | ||
1587 | scic_sds_request_set_status(ireq, | 1587 | sci_request_set_status(ireq, |
1588 | SCU_TASK_DONE_SMP_FRM_TYPE_ERR, | 1588 | SCU_TASK_DONE_SMP_FRM_TYPE_ERR, |
1589 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1589 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1590 | 1590 | ||
1591 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1591 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | scic_sds_controller_release_frame(ihost, frame_index); | 1594 | sci_controller_release_frame(ihost, frame_index); |
1595 | 1595 | ||
1596 | return SCI_SUCCESS; | 1596 | return SCI_SUCCESS; |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: | 1599 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
1600 | return scic_sds_stp_request_udma_general_frame_handler(ireq, | 1600 | return sci_stp_request_udma_general_frame_handler(ireq, |
1601 | frame_index); | 1601 | frame_index); |
1602 | 1602 | ||
1603 | case SCI_REQ_STP_UDMA_WAIT_D2H: | 1603 | case SCI_REQ_STP_UDMA_WAIT_D2H: |
1604 | /* Use the general frame handler to copy the response data */ | 1604 | /* Use the general frame handler to copy the response data */ |
1605 | status = scic_sds_stp_request_udma_general_frame_handler(ireq, | 1605 | status = sci_stp_request_udma_general_frame_handler(ireq, |
1606 | frame_index); | 1606 | frame_index); |
1607 | 1607 | ||
1608 | if (status != SCI_SUCCESS) | 1608 | if (status != SCI_SUCCESS) |
1609 | return status; | 1609 | return status; |
1610 | 1610 | ||
1611 | scic_sds_stp_request_udma_complete_request(ireq, | 1611 | sci_stp_request_udma_complete_request(ireq, |
1612 | SCU_TASK_DONE_CHECK_RESPONSE, | 1612 | SCU_TASK_DONE_CHECK_RESPONSE, |
1613 | SCI_FAILURE_IO_RESPONSE_VALID); | 1613 | SCI_FAILURE_IO_RESPONSE_VALID); |
1614 | 1614 | ||
@@ -1618,7 +1618,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1618 | struct dev_to_host_fis *frame_header; | 1618 | struct dev_to_host_fis *frame_header; |
1619 | u32 *frame_buffer; | 1619 | u32 *frame_buffer; |
1620 | 1620 | ||
1621 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1621 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1622 | frame_index, | 1622 | frame_index, |
1623 | (void **)&frame_header); | 1623 | (void **)&frame_header); |
1624 | 1624 | ||
@@ -1636,16 +1636,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1636 | 1636 | ||
1637 | switch (frame_header->fis_type) { | 1637 | switch (frame_header->fis_type) { |
1638 | case FIS_REGD2H: | 1638 | case FIS_REGD2H: |
1639 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1639 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1640 | frame_index, | 1640 | frame_index, |
1641 | (void **)&frame_buffer); | 1641 | (void **)&frame_buffer); |
1642 | 1642 | ||
1643 | scic_sds_controller_copy_sata_response(&ireq->stp.rsp, | 1643 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1644 | frame_header, | 1644 | frame_header, |
1645 | frame_buffer); | 1645 | frame_buffer); |
1646 | 1646 | ||
1647 | /* The command has completed with error */ | 1647 | /* The command has completed with error */ |
1648 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, | 1648 | sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, |
1649 | SCI_FAILURE_IO_RESPONSE_VALID); | 1649 | SCI_FAILURE_IO_RESPONSE_VALID); |
1650 | break; | 1650 | break; |
1651 | 1651 | ||
@@ -1655,7 +1655,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1655 | "violation occurred\n", __func__, stp_req, | 1655 | "violation occurred\n", __func__, stp_req, |
1656 | frame_index); | 1656 | frame_index); |
1657 | 1657 | ||
1658 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, | 1658 | sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS, |
1659 | SCI_FAILURE_PROTOCOL_VIOLATION); | 1659 | SCI_FAILURE_PROTOCOL_VIOLATION); |
1660 | break; | 1660 | break; |
1661 | } | 1661 | } |
@@ -1663,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1663 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1663 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1664 | 1664 | ||
1665 | /* Frame has been decoded return it to the controller */ | 1665 | /* Frame has been decoded return it to the controller */ |
1666 | scic_sds_controller_release_frame(ihost, frame_index); | 1666 | sci_controller_release_frame(ihost, frame_index); |
1667 | 1667 | ||
1668 | return status; | 1668 | return status; |
1669 | } | 1669 | } |
@@ -1673,7 +1673,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1673 | struct dev_to_host_fis *frame_header; | 1673 | struct dev_to_host_fis *frame_header; |
1674 | u32 *frame_buffer; | 1674 | u32 *frame_buffer; |
1675 | 1675 | ||
1676 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1676 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1677 | frame_index, | 1677 | frame_index, |
1678 | (void **)&frame_header); | 1678 | (void **)&frame_header); |
1679 | 1679 | ||
@@ -1688,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1688 | switch (frame_header->fis_type) { | 1688 | switch (frame_header->fis_type) { |
1689 | case FIS_PIO_SETUP: | 1689 | case FIS_PIO_SETUP: |
1690 | /* Get from the frame buffer the PIO Setup Data */ | 1690 | /* Get from the frame buffer the PIO Setup Data */ |
1691 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1691 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1692 | frame_index, | 1692 | frame_index, |
1693 | (void **)&frame_buffer); | 1693 | (void **)&frame_buffer); |
1694 | 1694 | ||
@@ -1704,7 +1704,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1704 | /* status: 4th byte in the 3rd dword */ | 1704 | /* status: 4th byte in the 3rd dword */ |
1705 | stp_req->status = (frame_buffer[2] >> 24) & 0xff; | 1705 | stp_req->status = (frame_buffer[2] >> 24) & 0xff; |
1706 | 1706 | ||
1707 | scic_sds_controller_copy_sata_response(&ireq->stp.rsp, | 1707 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1708 | frame_header, | 1708 | frame_header, |
1709 | frame_buffer); | 1709 | frame_buffer); |
1710 | 1710 | ||
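Review note: the PIO Setup handling above pulls fields straight out of the raw FIS dwords. As the comment says, the status byte is the 4th byte of the 3rd dword, i.e. (frame_buffer[2] >> 24) & 0xff. A tiny sketch of that extraction, assuming the same in-memory dword layout the driver sees:

    #include <stdint.h>

    /* Byte 'byte' (0..3) of dword 'dword' in a raw FIS buffer. */
    static uint8_t fis_byte(const uint32_t *fis, unsigned int dword, unsigned int byte)
    {
            return (fis[dword] >> (byte * 8)) & 0xff;
    }

    /* fis_byte(frame_buffer, 2, 3) reads the same byte as the hunk above. */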
@@ -1717,7 +1717,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1717 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); | 1717 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); |
1718 | } else if (task->data_dir == DMA_TO_DEVICE) { | 1718 | } else if (task->data_dir == DMA_TO_DEVICE) { |
1719 | /* Transmit data */ | 1719 | /* Transmit data */ |
1720 | status = scic_sds_stp_request_pio_data_out_transmit_data(ireq); | 1720 | status = sci_stp_request_pio_data_out_transmit_data(ireq); |
1721 | if (status != SCI_SUCCESS) | 1721 | if (status != SCI_SUCCESS) |
1722 | break; | 1722 | break; |
1723 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); | 1723 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); |
@@ -1745,15 +1745,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1745 | break; | 1745 | break; |
1746 | } | 1746 | } |
1747 | 1747 | ||
1748 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1748 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1749 | frame_index, | 1749 | frame_index, |
1750 | (void **)&frame_buffer); | 1750 | (void **)&frame_buffer); |
1751 | 1751 | ||
1752 | scic_sds_controller_copy_sata_response(&ireq->stp.req, | 1752 | sci_controller_copy_sata_response(&ireq->stp.req, |
1753 | frame_header, | 1753 | frame_header, |
1754 | frame_buffer); | 1754 | frame_buffer); |
1755 | 1755 | ||
1756 | scic_sds_request_set_status(ireq, | 1756 | sci_request_set_status(ireq, |
1757 | SCU_TASK_DONE_CHECK_RESPONSE, | 1757 | SCU_TASK_DONE_CHECK_RESPONSE, |
1758 | SCI_FAILURE_IO_RESPONSE_VALID); | 1758 | SCI_FAILURE_IO_RESPONSE_VALID); |
1759 | 1759 | ||
@@ -1766,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1766 | } | 1766 | } |
1767 | 1767 | ||
1768 | /* Frame is decoded return it to the controller */ | 1768 | /* Frame is decoded return it to the controller */ |
1769 | scic_sds_controller_release_frame(ihost, frame_index); | 1769 | sci_controller_release_frame(ihost, frame_index); |
1770 | 1770 | ||
1771 | return status; | 1771 | return status; |
1772 | } | 1772 | } |
@@ -1775,7 +1775,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1775 | struct dev_to_host_fis *frame_header; | 1775 | struct dev_to_host_fis *frame_header; |
1776 | struct sata_fis_data *frame_buffer; | 1776 | struct sata_fis_data *frame_buffer; |
1777 | 1777 | ||
1778 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1778 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1779 | frame_index, | 1779 | frame_index, |
1780 | (void **)&frame_header); | 1780 | (void **)&frame_header); |
1781 | 1781 | ||
@@ -1800,14 +1800,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1800 | frame_index, | 1800 | frame_index, |
1801 | frame_header->fis_type); | 1801 | frame_header->fis_type); |
1802 | 1802 | ||
1803 | scic_sds_request_set_status(ireq, | 1803 | sci_request_set_status(ireq, |
1804 | SCU_TASK_DONE_GOOD, | 1804 | SCU_TASK_DONE_GOOD, |
1805 | SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); | 1805 | SCI_FAILURE_IO_REQUIRES_SCSI_ABORT); |
1806 | 1806 | ||
1807 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1807 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1808 | 1808 | ||
1809 | /* Frame is decoded return it to the controller */ | 1809 | /* Frame is decoded return it to the controller */ |
1810 | scic_sds_controller_release_frame(ihost, frame_index); | 1810 | sci_controller_release_frame(ihost, frame_index); |
1811 | return status; | 1811 | return status; |
1812 | } | 1812 | } |
1813 | 1813 | ||
@@ -1815,15 +1815,15 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1815 | ireq->saved_rx_frame_index = frame_index; | 1815 | ireq->saved_rx_frame_index = frame_index; |
1816 | stp_req->pio_len = 0; | 1816 | stp_req->pio_len = 0; |
1817 | } else { | 1817 | } else { |
1818 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1818 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1819 | frame_index, | 1819 | frame_index, |
1820 | (void **)&frame_buffer); | 1820 | (void **)&frame_buffer); |
1821 | 1821 | ||
1822 | status = scic_sds_stp_request_pio_data_in_copy_data(stp_req, | 1822 | status = sci_stp_request_pio_data_in_copy_data(stp_req, |
1823 | (u8 *)frame_buffer); | 1823 | (u8 *)frame_buffer); |
1824 | 1824 | ||
1825 | /* Frame is decoded return it to the controller */ | 1825 | /* Frame is decoded return it to the controller */ |
1826 | scic_sds_controller_release_frame(ihost, frame_index); | 1826 | sci_controller_release_frame(ihost, frame_index); |
1827 | } | 1827 | } |
1828 | 1828 | ||
1829 | /* Check for the end of the transfer, are there more | 1829 | /* Check for the end of the transfer, are there more |
@@ -1833,7 +1833,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1833 | return status; | 1833 | return status; |
1834 | 1834 | ||
1835 | if ((stp_req->status & ATA_BUSY) == 0) { | 1835 | if ((stp_req->status & ATA_BUSY) == 0) { |
1836 | scic_sds_request_set_status(ireq, | 1836 | sci_request_set_status(ireq, |
1837 | SCU_TASK_DONE_CHECK_RESPONSE, | 1837 | SCU_TASK_DONE_CHECK_RESPONSE, |
1838 | SCI_FAILURE_IO_RESPONSE_VALID); | 1838 | SCI_FAILURE_IO_RESPONSE_VALID); |
1839 | 1839 | ||
@@ -1848,7 +1848,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1848 | struct dev_to_host_fis *frame_header; | 1848 | struct dev_to_host_fis *frame_header; |
1849 | u32 *frame_buffer; | 1849 | u32 *frame_buffer; |
1850 | 1850 | ||
1851 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, | 1851 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1852 | frame_index, | 1852 | frame_index, |
1853 | (void **)&frame_header); | 1853 | (void **)&frame_header); |
1854 | if (status != SCI_SUCCESS) { | 1854 | if (status != SCI_SUCCESS) { |
@@ -1864,16 +1864,16 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1864 | 1864 | ||
1865 | switch (frame_header->fis_type) { | 1865 | switch (frame_header->fis_type) { |
1866 | case FIS_REGD2H: | 1866 | case FIS_REGD2H: |
1867 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1867 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1868 | frame_index, | 1868 | frame_index, |
1869 | (void **)&frame_buffer); | 1869 | (void **)&frame_buffer); |
1870 | 1870 | ||
1871 | scic_sds_controller_copy_sata_response(&ireq->stp.rsp, | 1871 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1872 | frame_header, | 1872 | frame_header, |
1873 | frame_buffer); | 1873 | frame_buffer); |
1874 | 1874 | ||
1875 | /* The command has completed with error */ | 1875 | /* The command has completed with error */ |
1876 | scic_sds_request_set_status(ireq, | 1876 | sci_request_set_status(ireq, |
1877 | SCU_TASK_DONE_CHECK_RESPONSE, | 1877 | SCU_TASK_DONE_CHECK_RESPONSE, |
1878 | SCI_FAILURE_IO_RESPONSE_VALID); | 1878 | SCI_FAILURE_IO_RESPONSE_VALID); |
1879 | break; | 1879 | break; |
@@ -1886,7 +1886,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1886 | stp_req, | 1886 | stp_req, |
1887 | frame_index); | 1887 | frame_index); |
1888 | 1888 | ||
1889 | scic_sds_request_set_status(ireq, | 1889 | sci_request_set_status(ireq, |
1890 | SCU_TASK_DONE_UNEXP_FIS, | 1890 | SCU_TASK_DONE_UNEXP_FIS, |
1891 | SCI_FAILURE_PROTOCOL_VIOLATION); | 1891 | SCI_FAILURE_PROTOCOL_VIOLATION); |
1892 | break; | 1892 | break; |
@@ -1895,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1895 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1895 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1896 | 1896 | ||
1897 | /* Frame has been decoded, return it to the controller */ | 1897 | /* Frame has been decoded, return it to the controller */ |
1898 | scic_sds_controller_release_frame(ihost, frame_index); | 1898 | sci_controller_release_frame(ihost, frame_index); |
1899 | 1899 | ||
1900 | return status; | 1900 | return status; |
1901 | } | 1901 | } |
@@ -1904,7 +1904,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1904 | * TODO: Is it even possible to get an unsolicited frame in the | 1904 | * TODO: Is it even possible to get an unsolicited frame in the |
1905 | * aborting state? | 1905 | * aborting state? |
1906 | */ | 1906 | */ |
1907 | scic_sds_controller_release_frame(ihost, frame_index); | 1907 | sci_controller_release_frame(ihost, frame_index); |
1908 | return SCI_SUCCESS; | 1908 | return SCI_SUCCESS; |
1909 | 1909 | ||
1910 | default: | 1910 | default: |
@@ -1915,7 +1915,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1915 | frame_index, | 1915 | frame_index, |
1916 | state); | 1916 | state); |
1917 | 1917 | ||
1918 | scic_sds_controller_release_frame(ihost, frame_index); | 1918 | sci_controller_release_frame(ihost, frame_index); |
1919 | return SCI_FAILURE_INVALID_STATE; | 1919 | return SCI_FAILURE_INVALID_STATE; |
1920 | } | 1920 | } |
1921 | } | 1921 | } |
@@ -1927,7 +1927,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
1927 | 1927 | ||
1928 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1928 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1929 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1929 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1930 | scic_sds_stp_request_udma_complete_request(ireq, | 1930 | sci_stp_request_udma_complete_request(ireq, |
1931 | SCU_TASK_DONE_GOOD, | 1931 | SCU_TASK_DONE_GOOD, |
1932 | SCI_SUCCESS); | 1932 | SCI_SUCCESS); |
1933 | break; | 1933 | break; |
@@ -1938,10 +1938,10 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
1938 | * completion. | 1938 | * completion. |
1939 | */ | 1939 | */ |
1940 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { | 1940 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { |
1941 | scic_sds_remote_device_suspend(ireq->target_device, | 1941 | sci_remote_device_suspend(ireq->target_device, |
1942 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | 1942 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); |
1943 | 1943 | ||
1944 | scic_sds_stp_request_udma_complete_request(ireq, | 1944 | sci_stp_request_udma_complete_request(ireq, |
1945 | SCU_TASK_DONE_CHECK_RESPONSE, | 1945 | SCU_TASK_DONE_CHECK_RESPONSE, |
1946 | SCI_FAILURE_IO_RESPONSE_VALID); | 1946 | SCI_FAILURE_IO_RESPONSE_VALID); |
1947 | } else { | 1947 | } else { |
@@ -1965,12 +1965,12 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq | |||
1965 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): | 1965 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): |
1966 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): | 1966 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): |
1967 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): | 1967 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR): |
1968 | scic_sds_remote_device_suspend(ireq->target_device, | 1968 | sci_remote_device_suspend(ireq->target_device, |
1969 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); | 1969 | SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); |
1970 | /* Fall through to the default case */ | 1970 | /* Fall through to the default case */ |
1971 | default: | 1971 | default: |
1972 | /* All other completion statuses cause the IO to be complete. */ | 1972 | /* All other completion statuses cause the IO to be complete. */ |
1973 | scic_sds_stp_request_udma_complete_request(ireq, | 1973 | sci_stp_request_udma_complete_request(ireq, |
1974 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 1974 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
1975 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 1975 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
1976 | break; | 1976 | break; |
@@ -1985,7 +1985,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, | |||
1985 | { | 1985 | { |
1986 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 1986 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1987 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 1987 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1988 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 1988 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
1989 | SCI_SUCCESS); | 1989 | SCI_SUCCESS); |
1990 | 1990 | ||
1991 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); | 1991 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); |
@@ -1997,7 +1997,7 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, | |||
1997 | * If a NAK was received, then it is up to the user to retry | 1997 | * If a NAK was received, then it is up to the user to retry |
1998 | * the request. | 1998 | * the request. |
1999 | */ | 1999 | */ |
2000 | scic_sds_request_set_status(ireq, | 2000 | sci_request_set_status(ireq, |
2001 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 2001 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
2002 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 2002 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
2003 | 2003 | ||
@@ -2014,7 +2014,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, | |||
2014 | { | 2014 | { |
2015 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 2015 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2016 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 2016 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2017 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, | 2017 | sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, |
2018 | SCI_SUCCESS); | 2018 | SCI_SUCCESS); |
2019 | 2019 | ||
2020 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); | 2020 | sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); |
@@ -2025,7 +2025,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, | |||
2025 | * a NAK was received, then it is up to the user to retry the | 2025 | * a NAK was received, then it is up to the user to retry the |
2026 | * request. | 2026 | * request. |
2027 | */ | 2027 | */ |
2028 | scic_sds_request_set_status(ireq, | 2028 | sci_request_set_status(ireq, |
2029 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), | 2029 | SCU_NORMALIZE_COMPLETION_STATUS(completion_code), |
2030 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); | 2030 | SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); |
2031 | 2031 | ||
@@ -2037,7 +2037,7 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, | |||
2037 | } | 2037 | } |
2038 | 2038 | ||
2039 | enum sci_status | 2039 | enum sci_status |
2040 | scic_sds_io_request_tc_completion(struct isci_request *ireq, | 2040 | sci_io_request_tc_completion(struct isci_request *ireq, |
2041 | u32 completion_code) | 2041 | u32 completion_code) |
2042 | { | 2042 | { |
2043 | enum sci_base_request_states state; | 2043 | enum sci_base_request_states state; |
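The per-state handlers above (stp_request_udma_await_tc_event and the two soft-reset handlers) are selected according to the request's current state when a task-context completion arrives. A minimal standalone sketch of that switch-on-state dispatch, using hypothetical state and handler names rather than the driver's own:

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's request states. */
enum req_state { REQ_STARTED, REQ_UDMA_WAIT_TC, REQ_COMPLETED };

struct request {
    enum req_state state;
};

/* One handler per state; the real driver passes the raw completion code. */
static int udma_wait_tc_handler(struct request *req, unsigned int code)
{
    printf("UDMA TC completion 0x%x\n", code);
    req->state = REQ_COMPLETED;
    return 0;
}

/* Dispatch on the request's current state, as the tc-completion entry
 * point does, and reject completions arriving in an unexpected state. */
static int tc_completion(struct request *req, unsigned int code)
{
    switch (req->state) {
    case REQ_UDMA_WAIT_TC:
        return udma_wait_tc_handler(req, code);
    default:
        return -1; /* analogous to SCI_FAILURE_INVALID_STATE */
    }
}

int main(void)
{
    struct request req = { .state = REQ_UDMA_WAIT_TC };
    return tc_completion(&req, 0x0);
}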
@@ -2832,7 +2832,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2832 | ); | 2832 | ); |
2833 | 2833 | ||
2834 | /* complete the io request to the core. */ | 2834 | /* complete the io request to the core. */ |
2835 | scic_controller_complete_io(ihost, request->target_device, request); | 2835 | sci_controller_complete_io(ihost, request->target_device, request); |
2836 | isci_put_device(idev); | 2836 | isci_put_device(idev); |
2837 | 2837 | ||
2838 | /* set terminated handle so it cannot be completed or | 2838 | /* set terminated handle so it cannot be completed or |
@@ -2842,7 +2842,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, | |||
2842 | set_bit(IREQ_TERMINATED, &request->flags); | 2842 | set_bit(IREQ_TERMINATED, &request->flags); |
2843 | } | 2843 | } |
2844 | 2844 | ||
2845 | static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm) | 2845 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) |
2846 | { | 2846 | { |
2847 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2847 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2848 | struct domain_device *dev = ireq->target_device->domain_dev; | 2848 | struct domain_device *dev = ireq->target_device->domain_dev; |
@@ -2879,7 +2879,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * | |||
2879 | } | 2879 | } |
2880 | } | 2880 | } |
2881 | 2881 | ||
2882 | static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) | 2882 | static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) |
2883 | { | 2883 | { |
2884 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2884 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2885 | struct isci_host *ihost = ireq->owning_controller; | 2885 | struct isci_host *ihost = ireq->owning_controller; |
@@ -2892,7 +2892,7 @@ static void scic_sds_request_completed_state_enter(struct sci_base_state_machine | |||
2892 | isci_task_request_complete(ihost, ireq, ireq->sci_status); | 2892 | isci_task_request_complete(ihost, ireq, ireq->sci_status); |
2893 | } | 2893 | } |
2894 | 2894 | ||
2895 | static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm) | 2895 | static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) |
2896 | { | 2896 | { |
2897 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2897 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2898 | 2898 | ||
@@ -2900,31 +2900,31 @@ static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine | |||
2900 | ireq->tc->abort = 1; | 2900 | ireq->tc->abort = 1; |
2901 | } | 2901 | } |
2902 | 2902 | ||
2903 | static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) | 2903 | static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
2904 | { | 2904 | { |
2905 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2905 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2906 | 2906 | ||
2907 | scic_sds_remote_device_set_working_request(ireq->target_device, | 2907 | sci_remote_device_set_working_request(ireq->target_device, |
2908 | ireq); | 2908 | ireq); |
2909 | } | 2909 | } |
2910 | 2910 | ||
2911 | static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) | 2911 | static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
2912 | { | 2912 | { |
2913 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2913 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2914 | 2914 | ||
2915 | scic_sds_remote_device_set_working_request(ireq->target_device, | 2915 | sci_remote_device_set_working_request(ireq->target_device, |
2916 | ireq); | 2916 | ireq); |
2917 | } | 2917 | } |
2918 | 2918 | ||
2919 | static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) | 2919 | static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) |
2920 | { | 2920 | { |
2921 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2921 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2922 | 2922 | ||
2923 | scic_sds_remote_device_set_working_request(ireq->target_device, | 2923 | sci_remote_device_set_working_request(ireq->target_device, |
2924 | ireq); | 2924 | ireq); |
2925 | } | 2925 | } |
2926 | 2926 | ||
2927 | static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) | 2927 | static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) |
2928 | { | 2928 | { |
2929 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2929 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2930 | struct scu_task_context *tc = ireq->tc; | 2930 | struct scu_task_context *tc = ireq->tc; |
@@ -2938,22 +2938,22 @@ static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_complet | |||
2938 | /* Clear the TC control bit */ | 2938 | /* Clear the TC control bit */ |
2939 | tc->control_frame = 0; | 2939 | tc->control_frame = 0; |
2940 | 2940 | ||
2941 | status = scic_controller_continue_io(ireq); | 2941 | status = sci_controller_continue_io(ireq); |
2942 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); | 2942 | WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n"); |
2943 | } | 2943 | } |
2944 | 2944 | ||
2945 | static const struct sci_base_state scic_sds_request_state_table[] = { | 2945 | static const struct sci_base_state sci_request_state_table[] = { |
2946 | [SCI_REQ_INIT] = { }, | 2946 | [SCI_REQ_INIT] = { }, |
2947 | [SCI_REQ_CONSTRUCTED] = { }, | 2947 | [SCI_REQ_CONSTRUCTED] = { }, |
2948 | [SCI_REQ_STARTED] = { | 2948 | [SCI_REQ_STARTED] = { |
2949 | .enter_state = scic_sds_request_started_state_enter, | 2949 | .enter_state = sci_request_started_state_enter, |
2950 | }, | 2950 | }, |
2951 | [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { | 2951 | [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { |
2952 | .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter, | 2952 | .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, |
2953 | }, | 2953 | }, |
2954 | [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, | 2954 | [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, |
2955 | [SCI_REQ_STP_PIO_WAIT_H2D] = { | 2955 | [SCI_REQ_STP_PIO_WAIT_H2D] = { |
2956 | .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter, | 2956 | .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, |
2957 | }, | 2957 | }, |
2958 | [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, | 2958 | [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, |
2959 | [SCI_REQ_STP_PIO_DATA_IN] = { }, | 2959 | [SCI_REQ_STP_PIO_DATA_IN] = { }, |
@@ -2961,10 +2961,10 @@ static const struct sci_base_state scic_sds_request_state_table[] = { | |||
2961 | [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, | 2961 | [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, |
2962 | [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, | 2962 | [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, |
2963 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { | 2963 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = { |
2964 | .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, | 2964 | .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter, |
2965 | }, | 2965 | }, |
2966 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { | 2966 | [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = { |
2967 | .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, | 2967 | .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter, |
2968 | }, | 2968 | }, |
2969 | [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, | 2969 | [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { }, |
2970 | [SCI_REQ_TASK_WAIT_TC_COMP] = { }, | 2970 | [SCI_REQ_TASK_WAIT_TC_COMP] = { }, |
@@ -2972,20 +2972,20 @@ static const struct sci_base_state scic_sds_request_state_table[] = { | |||
2972 | [SCI_REQ_SMP_WAIT_RESP] = { }, | 2972 | [SCI_REQ_SMP_WAIT_RESP] = { }, |
2973 | [SCI_REQ_SMP_WAIT_TC_COMP] = { }, | 2973 | [SCI_REQ_SMP_WAIT_TC_COMP] = { }, |
2974 | [SCI_REQ_COMPLETED] = { | 2974 | [SCI_REQ_COMPLETED] = { |
2975 | .enter_state = scic_sds_request_completed_state_enter, | 2975 | .enter_state = sci_request_completed_state_enter, |
2976 | }, | 2976 | }, |
2977 | [SCI_REQ_ABORTING] = { | 2977 | [SCI_REQ_ABORTING] = { |
2978 | .enter_state = scic_sds_request_aborting_state_enter, | 2978 | .enter_state = sci_request_aborting_state_enter, |
2979 | }, | 2979 | }, |
2980 | [SCI_REQ_FINAL] = { }, | 2980 | [SCI_REQ_FINAL] = { }, |
2981 | }; | 2981 | }; |
2982 | 2982 | ||
2983 | static void | 2983 | static void |
2984 | scic_sds_general_request_construct(struct isci_host *ihost, | 2984 | sci_general_request_construct(struct isci_host *ihost, |
2985 | struct isci_remote_device *idev, | 2985 | struct isci_remote_device *idev, |
2986 | struct isci_request *ireq) | 2986 | struct isci_request *ireq) |
2987 | { | 2987 | { |
2988 | sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT); | 2988 | sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); |
2989 | 2989 | ||
2990 | ireq->target_device = idev; | 2990 | ireq->target_device = idev; |
2991 | ireq->protocol = SCIC_NO_PROTOCOL; | 2991 | ireq->protocol = SCIC_NO_PROTOCOL; |
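The request state table above pairs each SCI_REQ_* state with an optional .enter_state hook, and sci_general_request_construct() seeds the machine at SCI_REQ_INIT via sci_init_sm(). A small self-contained sketch of that table-driven enter-hook pattern (hypothetical names, not the driver's sci_base_state_machine API):

#include <stddef.h>
#include <stdio.h>

enum state { ST_INIT, ST_STARTED, ST_COMPLETED, ST_MAX };

struct sm;
struct state_desc {
    void (*enter_state)(struct sm *sm); /* optional, may be NULL */
};

struct sm {
    const struct state_desc *table;
    enum state current;
};

static void started_enter(struct sm *sm) { (void)sm; printf("entered STARTED\n"); }

static const struct state_desc table[ST_MAX] = {
    [ST_INIT]      = { },
    [ST_STARTED]   = { .enter_state = started_enter },
    [ST_COMPLETED] = { },
};

/* Change state and run the destination state's enter hook, if any. */
static void change_state(struct sm *sm, enum state next)
{
    sm->current = next;
    if (sm->table[next].enter_state)
        sm->table[next].enter_state(sm);
}

int main(void)
{
    struct sm sm = { .table = table, .current = ST_INIT };
    change_state(&sm, ST_STARTED);
    return 0;
}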
@@ -2997,7 +2997,7 @@ scic_sds_general_request_construct(struct isci_host *ihost, | |||
2997 | } | 2997 | } |
2998 | 2998 | ||
2999 | static enum sci_status | 2999 | static enum sci_status |
3000 | scic_io_request_construct(struct isci_host *ihost, | 3000 | sci_io_request_construct(struct isci_host *ihost, |
3001 | struct isci_remote_device *idev, | 3001 | struct isci_remote_device *idev, |
3002 | struct isci_request *ireq) | 3002 | struct isci_request *ireq) |
3003 | { | 3003 | { |
@@ -3005,7 +3005,7 @@ scic_io_request_construct(struct isci_host *ihost, | |||
3005 | enum sci_status status = SCI_SUCCESS; | 3005 | enum sci_status status = SCI_SUCCESS; |
3006 | 3006 | ||
3007 | /* Build the common part of the request */ | 3007 | /* Build the common part of the request */ |
3008 | scic_sds_general_request_construct(ihost, idev, ireq); | 3008 | sci_general_request_construct(ihost, idev, ireq); |
3009 | 3009 | ||
3010 | if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) | 3010 | if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) |
3011 | return SCI_FAILURE_INVALID_REMOTE_DEVICE; | 3011 | return SCI_FAILURE_INVALID_REMOTE_DEVICE; |
@@ -3024,7 +3024,7 @@ scic_io_request_construct(struct isci_host *ihost, | |||
3024 | return status; | 3024 | return status; |
3025 | } | 3025 | } |
3026 | 3026 | ||
3027 | enum sci_status scic_task_request_construct(struct isci_host *ihost, | 3027 | enum sci_status sci_task_request_construct(struct isci_host *ihost, |
3028 | struct isci_remote_device *idev, | 3028 | struct isci_remote_device *idev, |
3029 | u16 io_tag, struct isci_request *ireq) | 3029 | u16 io_tag, struct isci_request *ireq) |
3030 | { | 3030 | { |
@@ -3032,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct isci_host *ihost, | |||
3032 | enum sci_status status = SCI_SUCCESS; | 3032 | enum sci_status status = SCI_SUCCESS; |
3033 | 3033 | ||
3034 | /* Build the common part of the request */ | 3034 | /* Build the common part of the request */ |
3035 | scic_sds_general_request_construct(ihost, idev, ireq); | 3035 | sci_general_request_construct(ihost, idev, ireq); |
3036 | 3036 | ||
3037 | if (dev->dev_type == SAS_END_DEV || | 3037 | if (dev->dev_type == SAS_END_DEV || |
3038 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 3038 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
@@ -3053,7 +3053,7 @@ static enum sci_status isci_request_ssp_request_construct( | |||
3053 | "%s: request = %p\n", | 3053 | "%s: request = %p\n", |
3054 | __func__, | 3054 | __func__, |
3055 | request); | 3055 | request); |
3056 | status = scic_io_request_construct_basic_ssp(request); | 3056 | status = sci_io_request_construct_basic_ssp(request); |
3057 | return status; | 3057 | return status; |
3058 | } | 3058 | } |
3059 | 3059 | ||
@@ -3074,7 +3074,7 @@ static enum sci_status isci_request_stp_request_construct( | |||
3074 | */ | 3074 | */ |
3075 | register_fis = isci_sata_task_to_fis_copy(task); | 3075 | register_fis = isci_sata_task_to_fis_copy(task); |
3076 | 3076 | ||
3077 | status = scic_io_request_construct_basic_sata(request); | 3077 | status = sci_io_request_construct_basic_sata(request); |
3078 | 3078 | ||
3079 | /* Set the ncq tag in the fis, from the queue | 3079 | /* Set the ncq tag in the fis, from the queue |
3080 | * command in the task. | 3080 | * command in the task. |
@@ -3091,7 +3091,7 @@ static enum sci_status isci_request_stp_request_construct( | |||
3091 | } | 3091 | } |
3092 | 3092 | ||
3093 | static enum sci_status | 3093 | static enum sci_status |
3094 | scic_io_request_construct_smp(struct device *dev, | 3094 | sci_io_request_construct_smp(struct device *dev, |
3095 | struct isci_request *ireq, | 3095 | struct isci_request *ireq, |
3096 | struct sas_task *task) | 3096 | struct sas_task *task) |
3097 | { | 3097 | { |
@@ -3141,8 +3141,8 @@ scic_io_request_construct_smp(struct device *dev, | |||
3141 | 3141 | ||
3142 | task_context = ireq->tc; | 3142 | task_context = ireq->tc; |
3143 | 3143 | ||
3144 | idev = scic_sds_request_get_device(ireq); | 3144 | idev = sci_request_get_device(ireq); |
3145 | iport = scic_sds_request_get_port(ireq); | 3145 | iport = sci_request_get_port(ireq); |
3146 | 3146 | ||
3147 | /* | 3147 | /* |
3148 | * Fill in the TC with its required data | 3148 | * Fill in the TC with its required data |
@@ -3152,8 +3152,8 @@ scic_io_request_construct_smp(struct device *dev, | |||
3152 | task_context->initiator_request = 1; | 3152 | task_context->initiator_request = 1; |
3153 | task_context->connection_rate = idev->connection_rate; | 3153 | task_context->connection_rate = idev->connection_rate; |
3154 | task_context->protocol_engine_index = | 3154 | task_context->protocol_engine_index = |
3155 | scic_sds_controller_get_protocol_engine_group(ihost); | 3155 | sci_controller_get_protocol_engine_group(ihost); |
3156 | task_context->logical_port_index = scic_sds_port_get_index(iport); | 3156 | task_context->logical_port_index = sci_port_get_index(iport); |
3157 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; | 3157 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; |
3158 | task_context->abort = 0; | 3158 | task_context->abort = 0; |
3159 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 3159 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
@@ -3195,9 +3195,9 @@ scic_io_request_construct_smp(struct device *dev, | |||
3195 | task_context->task_phase = 0; | 3195 | task_context->task_phase = 0; |
3196 | 3196 | ||
3197 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 3197 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
3198 | (scic_sds_controller_get_protocol_engine_group(ihost) << | 3198 | (sci_controller_get_protocol_engine_group(ihost) << |
3199 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 3199 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
3200 | (scic_sds_port_get_index(iport) << | 3200 | (sci_port_get_index(iport) << |
3201 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 3201 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
3202 | ISCI_TAG_TCI(ireq->io_tag)); | 3202 | ISCI_TAG_TCI(ireq->io_tag)); |
3203 | /* | 3203 | /* |
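The post_context word assembled in the hunk above ORs together a post-TC request type, the protocol engine group, the logical port index, and the task context index taken from the I/O tag. An illustrative packing routine with made-up shift values (the real SCU_CONTEXT_COMMAND_* definitions are not shown in this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions only; the actual SCU_CONTEXT_COMMAND_* shifts
 * live in the driver's headers and may differ. */
#define EXAMPLE_TYPE_POST_TC   (0x1u << 28)
#define EXAMPLE_PE_GROUP_SHIFT 24
#define EXAMPLE_PORT_SHIFT     16

static uint32_t build_post_context(uint32_t pe_group, uint32_t port, uint32_t tci)
{
    return EXAMPLE_TYPE_POST_TC |
           (pe_group << EXAMPLE_PE_GROUP_SHIFT) |
           (port << EXAMPLE_PORT_SHIFT) |
           tci; /* task context index in the low bits */
}

int main(void)
{
    printf("post_context = 0x%08x\n", build_post_context(0, 1, 42));
    return 0;
}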
@@ -3229,7 +3229,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) | |||
3229 | struct device *dev = &ireq->isci_host->pdev->dev; | 3229 | struct device *dev = &ireq->isci_host->pdev->dev; |
3230 | enum sci_status status = SCI_FAILURE; | 3230 | enum sci_status status = SCI_FAILURE; |
3231 | 3231 | ||
3232 | status = scic_io_request_construct_smp(dev, ireq, task); | 3232 | status = sci_io_request_construct_smp(dev, ireq, task); |
3233 | if (status != SCI_SUCCESS) | 3233 | if (status != SCI_SUCCESS) |
3234 | dev_warn(&ireq->isci_host->pdev->dev, | 3234 | dev_warn(&ireq->isci_host->pdev->dev, |
3235 | "%s: failed with status = %d\n", | 3235 | "%s: failed with status = %d\n", |
@@ -3283,7 +3283,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost, | |||
3283 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 3283 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; |
3284 | } | 3284 | } |
3285 | 3285 | ||
3286 | status = scic_io_request_construct(ihost, idev, request); | 3286 | status = sci_io_request_construct(ihost, idev, request); |
3287 | 3287 | ||
3288 | if (status != SCI_SUCCESS) { | 3288 | if (status != SCI_SUCCESS) { |
3289 | dev_warn(&ihost->pdev->dev, | 3289 | dev_warn(&ihost->pdev->dev, |
@@ -3388,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3388 | * request was built that way (ie. | 3388 | * request was built that way (ie. |
3389 | * ireq->is_task_management_request is false). | 3389 | * ireq->is_task_management_request is false). |
3390 | */ | 3390 | */ |
3391 | status = scic_controller_start_task(ihost, | 3391 | status = sci_controller_start_task(ihost, |
3392 | idev, | 3392 | idev, |
3393 | ireq); | 3393 | ireq); |
3394 | } else { | 3394 | } else { |
@@ -3396,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3396 | } | 3396 | } |
3397 | } else { | 3397 | } else { |
3398 | /* send the request, let the core assign the IO TAG. */ | 3398 | /* send the request, let the core assign the IO TAG. */ |
3399 | status = scic_controller_start_io(ihost, idev, | 3399 | status = sci_controller_start_io(ihost, idev, |
3400 | ireq); | 3400 | ireq); |
3401 | } | 3401 | } |
3402 | 3402 | ||
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 0cafcead7a01..08fcf98e70f4 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
@@ -301,75 +301,75 @@ enum sci_base_request_states { | |||
301 | }; | 301 | }; |
302 | 302 | ||
303 | /** | 303 | /** |
304 | * scic_sds_request_get_controller() - | 304 | * sci_request_get_controller() - |
305 | * | 305 | * |
306 | * This macro will return the controller for this io request object | 306 | * This macro will return the controller for this io request object |
307 | */ | 307 | */ |
308 | #define scic_sds_request_get_controller(ireq) \ | 308 | #define sci_request_get_controller(ireq) \ |
309 | ((ireq)->owning_controller) | 309 | ((ireq)->owning_controller) |
310 | 310 | ||
311 | /** | 311 | /** |
312 | * scic_sds_request_get_device() - | 312 | * sci_request_get_device() - |
313 | * | 313 | * |
314 | * This macro will return the device for this io request object | 314 | * This macro will return the device for this io request object |
315 | */ | 315 | */ |
316 | #define scic_sds_request_get_device(ireq) \ | 316 | #define sci_request_get_device(ireq) \ |
317 | ((ireq)->target_device) | 317 | ((ireq)->target_device) |
318 | 318 | ||
319 | /** | 319 | /** |
320 | * scic_sds_request_get_port() - | 320 | * sci_request_get_port() - |
321 | * | 321 | * |
322 | * This macro will return the port for this io request object | 322 | * This macro will return the port for this io request object |
323 | */ | 323 | */ |
324 | #define scic_sds_request_get_port(ireq) \ | 324 | #define sci_request_get_port(ireq) \ |
325 | scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq)) | 325 | sci_remote_device_get_port(sci_request_get_device(ireq)) |
326 | 326 | ||
327 | /** | 327 | /** |
328 | * scic_sds_request_get_post_context() - | 328 | * sci_request_get_post_context() - |
329 | * | 329 | * |
330 | * This macro returns the constructed post context result for the io request. | 330 | * This macro returns the constructed post context result for the io request. |
331 | */ | 331 | */ |
332 | #define scic_sds_request_get_post_context(ireq) \ | 332 | #define sci_request_get_post_context(ireq) \ |
333 | ((ireq)->post_context) | 333 | ((ireq)->post_context) |
334 | 334 | ||
335 | /** | 335 | /** |
336 | * scic_sds_request_get_task_context() - | 336 | * sci_request_get_task_context() - |
337 | * | 337 | * |
338 | * This is a helper macro to return the os handle for this request object. | 338 | * This is a helper macro to return the os handle for this request object. |
339 | */ | 339 | */ |
340 | #define scic_sds_request_get_task_context(request) \ | 340 | #define sci_request_get_task_context(request) \ |
341 | ((request)->task_context_buffer) | 341 | ((request)->task_context_buffer) |
342 | 342 | ||
343 | /** | 343 | /** |
344 | * scic_sds_request_set_status() - | 344 | * sci_request_set_status() - |
345 | * | 345 | * |
346 | * This macro will set the scu hardware status and sci request completion | 346 | * This macro will set the scu hardware status and sci request completion |
347 | * status for an io request. | 347 | * status for an io request. |
348 | */ | 348 | */ |
349 | #define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \ | 349 | #define sci_request_set_status(request, scu_status_code, sci_status_code) \ |
350 | { \ | 350 | { \ |
351 | (request)->scu_status = (scu_status_code); \ | 351 | (request)->scu_status = (scu_status_code); \ |
352 | (request)->sci_status = (sci_status_code); \ | 352 | (request)->sci_status = (sci_status_code); \ |
353 | } | 353 | } |
354 | 354 | ||
355 | enum sci_status scic_sds_request_start(struct isci_request *ireq); | 355 | enum sci_status sci_request_start(struct isci_request *ireq); |
356 | enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq); | 356 | enum sci_status sci_io_request_terminate(struct isci_request *ireq); |
357 | enum sci_status | 357 | enum sci_status |
358 | scic_sds_io_request_event_handler(struct isci_request *ireq, | 358 | sci_io_request_event_handler(struct isci_request *ireq, |
359 | u32 event_code); | 359 | u32 event_code); |
360 | enum sci_status | 360 | enum sci_status |
361 | scic_sds_io_request_frame_handler(struct isci_request *ireq, | 361 | sci_io_request_frame_handler(struct isci_request *ireq, |
362 | u32 frame_index); | 362 | u32 frame_index); |
363 | enum sci_status | 363 | enum sci_status |
364 | scic_sds_task_request_terminate(struct isci_request *ireq); | 364 | sci_task_request_terminate(struct isci_request *ireq); |
365 | extern enum sci_status | 365 | extern enum sci_status |
366 | scic_sds_request_complete(struct isci_request *ireq); | 366 | sci_request_complete(struct isci_request *ireq); |
367 | extern enum sci_status | 367 | extern enum sci_status |
368 | scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code); | 368 | sci_io_request_tc_completion(struct isci_request *ireq, u32 code); |
369 | 369 | ||
370 | /* XXX open code in caller */ | 370 | /* XXX open code in caller */ |
371 | static inline dma_addr_t | 371 | static inline dma_addr_t |
372 | scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) | 372 | sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) |
373 | { | 373 | { |
374 | 374 | ||
375 | char *requested_addr = (char *)virt_addr; | 375 | char *requested_addr = (char *)virt_addr; |
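Only the first line of sci_io_request_get_dma_addr() is visible in this hunk; the usual shape of such a helper is to take the byte offset of the virtual address within the request allocation and add it to the request's DMA base. A standalone sketch of that idea, with a hypothetical dma_base field standing in for whatever isci_request actually stores:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t; /* stand-in for the kernel type */

struct fake_request {
    dma_addr_t dma_base; /* hypothetical: DMA address of this allocation */
    char payload[64];
};

/* Translate a CPU pointer inside the request into the matching DMA address
 * by reusing the byte offset from the start of the allocation. */
static dma_addr_t get_dma_addr(struct fake_request *req, void *virt)
{
    char *requested = virt;
    return req->dma_base + (requested - (char *)req);
}

int main(void)
{
    struct fake_request req = { .dma_base = 0x1000 };
    printf("0x%llx\n", (unsigned long long)get_dma_addr(&req, &req.payload[8]));
    return 0;
}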
@@ -500,17 +500,17 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
500 | void isci_terminate_pending_requests(struct isci_host *ihost, | 500 | void isci_terminate_pending_requests(struct isci_host *ihost, |
501 | struct isci_remote_device *idev); | 501 | struct isci_remote_device *idev); |
502 | enum sci_status | 502 | enum sci_status |
503 | scic_task_request_construct(struct isci_host *ihost, | 503 | sci_task_request_construct(struct isci_host *ihost, |
504 | struct isci_remote_device *idev, | 504 | struct isci_remote_device *idev, |
505 | u16 io_tag, | 505 | u16 io_tag, |
506 | struct isci_request *ireq); | 506 | struct isci_request *ireq); |
507 | enum sci_status | 507 | enum sci_status |
508 | scic_task_request_construct_ssp(struct isci_request *ireq); | 508 | sci_task_request_construct_ssp(struct isci_request *ireq); |
509 | enum sci_status | 509 | enum sci_status |
510 | scic_task_request_construct_sata(struct isci_request *ireq); | 510 | sci_task_request_construct_sata(struct isci_request *ireq); |
511 | void | 511 | void |
512 | scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); | 512 | sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag); |
513 | void scic_sds_smp_request_copy_response(struct isci_request *ireq); | 513 | void sci_smp_request_copy_response(struct isci_request *ireq); |
514 | 514 | ||
515 | static inline int isci_task_is_ncq_recovery(struct sas_task *task) | 515 | static inline int isci_task_is_ncq_recovery(struct sas_task *task) |
516 | { | 516 | { |
diff --git a/drivers/scsi/isci/sata.c b/drivers/scsi/isci/sata.c index 87d8cc1a6e39..47b96c21548f 100644 --- a/drivers/scsi/isci/sata.c +++ b/drivers/scsi/isci/sata.c | |||
@@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag( | |||
116 | struct isci_request *request = task->lldd_task; | 116 | struct isci_request *request = task->lldd_task; |
117 | 117 | ||
118 | register_fis->sector_count = qc->tag << 3; | 118 | register_fis->sector_count = qc->tag << 3; |
119 | scic_stp_io_request_set_ncq_tag(request, qc->tag); | 119 | sci_stp_io_request_set_ncq_tag(request, qc->tag); |
120 | } | 120 | } |
121 | 121 | ||
122 | /** | 122 | /** |
@@ -187,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire | |||
187 | /* core builds the protocol specific request | 187 | /* core builds the protocol specific request |
188 | * based on the h2d fis. | 188 | * based on the h2d fis. |
189 | */ | 189 | */ |
190 | status = scic_task_request_construct_sata(ireq); | 190 | status = sci_task_request_construct_sata(ireq); |
191 | 191 | ||
192 | return status; | 192 | return status; |
193 | } | 193 | } |
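The qc->tag << 3 in the first sata.c hunk above places the 5-bit NCQ tag into bits 7:3 of the FIS sector count field before sci_stp_io_request_set_ncq_tag() records it for the core; that is where SATA expects the tag for queued commands. A tiny standalone illustration of the bit placement:

#include <stdint.h>
#include <stdio.h>

/* NCQ carries the 5-bit queue tag in bits 7:3 of the FIS sector count. */
static uint8_t ncq_sector_count(uint8_t tag)
{
    return (uint8_t)((tag & 0x1f) << 3);
}

int main(void)
{
    printf("tag 5 -> sector_count 0x%02x\n", ncq_sector_count(5));
    return 0;
}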
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 3a1fc55a7557..d040aa2f3722 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -257,12 +257,12 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
257 | return NULL; | 257 | return NULL; |
258 | 258 | ||
259 | /* let the core do its construct. */ | 259 | /* let the core do its construct. */ |
260 | status = scic_task_request_construct(ihost, idev, tag, | 260 | status = sci_task_request_construct(ihost, idev, tag, |
261 | ireq); | 261 | ireq); |
262 | 262 | ||
263 | if (status != SCI_SUCCESS) { | 263 | if (status != SCI_SUCCESS) { |
264 | dev_warn(&ihost->pdev->dev, | 264 | dev_warn(&ihost->pdev->dev, |
265 | "%s: scic_task_request_construct failed - " | 265 | "%s: sci_task_request_construct failed - " |
266 | "status = 0x%x\n", | 266 | "status = 0x%x\n", |
267 | __func__, | 267 | __func__, |
268 | status); | 268 | status); |
@@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
272 | /* XXX convert to get this from task->tproto like other drivers */ | 272 | /* XXX convert to get this from task->tproto like other drivers */ |
273 | if (dev->dev_type == SAS_END_DEV) { | 273 | if (dev->dev_type == SAS_END_DEV) { |
274 | isci_tmf->proto = SAS_PROTOCOL_SSP; | 274 | isci_tmf->proto = SAS_PROTOCOL_SSP; |
275 | status = scic_task_request_construct_ssp(ireq); | 275 | status = sci_task_request_construct_ssp(ireq); |
276 | if (status != SCI_SUCCESS) | 276 | if (status != SCI_SUCCESS) |
277 | return NULL; | 277 | return NULL; |
278 | } | 278 | } |
@@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
332 | spin_lock_irqsave(&ihost->scic_lock, flags); | 332 | spin_lock_irqsave(&ihost->scic_lock, flags); |
333 | 333 | ||
334 | /* start the TMF io. */ | 334 | /* start the TMF io. */ |
335 | status = scic_controller_start_task(ihost, idev, ireq); | 335 | status = sci_controller_start_task(ihost, idev, ireq); |
336 | 336 | ||
337 | if (status != SCI_TASK_SUCCESS) { | 337 | if (status != SCI_TASK_SUCCESS) { |
338 | dev_warn(&ihost->pdev->dev, | 338 | dev_warn(&ihost->pdev->dev, |
@@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
364 | if (tmf->cb_state_func != NULL) | 364 | if (tmf->cb_state_func != NULL) |
365 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); | 365 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); |
366 | 366 | ||
367 | scic_controller_terminate_request(ihost, | 367 | sci_controller_terminate_request(ihost, |
368 | idev, | 368 | idev, |
369 | ireq); | 369 | ireq); |
370 | 370 | ||
@@ -556,7 +556,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
556 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | 556 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { |
557 | was_terminated = true; | 557 | was_terminated = true; |
558 | needs_cleanup_handling = true; | 558 | needs_cleanup_handling = true; |
559 | status = scic_controller_terminate_request(ihost, | 559 | status = sci_controller_terminate_request(ihost, |
560 | idev, | 560 | idev, |
561 | isci_request); | 561 | isci_request); |
562 | } | 562 | } |
@@ -569,7 +569,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, | |||
569 | */ | 569 | */ |
570 | if (status != SCI_SUCCESS) { | 570 | if (status != SCI_SUCCESS) { |
571 | dev_err(&ihost->pdev->dev, | 571 | dev_err(&ihost->pdev->dev, |
572 | "%s: scic_controller_terminate_request" | 572 | "%s: sci_controller_terminate_request" |
573 | " returned = 0x%x\n", | 573 | " returned = 0x%x\n", |
574 | __func__, status); | 574 | __func__, status); |
575 | 575 | ||
@@ -1251,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1251 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | 1251 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ |
1252 | tmf_complete = tmf->complete; | 1252 | tmf_complete = tmf->complete; |
1253 | 1253 | ||
1254 | scic_controller_complete_io(ihost, ireq->target_device, ireq); | 1254 | sci_controller_complete_io(ihost, ireq->target_device, ireq); |
1255 | /* set the 'terminated' flag handle to make sure it cannot be terminated | 1255 | /* set the 'terminated' flag handle to make sure it cannot be terminated |
1256 | * or completed again. | 1256 | * or completed again. |
1257 | */ | 1257 | */ |
@@ -1514,12 +1514,12 @@ static int isci_reset_device(struct isci_host *ihost, | |||
1514 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); | 1514 | dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); |
1515 | 1515 | ||
1516 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1516 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1517 | status = scic_remote_device_reset(idev); | 1517 | status = sci_remote_device_reset(idev); |
1518 | if (status != SCI_SUCCESS) { | 1518 | if (status != SCI_SUCCESS) { |
1519 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1519 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1520 | 1520 | ||
1521 | dev_warn(&ihost->pdev->dev, | 1521 | dev_warn(&ihost->pdev->dev, |
1522 | "%s: scic_remote_device_reset(%p) returned %d!\n", | 1522 | "%s: sci_remote_device_reset(%p) returned %d!\n", |
1523 | __func__, idev, status); | 1523 | __func__, idev, status); |
1524 | 1524 | ||
1525 | return TMF_RESP_FUNC_FAILED; | 1525 | return TMF_RESP_FUNC_FAILED; |
@@ -1540,7 +1540,7 @@ static int isci_reset_device(struct isci_host *ihost, | |||
1540 | 1540 | ||
1541 | /* Since all pending TCs have been cleaned, resume the RNC. */ | 1541 | /* Since all pending TCs have been cleaned, resume the RNC. */ |
1542 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1542 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1543 | status = scic_remote_device_reset_complete(idev); | 1543 | status = sci_remote_device_reset_complete(idev); |
1544 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1544 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1545 | 1545 | ||
1546 | /* If this is a device on an expander, bring the phy back up. */ | 1546 | /* If this is a device on an expander, bring the phy back up. */ |
@@ -1560,7 +1560,7 @@ static int isci_reset_device(struct isci_host *ihost, | |||
1560 | 1560 | ||
1561 | if (status != SCI_SUCCESS) { | 1561 | if (status != SCI_SUCCESS) { |
1562 | dev_warn(&ihost->pdev->dev, | 1562 | dev_warn(&ihost->pdev->dev, |
1563 | "%s: scic_remote_device_reset_complete(%p) " | 1563 | "%s: sci_remote_device_reset_complete(%p) " |
1564 | "returned %d!\n", __func__, idev, status); | 1564 | "returned %d!\n", __func__, idev, status); |
1565 | } | 1565 | } |
1566 | 1566 | ||
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index a0e6f89fc6a1..e9e1e2abacb9 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c | |||
@@ -57,10 +57,10 @@ | |||
57 | #include "unsolicited_frame_control.h" | 57 | #include "unsolicited_frame_control.h" |
58 | #include "registers.h" | 58 | #include "registers.h" |
59 | 59 | ||
60 | int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) | 60 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost) |
61 | { | 61 | { |
62 | struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control; | 62 | struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; |
63 | struct scic_sds_unsolicited_frame *uf; | 63 | struct sci_unsolicited_frame *uf; |
64 | u32 buf_len, header_len, i; | 64 | u32 buf_len, header_len, i; |
65 | dma_addr_t dma; | 65 | dma_addr_t dma; |
66 | size_t size; | 66 | size_t size; |
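Judging by the locals in the hunk above (buf_len, header_len, dma, size), the construct routine appears to size a single DMA allocation covering the 1KB payload buffers, the per-frame headers, and the 64-bit address-table entries for SCU_MAX_UNSOLICITED_FRAMES frames. The body is elided here, so the following is only an assumed sketch with illustrative counts and sizes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative numbers only: the driver's SCU_MAX_UNSOLICITED_FRAMES and
 * per-frame buffer/header sizes come from its own headers. */
#define EXAMPLE_MAX_UF      128
#define EXAMPLE_UF_BUF_SIZE 1024  /* 1KB payload buffers per the header docs */
#define EXAMPLE_UF_HDR_SIZE 64

int main(void)
{
    /* One contiguous region: payload buffers, then headers, then the
     * 64-bit address-table entries the hardware reads. */
    size_t size = EXAMPLE_MAX_UF *
                  (EXAMPLE_UF_BUF_SIZE + EXAMPLE_UF_HDR_SIZE + sizeof(uint64_t));
    printf("would allocate %zu bytes\n", size);
    return 0;
}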
@@ -139,23 +139,14 @@ int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) | |||
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | /** | 142 | enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, |
143 | * This method returns the frame header for the specified frame index. | 143 | u32 frame_index, |
144 | * @uf_control: | 144 | void **frame_header) |
145 | * @frame_index: | ||
146 | * @frame_header: | ||
147 | * | ||
148 | * enum sci_status | ||
149 | */ | ||
150 | enum sci_status scic_sds_unsolicited_frame_control_get_header( | ||
151 | struct scic_sds_unsolicited_frame_control *uf_control, | ||
152 | u32 frame_index, | ||
153 | void **frame_header) | ||
154 | { | 145 | { |
155 | if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { | 146 | if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { |
156 | /* | 147 | /* Skip the first word in the frame since this is a control word used |
157 | * Skip the first word in the frame since this is a control word used | 148 | * by the hardware. |
158 | * by the hardware. */ | 149 | */ |
159 | *frame_header = &uf_control->buffers.array[frame_index].header->data; | 150 | *frame_header = &uf_control->buffers.array[frame_index].header->data; |
160 | 151 | ||
161 | return SCI_SUCCESS; | 152 | return SCI_SUCCESS; |
@@ -164,18 +155,9 @@ enum sci_status scic_sds_unsolicited_frame_control_get_header( | |||
164 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 155 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
165 | } | 156 | } |
166 | 157 | ||
167 | /** | 158 | enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control, |
168 | * This method returns the frame buffer for the specified frame index. | 159 | u32 frame_index, |
169 | * @uf_control: | 160 | void **frame_buffer) |
170 | * @frame_index: | ||
171 | * @frame_buffer: | ||
172 | * | ||
173 | * enum sci_status | ||
174 | */ | ||
175 | enum sci_status scic_sds_unsolicited_frame_control_get_buffer( | ||
176 | struct scic_sds_unsolicited_frame_control *uf_control, | ||
177 | u32 frame_index, | ||
178 | void **frame_buffer) | ||
179 | { | 161 | { |
180 | if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { | 162 | if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) { |
181 | *frame_buffer = uf_control->buffers.array[frame_index].buffer; | 163 | *frame_buffer = uf_control->buffers.array[frame_index].buffer; |
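Callers consume these two bounds-checked accessors the way the PIO frame handler earlier in this patch does: fetch the header to identify the FIS, fetch the buffer when the payload matters, then return the frame index so the hardware can reuse the slot. A compressed, self-contained caller-shaped sketch with stand-in functions (error codes and the decode step are placeholders):

#include <stdio.h>

#define MAX_FRAMES 128

/* Stand-ins with the same shape as the renamed accessors. */
static int get_header(unsigned int idx, void **hdr)
{
    static char headers[MAX_FRAMES][64];
    if (idx >= MAX_FRAMES)
        return -1;            /* like SCI_FAILURE_INVALID_PARAMETER_VALUE */
    *hdr = headers[idx];
    return 0;                 /* like SCI_SUCCESS */
}

static int get_buffer(unsigned int idx, void **buf)
{
    static char buffers[MAX_FRAMES][1024];
    if (idx >= MAX_FRAMES)
        return -1;
    *buf = buffers[idx];
    return 0;
}

static void release_frame(unsigned int idx)
{
    printf("frame %u returned to hardware\n", idx);
}

int main(void)
{
    unsigned int frame_index = 3;
    void *hdr, *buf;

    if (get_header(frame_index, &hdr))
        return 1;
    if (get_buffer(frame_index, &buf))
        return 1;
    /* ... decode the FIS header and copy the payload here ... */
    release_frame(frame_index); /* frame is decoded, return it */
    return 0;
}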
@@ -186,19 +168,8 @@ enum sci_status scic_sds_unsolicited_frame_control_get_buffer( | |||
186 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 168 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
187 | } | 169 | } |
188 | 170 | ||
189 | /** | 171 | bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control, |
190 | * This method releases the frame once this is done the frame is available for | 172 | u32 frame_index) |
191 | * re-use by the hardware. The data contained in the frame header and frame | ||
192 | * buffer is no longer valid. | ||
193 | * @uf_control: This parameter specifies the UF control object | ||
194 | * @frame_index: This parameter specifies the frame index to attempt to release. | ||
195 | * | ||
196 | * This method returns an indication to the caller as to whether the | ||
197 | * unsolicited frame get pointer should be updated. | ||
198 | */ | ||
199 | bool scic_sds_unsolicited_frame_control_release_frame( | ||
200 | struct scic_sds_unsolicited_frame_control *uf_control, | ||
201 | u32 frame_index) | ||
202 | { | 173 | { |
203 | u32 frame_get; | 174 | u32 frame_get; |
204 | u32 frame_cycle; | 175 | u32 frame_cycle; |
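The locals visible above (frame_get, frame_cycle) suggest the usual ring-buffer bookkeeping: a get pointer that wraps at SCU_MAX_UNSOLICITED_FRAMES and carries a cycle bit so hardware and software can agree on queue occupancy. The body is elided in this hunk, so the following is strictly an assumed sketch of that style of pointer advance, with a hypothetical cycle-bit position:

#include <stdio.h>

#define MAX_FRAMES 128    /* stand-in for SCU_MAX_UNSOLICITED_FRAMES */
#define CYCLE_BIT  0x8000u /* hypothetical cycle-bit position */

/* Advance a software get pointer that wraps and toggles a cycle bit. */
static unsigned int advance_get(unsigned int get)
{
    unsigned int index = get & (MAX_FRAMES - 1);
    unsigned int cycle = get & CYCLE_BIT;

    if (++index == MAX_FRAMES) {
        index = 0;
        cycle ^= CYCLE_BIT; /* flip on wrap */
    }
    return cycle | index;
}

int main(void)
{
    unsigned int get = MAX_FRAMES - 1; /* last slot, cycle bit clear */
    printf("next get = 0x%x\n", advance_get(get));
    return 0;
}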
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index c0285a3db562..b849a84af34f 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h | |||
@@ -92,12 +92,12 @@ enum unsolicited_frame_state { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | /** | 94 | /** |
95 | * struct scic_sds_unsolicited_frame - | 95 | * struct sci_unsolicited_frame - |
96 | * | 96 | * |
97 | * This is the unsolicited frame data structure; it acts as the container for | 97 | * This is the unsolicited frame data structure; it acts as the container for |
98 | * the current frame state, frame header and frame buffer. | 98 | * the current frame state, frame header and frame buffer. |
99 | */ | 99 | */ |
100 | struct scic_sds_unsolicited_frame { | 100 | struct sci_unsolicited_frame { |
101 | /** | 101 | /** |
102 | * This field contains the current frame state | 102 | * This field contains the current frame state |
103 | */ | 103 | */ |
@@ -116,11 +116,11 @@ struct scic_sds_unsolicited_frame { | |||
116 | }; | 116 | }; |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * struct scic_sds_uf_header_array - | 119 | * struct sci_uf_header_array - |
120 | * | 120 | * |
121 | * This structure contains all of the unsolicited frame header information. | 121 | * This structure contains all of the unsolicited frame header information. |
122 | */ | 122 | */ |
123 | struct scic_sds_uf_header_array { | 123 | struct sci_uf_header_array { |
124 | /** | 124 | /** |
125 | * This field represents a virtual pointer to the start | 125 | * This field represents a virtual pointer to the start |
126 | * address of the UF address table. The table contains | 126 | * address of the UF address table. The table contains |
@@ -137,19 +137,19 @@ struct scic_sds_uf_header_array { | |||
137 | }; | 137 | }; |
138 | 138 | ||
139 | /** | 139 | /** |
140 | * struct scic_sds_uf_buffer_array - | 140 | * struct sci_uf_buffer_array - |
141 | * | 141 | * |
142 | * This structure contains all of the unsolicited frame buffer (actual payload) | 142 | * This structure contains all of the unsolicited frame buffer (actual payload) |
143 | * information. | 143 | * information. |
144 | */ | 144 | */ |
145 | struct scic_sds_uf_buffer_array { | 145 | struct sci_uf_buffer_array { |
146 | /** | 146 | /** |
147 | * This field is the unsolicited frame data; it is used to manage | 147 | * This field is the unsolicited frame data; it is used to manage |
148 | * the data for the unsolicited frame requests. It also represents | 148 | * the data for the unsolicited frame requests. It also represents |
149 | * the virtual address location that corresponds to the | 149 | * the virtual address location that corresponds to the |
150 | * physical_address field. | 150 | * physical_address field. |
151 | */ | 151 | */ |
152 | struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; | 152 | struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES]; |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * This field specifies the physical address location for the UF | 155 | * This field specifies the physical address location for the UF |
@@ -159,13 +159,13 @@ struct scic_sds_uf_buffer_array { | |||
159 | }; | 159 | }; |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * struct scic_sds_uf_address_table_array - | 162 | * struct sci_uf_address_table_array - |
163 | * | 163 | * |
164 | * This object maintains all of the unsolicited frame address table specific | 164 | * This object maintains all of the unsolicited frame address table specific |
165 | * data. The address table is a collection of 64-bit pointers that point to | 165 | * data. The address table is a collection of 64-bit pointers that point to |
166 | * 1KB buffers into which the silicon will DMA unsolicited frames. | 166 | * 1KB buffers into which the silicon will DMA unsolicited frames. |
167 | */ | 167 | */ |
168 | struct scic_sds_uf_address_table_array { | 168 | struct sci_uf_address_table_array { |
169 | /** | 169 | /** |
170 | * This field represents a virtual pointer that refers to the | 170 | * This field represents a virtual pointer that refers to the |
171 | * starting address of the UF address table. | 171 | * starting address of the UF address table. |
@@ -182,11 +182,11 @@ struct scic_sds_uf_address_table_array { | |||
182 | }; | 182 | }; |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * struct scic_sds_unsolicited_frame_control - | 185 | * struct sci_unsolicited_frame_control - |
186 | * | 186 | * |
187 | * This object contains all of the data necessary to handle unsolicited frames. | 187 | * This object contains all of the data necessary to handle unsolicited frames. |
188 | */ | 188 | */ |
189 | struct scic_sds_unsolicited_frame_control { | 189 | struct sci_unsolicited_frame_control { |
190 | /** | 190 | /** |
191 | * This field is the software copy of the unsolicited frame queue | 191 | * This field is the software copy of the unsolicited frame queue |
192 | * get pointer. The controller object writes this value to the | 192 | * get pointer. The controller object writes this value to the |
@@ -198,38 +198,38 @@ struct scic_sds_unsolicited_frame_control { | |||
198 | * This field contains all of the unsolicited frame header | 198 | * This field contains all of the unsolicited frame header |
199 | * specific fields. | 199 | * specific fields. |
200 | */ | 200 | */ |
201 | struct scic_sds_uf_header_array headers; | 201 | struct sci_uf_header_array headers; |
202 | 202 | ||
203 | /** | 203 | /** |
204 | * This field contains all of the unsolicited frame buffer | 204 | * This field contains all of the unsolicited frame buffer |
205 | * specific fields. | 205 | * specific fields. |
206 | */ | 206 | */ |
207 | struct scic_sds_uf_buffer_array buffers; | 207 | struct sci_uf_buffer_array buffers; |
208 | 208 | ||
209 | /** | 209 | /** |
210 | * This field contains all of the unsolicited frame address table | 210 | * This field contains all of the unsolicited frame address table |
211 | * specific fields. | 211 | * specific fields. |
212 | */ | 212 | */ |
213 | struct scic_sds_uf_address_table_array address_table; | 213 | struct sci_uf_address_table_array address_table; |
214 | 214 | ||
215 | }; | 215 | }; |
216 | 216 | ||
217 | struct isci_host; | 217 | struct isci_host; |
218 | 218 | ||
219 | int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost); | 219 | int sci_unsolicited_frame_control_construct(struct isci_host *ihost); |
220 | 220 | ||
221 | enum sci_status scic_sds_unsolicited_frame_control_get_header( | 221 | enum sci_status sci_unsolicited_frame_control_get_header( |
222 | struct scic_sds_unsolicited_frame_control *uf_control, | 222 | struct sci_unsolicited_frame_control *uf_control, |
223 | u32 frame_index, | 223 | u32 frame_index, |
224 | void **frame_header); | 224 | void **frame_header); |
225 | 225 | ||
226 | enum sci_status scic_sds_unsolicited_frame_control_get_buffer( | 226 | enum sci_status sci_unsolicited_frame_control_get_buffer( |
227 | struct scic_sds_unsolicited_frame_control *uf_control, | 227 | struct sci_unsolicited_frame_control *uf_control, |
228 | u32 frame_index, | 228 | u32 frame_index, |
229 | void **frame_buffer); | 229 | void **frame_buffer); |
230 | 230 | ||
231 | bool scic_sds_unsolicited_frame_control_release_frame( | 231 | bool sci_unsolicited_frame_control_release_frame( |
232 | struct scic_sds_unsolicited_frame_control *uf_control, | 232 | struct sci_unsolicited_frame_control *uf_control, |
233 | u32 frame_index); | 233 | u32 frame_index); |
234 | 234 | ||
235 | #endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */ | 235 | #endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */ |
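Taken together, the renamed sci_unsolicited_frame_control owns a header array, a payload-buffer array, and an address table of 64-bit DMA pointers that the silicon walks to find each 1KB buffer. A hedged, self-contained sketch of how such an address table is typically populated (field names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define MAX_FRAMES 128   /* stand-in for SCU_MAX_UNSOLICITED_FRAMES */
#define BUF_SIZE   1024  /* 1KB unsolicited frame buffers */

struct example_uf_control {
    uint64_t address_table[MAX_FRAMES]; /* DMA pointers the hardware reads */
    uint32_t get;                       /* software copy of the get pointer */
};

/* Point each table entry at the DMA address of its 1KB payload buffer. */
static void fill_address_table(struct example_uf_control *uf, uint64_t buf_dma_base)
{
    unsigned int i;

    for (i = 0; i < MAX_FRAMES; i++)
        uf->address_table[i] = buf_dma_base + (uint64_t)i * BUF_SIZE;
    uf->get = 0;
}

int main(void)
{
    static struct example_uf_control uf;

    fill_address_table(&uf, 0x100000);
    printf("entry 2 -> 0x%llx\n", (unsigned long long)uf.address_table[2]);
    return 0;
}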