-rw-r--r--  drivers/scsi/isci/host.c                        | 1000
-rw-r--r--  drivers/scsi/isci/host.h                        |  210
-rw-r--r--  drivers/scsi/isci/init.c                        |   10
-rw-r--r--  drivers/scsi/isci/phy.c                         |   40
-rw-r--r--  drivers/scsi/isci/port.c                        |   89
-rw-r--r--  drivers/scsi/isci/port.h                        |    4
-rw-r--r--  drivers/scsi/isci/port_config.c                 |   82
-rw-r--r--  drivers/scsi/isci/probe_roms.h                  |    2
-rw-r--r--  drivers/scsi/isci/remote_device.c               |   76
-rw-r--r--  drivers/scsi/isci/remote_device.h               |    6
-rw-r--r--  drivers/scsi/isci/remote_node_context.c         |   14
-rw-r--r--  drivers/scsi/isci/request.c                     |  200
-rw-r--r--  drivers/scsi/isci/request.h                     |    4
-rw-r--r--  drivers/scsi/isci/task.c                        |   44
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c   |    6
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h   |    4
16 files changed, 806 insertions(+), 985 deletions(-)
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 45d7f71c609a..bb298f8f609a 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -181,35 +181,35 @@ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
181 | } | 181 | } |
182 | 182 | ||
183 | static bool scic_sds_controller_completion_queue_has_entries( | 183 | static bool scic_sds_controller_completion_queue_has_entries( |
184 | struct scic_sds_controller *scic) | 184 | struct isci_host *ihost) |
185 | { | 185 | { |
186 | u32 get_value = scic->completion_queue_get; | 186 | u32 get_value = ihost->completion_queue_get; |
187 | u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; | 187 | u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; |
188 | 188 | ||
189 | if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == | 189 | if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == |
190 | COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) | 190 | COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) |
191 | return true; | 191 | return true; |
192 | 192 | ||
193 | return false; | 193 | return false; |
194 | } | 194 | } |
195 | 195 | ||
196 | static bool scic_sds_controller_isr(struct scic_sds_controller *scic) | 196 | static bool scic_sds_controller_isr(struct isci_host *ihost) |
197 | { | 197 | { |
198 | if (scic_sds_controller_completion_queue_has_entries(scic)) { | 198 | if (scic_sds_controller_completion_queue_has_entries(ihost)) { |
199 | return true; | 199 | return true; |
200 | } else { | 200 | } else { |
201 | /* | 201 | /* |
202 | * we have a spurious interrupt it could be that we have already | 202 | * we have a spurious interrupt it could be that we have already |
203 | * emptied the completion queue from a previous interrupt */ | 203 | * emptied the completion queue from a previous interrupt */ |
204 | writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); | 204 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
205 | 205 | ||
206 | /* | 206 | /* |
207 | * There is a race in the hardware that could cause us not to be notified | 207 | * There is a race in the hardware that could cause us not to be notified |
208 | * of an interrupt completion if we do not take this step. We will mask | 208 | * of an interrupt completion if we do not take this step. We will mask |
209 | * then unmask the interrupts so if there is another interrupt pending | 209 | * then unmask the interrupts so if there is another interrupt pending |
210 | * the clearing of the interrupt source we get the next interrupt message. */ | 210 | * the clearing of the interrupt source we get the next interrupt message. */ |
211 | writel(0xFF000000, &scic->smu_registers->interrupt_mask); | 211 | writel(0xFF000000, &ihost->smu_registers->interrupt_mask); |
212 | writel(0, &scic->smu_registers->interrupt_mask); | 212 | writel(0, &ihost->smu_registers->interrupt_mask); |
213 | } | 213 | } |
214 | 214 | ||
215 | return false; | 215 | return false; |
@@ -219,18 +219,18 @@ irqreturn_t isci_msix_isr(int vec, void *data)
219 | { | 219 | { |
220 | struct isci_host *ihost = data; | 220 | struct isci_host *ihost = data; |
221 | 221 | ||
222 | if (scic_sds_controller_isr(&ihost->sci)) | 222 | if (scic_sds_controller_isr(ihost)) |
223 | tasklet_schedule(&ihost->completion_tasklet); | 223 | tasklet_schedule(&ihost->completion_tasklet); |
224 | 224 | ||
225 | return IRQ_HANDLED; | 225 | return IRQ_HANDLED; |
226 | } | 226 | } |
227 | 227 | ||
228 | static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic) | 228 | static bool scic_sds_controller_error_isr(struct isci_host *ihost) |
229 | { | 229 | { |
230 | u32 interrupt_status; | 230 | u32 interrupt_status; |
231 | 231 | ||
232 | interrupt_status = | 232 | interrupt_status = |
233 | readl(&scic->smu_registers->interrupt_status); | 233 | readl(&ihost->smu_registers->interrupt_status); |
234 | interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); | 234 | interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); |
235 | 235 | ||
236 | if (interrupt_status != 0) { | 236 | if (interrupt_status != 0) { |
@@ -246,28 +246,27 @@ static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
246 | * then unmask the error interrupts so if there was another interrupt | 246 | * then unmask the error interrupts so if there was another interrupt |
247 | * pending we will be notified. | 247 | * pending we will be notified. |
248 | * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ | 248 | * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ |
249 | writel(0xff, &scic->smu_registers->interrupt_mask); | 249 | writel(0xff, &ihost->smu_registers->interrupt_mask); |
250 | writel(0, &scic->smu_registers->interrupt_mask); | 250 | writel(0, &ihost->smu_registers->interrupt_mask); |
251 | 251 | ||
252 | return false; | 252 | return false; |
253 | } | 253 | } |
254 | 254 | ||
255 | static void scic_sds_controller_task_completion(struct scic_sds_controller *scic, | 255 | static void scic_sds_controller_task_completion(struct isci_host *ihost, |
256 | u32 completion_entry) | 256 | u32 completion_entry) |
257 | { | 257 | { |
258 | u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); | 258 | u32 index = SCU_GET_COMPLETION_INDEX(completion_entry); |
259 | struct isci_host *ihost = scic_to_ihost(scic); | ||
260 | struct isci_request *ireq = ihost->reqs[index]; | 259 | struct isci_request *ireq = ihost->reqs[index]; |
261 | 260 | ||
262 | /* Make sure that we really want to process this IO request */ | 261 | /* Make sure that we really want to process this IO request */ |
263 | if (test_bit(IREQ_ACTIVE, &ireq->flags) && | 262 | if (test_bit(IREQ_ACTIVE, &ireq->flags) && |
264 | ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && | 263 | ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && |
265 | ISCI_TAG_SEQ(ireq->io_tag) == scic->io_request_sequence[index]) | 264 | ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) |
266 | /* Yep this is a valid io request pass it along to the io request handler */ | 265 | /* Yep this is a valid io request pass it along to the io request handler */ |
267 | scic_sds_io_request_tc_completion(ireq, completion_entry); | 266 | scic_sds_io_request_tc_completion(ireq, completion_entry); |
268 | } | 267 | } |
269 | 268 | ||
270 | static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic, | 269 | static void scic_sds_controller_sdma_completion(struct isci_host *ihost, |
271 | u32 completion_entry) | 270 | u32 completion_entry) |
272 | { | 271 | { |
273 | u32 index; | 272 | u32 index; |
@@ -279,8 +278,8 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic
279 | switch (scu_get_command_request_type(completion_entry)) { | 278 | switch (scu_get_command_request_type(completion_entry)) { |
280 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: | 279 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: |
281 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: | 280 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: |
282 | ireq = scic_to_ihost(scic)->reqs[index]; | 281 | ireq = ihost->reqs[index]; |
283 | dev_warn(scic_to_dev(scic), "%s: %x for io request %p\n", | 282 | dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", |
284 | __func__, completion_entry, ireq); | 283 | __func__, completion_entry, ireq); |
285 | /* @todo For a post TC operation we need to fail the IO | 284 | /* @todo For a post TC operation we need to fail the IO |
286 | * request | 285 | * request |
@@ -289,27 +288,26 @@ static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic
289 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: | 288 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: |
290 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: | 289 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: |
291 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: | 290 | case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: |
292 | idev = scic->device_table[index]; | 291 | idev = ihost->device_table[index]; |
293 | dev_warn(scic_to_dev(scic), "%s: %x for device %p\n", | 292 | dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", |
294 | __func__, completion_entry, idev); | 293 | __func__, completion_entry, idev); |
295 | /* @todo For a port RNC operation we need to fail the | 294 | /* @todo For a port RNC operation we need to fail the |
296 | * device | 295 | * device |
297 | */ | 296 | */ |
298 | break; | 297 | break; |
299 | default: | 298 | default: |
300 | dev_warn(scic_to_dev(scic), "%s: unknown completion type %x\n", | 299 | dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", |
301 | __func__, completion_entry); | 300 | __func__, completion_entry); |
302 | break; | 301 | break; |
303 | } | 302 | } |
304 | } | 303 | } |
305 | 304 | ||
306 | static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic, | 305 | static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost, |
307 | u32 completion_entry) | 306 | u32 completion_entry) |
308 | { | 307 | { |
309 | u32 index; | 308 | u32 index; |
310 | u32 frame_index; | 309 | u32 frame_index; |
311 | 310 | ||
312 | struct isci_host *ihost = scic_to_ihost(scic); | ||
313 | struct scu_unsolicited_frame_header *frame_header; | 311 | struct scu_unsolicited_frame_header *frame_header; |
314 | struct isci_phy *iphy; | 312 | struct isci_phy *iphy; |
315 | struct isci_remote_device *idev; | 313 | struct isci_remote_device *idev; |
@@ -318,15 +316,15 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc
318 | 316 | ||
319 | frame_index = SCU_GET_FRAME_INDEX(completion_entry); | 317 | frame_index = SCU_GET_FRAME_INDEX(completion_entry); |
320 | 318 | ||
321 | frame_header = scic->uf_control.buffers.array[frame_index].header; | 319 | frame_header = ihost->uf_control.buffers.array[frame_index].header; |
322 | scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; | 320 | ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; |
323 | 321 | ||
324 | if (SCU_GET_FRAME_ERROR(completion_entry)) { | 322 | if (SCU_GET_FRAME_ERROR(completion_entry)) { |
325 | /* | 323 | /* |
326 | * / @todo If the IAF frame or SIGNATURE FIS frame has an error will | 324 | * / @todo If the IAF frame or SIGNATURE FIS frame has an error will |
327 | * / this cause a problem? We expect the phy initialization will | 325 | * / this cause a problem? We expect the phy initialization will |
328 | * / fail if there is an error in the frame. */ | 326 | * / fail if there is an error in the frame. */ |
329 | scic_sds_controller_release_frame(scic, frame_index); | 327 | scic_sds_controller_release_frame(ihost, frame_index); |
330 | return; | 328 | return; |
331 | } | 329 | } |
332 | 330 | ||
@@ -347,15 +345,15 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc
347 | iphy = &ihost->phys[index]; | 345 | iphy = &ihost->phys[index]; |
348 | result = scic_sds_phy_frame_handler(iphy, frame_index); | 346 | result = scic_sds_phy_frame_handler(iphy, frame_index); |
349 | } else { | 347 | } else { |
350 | if (index < scic->remote_node_entries) | 348 | if (index < ihost->remote_node_entries) |
351 | idev = scic->device_table[index]; | 349 | idev = ihost->device_table[index]; |
352 | else | 350 | else |
353 | idev = NULL; | 351 | idev = NULL; |
354 | 352 | ||
355 | if (idev != NULL) | 353 | if (idev != NULL) |
356 | result = scic_sds_remote_device_frame_handler(idev, frame_index); | 354 | result = scic_sds_remote_device_frame_handler(idev, frame_index); |
357 | else | 355 | else |
358 | scic_sds_controller_release_frame(scic, frame_index); | 356 | scic_sds_controller_release_frame(ihost, frame_index); |
359 | } | 357 | } |
360 | } | 358 | } |
361 | 359 | ||
@@ -366,10 +364,9 @@ static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *sc
366 | } | 364 | } |
367 | } | 365 | } |
368 | 366 | ||
369 | static void scic_sds_controller_event_completion(struct scic_sds_controller *scic, | 367 | static void scic_sds_controller_event_completion(struct isci_host *ihost, |
370 | u32 completion_entry) | 368 | u32 completion_entry) |
371 | { | 369 | { |
372 | struct isci_host *ihost = scic_to_ihost(scic); | ||
373 | struct isci_remote_device *idev; | 370 | struct isci_remote_device *idev; |
374 | struct isci_request *ireq; | 371 | struct isci_request *ireq; |
375 | struct isci_phy *iphy; | 372 | struct isci_phy *iphy; |
@@ -380,11 +377,11 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
380 | switch (scu_get_event_type(completion_entry)) { | 377 | switch (scu_get_event_type(completion_entry)) { |
381 | case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: | 378 | case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: |
382 | /* / @todo The driver did something wrong and we need to fix the condtion. */ | 379 | /* / @todo The driver did something wrong and we need to fix the condtion. */ |
383 | dev_err(scic_to_dev(scic), | 380 | dev_err(&ihost->pdev->dev, |
384 | "%s: SCIC Controller 0x%p received SMU command error " | 381 | "%s: SCIC Controller 0x%p received SMU command error " |
385 | "0x%x\n", | 382 | "0x%x\n", |
386 | __func__, | 383 | __func__, |
387 | scic, | 384 | ihost, |
388 | completion_entry); | 385 | completion_entry); |
389 | break; | 386 | break; |
390 | 387 | ||
@@ -394,11 +391,11 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
394 | /* | 391 | /* |
395 | * / @todo This is a hardware failure and its likely that we want to | 392 | * / @todo This is a hardware failure and its likely that we want to |
396 | * / reset the controller. */ | 393 | * / reset the controller. */ |
397 | dev_err(scic_to_dev(scic), | 394 | dev_err(&ihost->pdev->dev, |
398 | "%s: SCIC Controller 0x%p received fatal controller " | 395 | "%s: SCIC Controller 0x%p received fatal controller " |
399 | "event 0x%x\n", | 396 | "event 0x%x\n", |
400 | __func__, | 397 | __func__, |
401 | scic, | 398 | ihost, |
402 | completion_entry); | 399 | completion_entry); |
403 | break; | 400 | break; |
404 | 401 | ||
@@ -415,27 +412,27 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
415 | if (ireq != NULL) | 412 | if (ireq != NULL) |
416 | scic_sds_io_request_event_handler(ireq, completion_entry); | 413 | scic_sds_io_request_event_handler(ireq, completion_entry); |
417 | else | 414 | else |
418 | dev_warn(scic_to_dev(scic), | 415 | dev_warn(&ihost->pdev->dev, |
419 | "%s: SCIC Controller 0x%p received " | 416 | "%s: SCIC Controller 0x%p received " |
420 | "event 0x%x for io request object " | 417 | "event 0x%x for io request object " |
421 | "that doesnt exist.\n", | 418 | "that doesnt exist.\n", |
422 | __func__, | 419 | __func__, |
423 | scic, | 420 | ihost, |
424 | completion_entry); | 421 | completion_entry); |
425 | 422 | ||
426 | break; | 423 | break; |
427 | 424 | ||
428 | case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: | 425 | case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: |
429 | idev = scic->device_table[index]; | 426 | idev = ihost->device_table[index]; |
430 | if (idev != NULL) | 427 | if (idev != NULL) |
431 | scic_sds_remote_device_event_handler(idev, completion_entry); | 428 | scic_sds_remote_device_event_handler(idev, completion_entry); |
432 | else | 429 | else |
433 | dev_warn(scic_to_dev(scic), | 430 | dev_warn(&ihost->pdev->dev, |
434 | "%s: SCIC Controller 0x%p received " | 431 | "%s: SCIC Controller 0x%p received " |
435 | "event 0x%x for remote device object " | 432 | "event 0x%x for remote device object " |
436 | "that doesnt exist.\n", | 433 | "that doesnt exist.\n", |
437 | __func__, | 434 | __func__, |
438 | scic, | 435 | ihost, |
439 | completion_entry); | 436 | completion_entry); |
440 | 437 | ||
441 | break; | 438 | break; |
@@ -459,25 +456,25 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
459 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: | 456 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX: |
460 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: | 457 | case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: |
461 | case SCU_EVENT_TYPE_RNC_OPS_MISC: | 458 | case SCU_EVENT_TYPE_RNC_OPS_MISC: |
462 | if (index < scic->remote_node_entries) { | 459 | if (index < ihost->remote_node_entries) { |
463 | idev = scic->device_table[index]; | 460 | idev = ihost->device_table[index]; |
464 | 461 | ||
465 | if (idev != NULL) | 462 | if (idev != NULL) |
466 | scic_sds_remote_device_event_handler(idev, completion_entry); | 463 | scic_sds_remote_device_event_handler(idev, completion_entry); |
467 | } else | 464 | } else |
468 | dev_err(scic_to_dev(scic), | 465 | dev_err(&ihost->pdev->dev, |
469 | "%s: SCIC Controller 0x%p received event 0x%x " | 466 | "%s: SCIC Controller 0x%p received event 0x%x " |
470 | "for remote device object 0x%0x that doesnt " | 467 | "for remote device object 0x%0x that doesnt " |
471 | "exist.\n", | 468 | "exist.\n", |
472 | __func__, | 469 | __func__, |
473 | scic, | 470 | ihost, |
474 | completion_entry, | 471 | completion_entry, |
475 | index); | 472 | index); |
476 | 473 | ||
477 | break; | 474 | break; |
478 | 475 | ||
479 | default: | 476 | default: |
480 | dev_warn(scic_to_dev(scic), | 477 | dev_warn(&ihost->pdev->dev, |
481 | "%s: SCIC Controller received unknown event code %x\n", | 478 | "%s: SCIC Controller received unknown event code %x\n", |
482 | __func__, | 479 | __func__, |
483 | completion_entry); | 480 | completion_entry); |
@@ -485,7 +482,7 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
485 | } | 482 | } |
486 | } | 483 | } |
487 | 484 | ||
488 | static void scic_sds_controller_process_completions(struct scic_sds_controller *scic) | 485 | static void scic_sds_controller_process_completions(struct isci_host *ihost) |
489 | { | 486 | { |
490 | u32 completion_count = 0; | 487 | u32 completion_count = 0; |
491 | u32 completion_entry; | 488 | u32 completion_entry; |
@@ -494,47 +491,47 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
494 | u32 event_get; | 491 | u32 event_get; |
495 | u32 event_cycle; | 492 | u32 event_cycle; |
496 | 493 | ||
497 | dev_dbg(scic_to_dev(scic), | 494 | dev_dbg(&ihost->pdev->dev, |
498 | "%s: completion queue begining get:0x%08x\n", | 495 | "%s: completion queue begining get:0x%08x\n", |
499 | __func__, | 496 | __func__, |
500 | scic->completion_queue_get); | 497 | ihost->completion_queue_get); |
501 | 498 | ||
502 | /* Get the component parts of the completion queue */ | 499 | /* Get the component parts of the completion queue */ |
503 | get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get); | 500 | get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); |
504 | get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get; | 501 | get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; |
505 | 502 | ||
506 | event_get = NORMALIZE_EVENT_POINTER(scic->completion_queue_get); | 503 | event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); |
507 | event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get; | 504 | event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; |
508 | 505 | ||
509 | while ( | 506 | while ( |
510 | NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) | 507 | NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) |
511 | == COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]) | 508 | == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) |
512 | ) { | 509 | ) { |
513 | completion_count++; | 510 | completion_count++; |
514 | 511 | ||
515 | completion_entry = scic->completion_queue[get_index]; | 512 | completion_entry = ihost->completion_queue[get_index]; |
516 | 513 | ||
517 | /* increment the get pointer and check for rollover to toggle the cycle bit */ | 514 | /* increment the get pointer and check for rollover to toggle the cycle bit */ |
518 | get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << | 515 | get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << |
519 | (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); | 516 | (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); |
520 | get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); | 517 | get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); |
521 | 518 | ||
522 | dev_dbg(scic_to_dev(scic), | 519 | dev_dbg(&ihost->pdev->dev, |
523 | "%s: completion queue entry:0x%08x\n", | 520 | "%s: completion queue entry:0x%08x\n", |
524 | __func__, | 521 | __func__, |
525 | completion_entry); | 522 | completion_entry); |
526 | 523 | ||
527 | switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { | 524 | switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { |
528 | case SCU_COMPLETION_TYPE_TASK: | 525 | case SCU_COMPLETION_TYPE_TASK: |
529 | scic_sds_controller_task_completion(scic, completion_entry); | 526 | scic_sds_controller_task_completion(ihost, completion_entry); |
530 | break; | 527 | break; |
531 | 528 | ||
532 | case SCU_COMPLETION_TYPE_SDMA: | 529 | case SCU_COMPLETION_TYPE_SDMA: |
533 | scic_sds_controller_sdma_completion(scic, completion_entry); | 530 | scic_sds_controller_sdma_completion(ihost, completion_entry); |
534 | break; | 531 | break; |
535 | 532 | ||
536 | case SCU_COMPLETION_TYPE_UFI: | 533 | case SCU_COMPLETION_TYPE_UFI: |
537 | scic_sds_controller_unsolicited_frame(scic, completion_entry); | 534 | scic_sds_controller_unsolicited_frame(ihost, completion_entry); |
538 | break; | 535 | break; |
539 | 536 | ||
540 | case SCU_COMPLETION_TYPE_EVENT: | 537 | case SCU_COMPLETION_TYPE_EVENT: |
@@ -543,11 +540,11 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
543 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); | 540 | (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); |
544 | event_get = (event_get+1) & (SCU_MAX_EVENTS-1); | 541 | event_get = (event_get+1) & (SCU_MAX_EVENTS-1); |
545 | 542 | ||
546 | scic_sds_controller_event_completion(scic, completion_entry); | 543 | scic_sds_controller_event_completion(ihost, completion_entry); |
547 | break; | 544 | break; |
548 | } | 545 | } |
549 | default: | 546 | default: |
550 | dev_warn(scic_to_dev(scic), | 547 | dev_warn(&ihost->pdev->dev, |
551 | "%s: SCIC Controller received unknown " | 548 | "%s: SCIC Controller received unknown " |
552 | "completion type %x\n", | 549 | "completion type %x\n", |
553 | __func__, | 550 | __func__, |
@@ -558,7 +555,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
558 | 555 | ||
559 | /* Update the get register if we completed one or more entries */ | 556 | /* Update the get register if we completed one or more entries */ |
560 | if (completion_count > 0) { | 557 | if (completion_count > 0) { |
561 | scic->completion_queue_get = | 558 | ihost->completion_queue_get = |
562 | SMU_CQGR_GEN_BIT(ENABLE) | | 559 | SMU_CQGR_GEN_BIT(ENABLE) | |
563 | SMU_CQGR_GEN_BIT(EVENT_ENABLE) | | 560 | SMU_CQGR_GEN_BIT(EVENT_ENABLE) | |
564 | event_cycle | | 561 | event_cycle | |
@@ -566,35 +563,35 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
566 | get_cycle | | 563 | get_cycle | |
567 | SMU_CQGR_GEN_VAL(POINTER, get_index); | 564 | SMU_CQGR_GEN_VAL(POINTER, get_index); |
568 | 565 | ||
569 | writel(scic->completion_queue_get, | 566 | writel(ihost->completion_queue_get, |
570 | &scic->smu_registers->completion_queue_get); | 567 | &ihost->smu_registers->completion_queue_get); |
571 | 568 | ||
572 | } | 569 | } |
573 | 570 | ||
574 | dev_dbg(scic_to_dev(scic), | 571 | dev_dbg(&ihost->pdev->dev, |
575 | "%s: completion queue ending get:0x%08x\n", | 572 | "%s: completion queue ending get:0x%08x\n", |
576 | __func__, | 573 | __func__, |
577 | scic->completion_queue_get); | 574 | ihost->completion_queue_get); |
578 | 575 | ||
579 | } | 576 | } |
580 | 577 | ||
581 | static void scic_sds_controller_error_handler(struct scic_sds_controller *scic) | 578 | static void scic_sds_controller_error_handler(struct isci_host *ihost) |
582 | { | 579 | { |
583 | u32 interrupt_status; | 580 | u32 interrupt_status; |
584 | 581 | ||
585 | interrupt_status = | 582 | interrupt_status = |
586 | readl(&scic->smu_registers->interrupt_status); | 583 | readl(&ihost->smu_registers->interrupt_status); |
587 | 584 | ||
588 | if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && | 585 | if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && |
589 | scic_sds_controller_completion_queue_has_entries(scic)) { | 586 | scic_sds_controller_completion_queue_has_entries(ihost)) { |
590 | 587 | ||
591 | scic_sds_controller_process_completions(scic); | 588 | scic_sds_controller_process_completions(ihost); |
592 | writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status); | 589 | writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); |
593 | } else { | 590 | } else { |
594 | dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__, | 591 | dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, |
595 | interrupt_status); | 592 | interrupt_status); |
596 | 593 | ||
597 | sci_change_state(&scic->sm, SCIC_FAILED); | 594 | sci_change_state(&ihost->sm, SCIC_FAILED); |
598 | 595 | ||
599 | return; | 596 | return; |
600 | } | 597 | } |
@@ -602,22 +599,21 @@ static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
602 | /* If we dont process any completions I am not sure that we want to do this. | 599 | /* If we dont process any completions I am not sure that we want to do this. |
603 | * We are in the middle of a hardware fault and should probably be reset. | 600 | * We are in the middle of a hardware fault and should probably be reset. |
604 | */ | 601 | */ |
605 | writel(0, &scic->smu_registers->interrupt_mask); | 602 | writel(0, &ihost->smu_registers->interrupt_mask); |
606 | } | 603 | } |
607 | 604 | ||
608 | irqreturn_t isci_intx_isr(int vec, void *data) | 605 | irqreturn_t isci_intx_isr(int vec, void *data) |
609 | { | 606 | { |
610 | irqreturn_t ret = IRQ_NONE; | 607 | irqreturn_t ret = IRQ_NONE; |
611 | struct isci_host *ihost = data; | 608 | struct isci_host *ihost = data; |
612 | struct scic_sds_controller *scic = &ihost->sci; | ||
613 | 609 | ||
614 | if (scic_sds_controller_isr(scic)) { | 610 | if (scic_sds_controller_isr(ihost)) { |
615 | writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); | 611 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
616 | tasklet_schedule(&ihost->completion_tasklet); | 612 | tasklet_schedule(&ihost->completion_tasklet); |
617 | ret = IRQ_HANDLED; | 613 | ret = IRQ_HANDLED; |
618 | } else if (scic_sds_controller_error_isr(scic)) { | 614 | } else if (scic_sds_controller_error_isr(ihost)) { |
619 | spin_lock(&ihost->scic_lock); | 615 | spin_lock(&ihost->scic_lock); |
620 | scic_sds_controller_error_handler(scic); | 616 | scic_sds_controller_error_handler(ihost); |
621 | spin_unlock(&ihost->scic_lock); | 617 | spin_unlock(&ihost->scic_lock); |
622 | ret = IRQ_HANDLED; | 618 | ret = IRQ_HANDLED; |
623 | } | 619 | } |
@@ -629,8 +625,8 @@ irqreturn_t isci_error_isr(int vec, void *data)
629 | { | 625 | { |
630 | struct isci_host *ihost = data; | 626 | struct isci_host *ihost = data; |
631 | 627 | ||
632 | if (scic_sds_controller_error_isr(&ihost->sci)) | 628 | if (scic_sds_controller_error_isr(ihost)) |
633 | scic_sds_controller_error_handler(&ihost->sci); | 629 | scic_sds_controller_error_handler(ihost); |
634 | 630 | ||
635 | return IRQ_HANDLED; | 631 | return IRQ_HANDLED; |
636 | } | 632 | } |
@@ -685,11 +681,10 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
685 | * This method returns the number of milliseconds for the suggested start | 681 | * This method returns the number of milliseconds for the suggested start |
686 | * operation timeout. | 682 | * operation timeout. |
687 | */ | 683 | */ |
688 | static u32 scic_controller_get_suggested_start_timeout( | 684 | static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost) |
689 | struct scic_sds_controller *sc) | ||
690 | { | 685 | { |
691 | /* Validate the user supplied parameters. */ | 686 | /* Validate the user supplied parameters. */ |
692 | if (sc == NULL) | 687 | if (!ihost) |
693 | return 0; | 688 | return 0; |
694 | 689 | ||
695 | /* | 690 | /* |
@@ -711,35 +706,32 @@ static u32 scic_controller_get_suggested_start_timeout(
711 | + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); | 706 | + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); |
712 | } | 707 | } |
713 | 708 | ||
714 | static void scic_controller_enable_interrupts( | 709 | static void scic_controller_enable_interrupts(struct isci_host *ihost) |
715 | struct scic_sds_controller *scic) | ||
716 | { | 710 | { |
717 | BUG_ON(scic->smu_registers == NULL); | 711 | BUG_ON(ihost->smu_registers == NULL); |
718 | writel(0, &scic->smu_registers->interrupt_mask); | 712 | writel(0, &ihost->smu_registers->interrupt_mask); |
719 | } | 713 | } |
720 | 714 | ||
721 | void scic_controller_disable_interrupts( | 715 | void scic_controller_disable_interrupts(struct isci_host *ihost) |
722 | struct scic_sds_controller *scic) | ||
723 | { | 716 | { |
724 | BUG_ON(scic->smu_registers == NULL); | 717 | BUG_ON(ihost->smu_registers == NULL); |
725 | writel(0xffffffff, &scic->smu_registers->interrupt_mask); | 718 | writel(0xffffffff, &ihost->smu_registers->interrupt_mask); |
726 | } | 719 | } |
727 | 720 | ||
728 | static void scic_sds_controller_enable_port_task_scheduler( | 721 | static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost) |
729 | struct scic_sds_controller *scic) | ||
730 | { | 722 | { |
731 | u32 port_task_scheduler_value; | 723 | u32 port_task_scheduler_value; |
732 | 724 | ||
733 | port_task_scheduler_value = | 725 | port_task_scheduler_value = |
734 | readl(&scic->scu_registers->peg0.ptsg.control); | 726 | readl(&ihost->scu_registers->peg0.ptsg.control); |
735 | port_task_scheduler_value |= | 727 | port_task_scheduler_value |= |
736 | (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | | 728 | (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | |
737 | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); | 729 | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); |
738 | writel(port_task_scheduler_value, | 730 | writel(port_task_scheduler_value, |
739 | &scic->scu_registers->peg0.ptsg.control); | 731 | &ihost->scu_registers->peg0.ptsg.control); |
740 | } | 732 | } |
741 | 733 | ||
742 | static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic) | 734 | static void scic_sds_controller_assign_task_entries(struct isci_host *ihost) |
743 | { | 735 | { |
744 | u32 task_assignment; | 736 | u32 task_assignment; |
745 | 737 | ||
@@ -749,32 +741,32 @@ static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *
749 | */ | 741 | */ |
750 | 742 | ||
751 | task_assignment = | 743 | task_assignment = |
752 | readl(&scic->smu_registers->task_context_assignment[0]); | 744 | readl(&ihost->smu_registers->task_context_assignment[0]); |
753 | 745 | ||
754 | task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | | 746 | task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | |
755 | (SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) | | 747 | (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | |
756 | (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); | 748 | (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); |
757 | 749 | ||
758 | writel(task_assignment, | 750 | writel(task_assignment, |
759 | &scic->smu_registers->task_context_assignment[0]); | 751 | &ihost->smu_registers->task_context_assignment[0]); |
760 | 752 | ||
761 | } | 753 | } |
762 | 754 | ||
763 | static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic) | 755 | static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost) |
764 | { | 756 | { |
765 | u32 index; | 757 | u32 index; |
766 | u32 completion_queue_control_value; | 758 | u32 completion_queue_control_value; |
767 | u32 completion_queue_get_value; | 759 | u32 completion_queue_get_value; |
768 | u32 completion_queue_put_value; | 760 | u32 completion_queue_put_value; |
769 | 761 | ||
770 | scic->completion_queue_get = 0; | 762 | ihost->completion_queue_get = 0; |
771 | 763 | ||
772 | completion_queue_control_value = | 764 | completion_queue_control_value = |
773 | (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | | 765 | (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | |
774 | SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); | 766 | SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); |
775 | 767 | ||
776 | writel(completion_queue_control_value, | 768 | writel(completion_queue_control_value, |
777 | &scic->smu_registers->completion_queue_control); | 769 | &ihost->smu_registers->completion_queue_control); |
778 | 770 | ||
779 | 771 | ||
780 | /* Set the completion queue get pointer and enable the queue */ | 772 | /* Set the completion queue get pointer and enable the queue */ |
@@ -786,7 +778,7 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont
786 | ); | 778 | ); |
787 | 779 | ||
788 | writel(completion_queue_get_value, | 780 | writel(completion_queue_get_value, |
789 | &scic->smu_registers->completion_queue_get); | 781 | &ihost->smu_registers->completion_queue_get); |
790 | 782 | ||
791 | /* Set the completion queue put pointer */ | 783 | /* Set the completion queue put pointer */ |
792 | completion_queue_put_value = ( | 784 | completion_queue_put_value = ( |
@@ -795,7 +787,7 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont
795 | ); | 787 | ); |
796 | 788 | ||
797 | writel(completion_queue_put_value, | 789 | writel(completion_queue_put_value, |
798 | &scic->smu_registers->completion_queue_put); | 790 | &ihost->smu_registers->completion_queue_put); |
799 | 791 | ||
800 | /* Initialize the cycle bit of the completion queue entries */ | 792 | /* Initialize the cycle bit of the completion queue entries */ |
801 | for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { | 793 | for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { |
@@ -803,11 +795,11 @@ static void scic_sds_controller_initialize_completion_queue(struct scic_sds_cont
803 | * If get.cycle_bit != completion_queue.cycle_bit | 795 | * If get.cycle_bit != completion_queue.cycle_bit |
804 | * its not a valid completion queue entry | 796 | * its not a valid completion queue entry |
805 | * so at system start all entries are invalid */ | 797 | * so at system start all entries are invalid */ |
806 | scic->completion_queue[index] = 0x80000000; | 798 | ihost->completion_queue[index] = 0x80000000; |
807 | } | 799 | } |
808 | } | 800 | } |
809 | 801 | ||
810 | static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic) | 802 | static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) |
811 | { | 803 | { |
812 | u32 frame_queue_control_value; | 804 | u32 frame_queue_control_value; |
813 | u32 frame_queue_get_value; | 805 | u32 frame_queue_get_value; |
@@ -818,7 +810,7 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s
818 | SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); | 810 | SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); |
819 | 811 | ||
820 | writel(frame_queue_control_value, | 812 | writel(frame_queue_control_value, |
821 | &scic->scu_registers->sdma.unsolicited_frame_queue_control); | 813 | &ihost->scu_registers->sdma.unsolicited_frame_queue_control); |
822 | 814 | ||
823 | /* Setup the get pointer for the unsolicited frame queue */ | 815 | /* Setup the get pointer for the unsolicited frame queue */ |
824 | frame_queue_get_value = ( | 816 | frame_queue_get_value = ( |
@@ -827,11 +819,11 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s
827 | ); | 819 | ); |
828 | 820 | ||
829 | writel(frame_queue_get_value, | 821 | writel(frame_queue_get_value, |
830 | &scic->scu_registers->sdma.unsolicited_frame_get_pointer); | 822 | &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
831 | /* Setup the put pointer for the unsolicited frame queue */ | 823 | /* Setup the put pointer for the unsolicited frame queue */ |
832 | frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); | 824 | frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); |
833 | writel(frame_queue_put_value, | 825 | writel(frame_queue_put_value, |
834 | &scic->scu_registers->sdma.unsolicited_frame_put_pointer); | 826 | &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); |
835 | } | 827 | } |
836 | 828 | ||
837 | /** | 829 | /** |
@@ -846,17 +838,16 @@ static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_s
846 | * none. | 838 | * none. |
847 | */ | 839 | */ |
848 | static void scic_sds_controller_transition_to_ready( | 840 | static void scic_sds_controller_transition_to_ready( |
849 | struct scic_sds_controller *scic, | 841 | struct isci_host *ihost, |
850 | enum sci_status status) | 842 | enum sci_status status) |
851 | { | 843 | { |
852 | struct isci_host *ihost = scic_to_ihost(scic); | ||
853 | 844 | ||
854 | if (scic->sm.current_state_id == SCIC_STARTING) { | 845 | if (ihost->sm.current_state_id == SCIC_STARTING) { |
855 | /* | 846 | /* |
856 | * We move into the ready state, because some of the phys/ports | 847 | * We move into the ready state, because some of the phys/ports |
857 | * may be up and operational. | 848 | * may be up and operational. |
858 | */ | 849 | */ |
859 | sci_change_state(&scic->sm, SCIC_READY); | 850 | sci_change_state(&ihost->sm, SCIC_READY); |
860 | 851 | ||
861 | isci_host_start_complete(ihost, status); | 852 | isci_host_start_complete(ihost, status); |
862 | } | 853 | } |
@@ -892,19 +883,18 @@ static bool is_phy_starting(struct isci_phy *iphy)
892 | * controller to the READY state and inform the user | 883 | * controller to the READY state and inform the user |
893 | * (scic_cb_controller_start_complete()). | 884 | * (scic_cb_controller_start_complete()). |
894 | */ | 885 | */ |
895 | static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic) | 886 | static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost) |
896 | { | 887 | { |
897 | struct isci_host *ihost = scic_to_ihost(scic); | 888 | struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; |
898 | struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; | ||
899 | struct isci_phy *iphy; | 889 | struct isci_phy *iphy; |
900 | enum sci_status status; | 890 | enum sci_status status; |
901 | 891 | ||
902 | status = SCI_SUCCESS; | 892 | status = SCI_SUCCESS; |
903 | 893 | ||
904 | if (scic->phy_startup_timer_pending) | 894 | if (ihost->phy_startup_timer_pending) |
905 | return status; | 895 | return status; |
906 | 896 | ||
907 | if (scic->next_phy_to_start >= SCI_MAX_PHYS) { | 897 | if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { |
908 | bool is_controller_start_complete = true; | 898 | bool is_controller_start_complete = true; |
909 | u32 state; | 899 | u32 state; |
910 | u8 index; | 900 | u8 index; |
@@ -934,16 +924,16 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro
934 | * The controller has successfully finished the start process. | 924 | * The controller has successfully finished the start process. |
935 | * Inform the SCI Core user and transition to the READY state. */ | 925 | * Inform the SCI Core user and transition to the READY state. */ |
936 | if (is_controller_start_complete == true) { | 926 | if (is_controller_start_complete == true) { |
937 | scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS); | 927 | scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS); |
938 | sci_del_timer(&scic->phy_timer); | 928 | sci_del_timer(&ihost->phy_timer); |
939 | scic->phy_startup_timer_pending = false; | 929 | ihost->phy_startup_timer_pending = false; |
940 | } | 930 | } |
941 | } else { | 931 | } else { |
942 | iphy = &ihost->phys[scic->next_phy_to_start]; | 932 | iphy = &ihost->phys[ihost->next_phy_to_start]; |
943 | 933 | ||
944 | if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { | 934 | if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { |
945 | if (phy_get_non_dummy_port(iphy) == NULL) { | 935 | if (phy_get_non_dummy_port(iphy) == NULL) { |
946 | scic->next_phy_to_start++; | 936 | ihost->next_phy_to_start++; |
947 | 937 | ||
948 | /* Caution recursion ahead be forwarned | 938 | /* Caution recursion ahead be forwarned |
949 | * | 939 | * |
@@ -954,27 +944,27 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro
954 | * incorrectly for the PORT or it was never | 944 | * incorrectly for the PORT or it was never |
955 | * assigned to a PORT | 945 | * assigned to a PORT |
956 | */ | 946 | */ |
957 | return scic_sds_controller_start_next_phy(scic); | 947 | return scic_sds_controller_start_next_phy(ihost); |
958 | } | 948 | } |
959 | } | 949 | } |
960 | 950 | ||
961 | status = scic_sds_phy_start(iphy); | 951 | status = scic_sds_phy_start(iphy); |
962 | 952 | ||
963 | if (status == SCI_SUCCESS) { | 953 | if (status == SCI_SUCCESS) { |
964 | sci_mod_timer(&scic->phy_timer, | 954 | sci_mod_timer(&ihost->phy_timer, |
965 | SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); | 955 | SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); |
966 | scic->phy_startup_timer_pending = true; | 956 | ihost->phy_startup_timer_pending = true; |
967 | } else { | 957 | } else { |
968 | dev_warn(scic_to_dev(scic), | 958 | dev_warn(&ihost->pdev->dev, |
969 | "%s: Controller stop operation failed " | 959 | "%s: Controller stop operation failed " |
970 | "to stop phy %d because of status " | 960 | "to stop phy %d because of status " |
971 | "%d.\n", | 961 | "%d.\n", |
972 | __func__, | 962 | __func__, |
973 | ihost->phys[scic->next_phy_to_start].phy_index, | 963 | ihost->phys[ihost->next_phy_to_start].phy_index, |
974 | status); | 964 | status); |
975 | } | 965 | } |
976 | 966 | ||
977 | scic->next_phy_to_start++; | 967 | ihost->next_phy_to_start++; |
978 | } | 968 | } |
979 | 969 | ||
980 | return status; | 970 | return status; |
@@ -983,8 +973,7 @@ static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_contro
983 | static void phy_startup_timeout(unsigned long data) | 973 | static void phy_startup_timeout(unsigned long data) |
984 | { | 974 | { |
985 | struct sci_timer *tmr = (struct sci_timer *)data; | 975 | struct sci_timer *tmr = (struct sci_timer *)data; |
986 | struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), phy_timer); | 976 | struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); |
987 | struct isci_host *ihost = scic_to_ihost(scic); | ||
988 | unsigned long flags; | 977 | unsigned long flags; |
989 | enum sci_status status; | 978 | enum sci_status status; |
990 | 979 | ||
@@ -993,10 +982,10 @@ static void phy_startup_timeout(unsigned long data)
993 | if (tmr->cancel) | 982 | if (tmr->cancel) |
994 | goto done; | 983 | goto done; |
995 | 984 | ||
996 | scic->phy_startup_timer_pending = false; | 985 | ihost->phy_startup_timer_pending = false; |
997 | 986 | ||
998 | do { | 987 | do { |
999 | status = scic_sds_controller_start_next_phy(scic); | 988 | status = scic_sds_controller_start_next_phy(ihost); |
1000 | } while (status != SCI_SUCCESS); | 989 | } while (status != SCI_SUCCESS); |
1001 | 990 | ||
1002 | done: | 991 | done: |
@@ -1008,15 +997,14 @@ static u16 isci_tci_active(struct isci_host *ihost)
1008 | return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); | 997 | return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); |
1009 | } | 998 | } |
1010 | 999 | ||
1011 | static enum sci_status scic_controller_start(struct scic_sds_controller *scic, | 1000 | static enum sci_status scic_controller_start(struct isci_host *ihost, |
1012 | u32 timeout) | 1001 | u32 timeout) |
1013 | { | 1002 | { |
1014 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1015 | enum sci_status result; | 1003 | enum sci_status result; |
1016 | u16 index; | 1004 | u16 index; |
1017 | 1005 | ||
1018 | if (scic->sm.current_state_id != SCIC_INITIALIZED) { | 1006 | if (ihost->sm.current_state_id != SCIC_INITIALIZED) { |
1019 | dev_warn(scic_to_dev(scic), | 1007 | dev_warn(&ihost->pdev->dev, |
1020 | "SCIC Controller start operation requested in " | 1008 | "SCIC Controller start operation requested in " |
1021 | "invalid state\n"); | 1009 | "invalid state\n"); |
1022 | return SCI_FAILURE_INVALID_STATE; | 1010 | return SCI_FAILURE_INVALID_STATE; |
@@ -1026,34 +1014,34 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
1026 | BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); | 1014 | BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8); |
1027 | ihost->tci_head = 0; | 1015 | ihost->tci_head = 0; |
1028 | ihost->tci_tail = 0; | 1016 | ihost->tci_tail = 0; |
1029 | for (index = 0; index < scic->task_context_entries; index++) | 1017 | for (index = 0; index < ihost->task_context_entries; index++) |
1030 | isci_tci_free(ihost, index); | 1018 | isci_tci_free(ihost, index); |
1031 | 1019 | ||
1032 | /* Build the RNi free pool */ | 1020 | /* Build the RNi free pool */ |
1033 | scic_sds_remote_node_table_initialize( | 1021 | scic_sds_remote_node_table_initialize( |
1034 | &scic->available_remote_nodes, | 1022 | &ihost->available_remote_nodes, |
1035 | scic->remote_node_entries); | 1023 | ihost->remote_node_entries); |
1036 | 1024 | ||
1037 | /* | 1025 | /* |
1038 | * Before anything else lets make sure we will not be | 1026 | * Before anything else lets make sure we will not be |
1039 | * interrupted by the hardware. | 1027 | * interrupted by the hardware. |
1040 | */ | 1028 | */ |
1041 | scic_controller_disable_interrupts(scic); | 1029 | scic_controller_disable_interrupts(ihost); |
1042 | 1030 | ||
1043 | /* Enable the port task scheduler */ | 1031 | /* Enable the port task scheduler */ |
1044 | scic_sds_controller_enable_port_task_scheduler(scic); | 1032 | scic_sds_controller_enable_port_task_scheduler(ihost); |
1045 | 1033 | ||
1046 | /* Assign all the task entries to scic physical function */ | 1034 | /* Assign all the task entries to ihost physical function */ |
1047 | scic_sds_controller_assign_task_entries(scic); | 1035 | scic_sds_controller_assign_task_entries(ihost); |
1048 | 1036 | ||
1049 | /* Now initialize the completion queue */ | 1037 | /* Now initialize the completion queue */ |
1050 | scic_sds_controller_initialize_completion_queue(scic); | 1038 | scic_sds_controller_initialize_completion_queue(ihost); |
1051 | 1039 | ||
1052 | /* Initialize the unsolicited frame queue for use */ | 1040 | /* Initialize the unsolicited frame queue for use */ |
1053 | scic_sds_controller_initialize_unsolicited_frame_queue(scic); | 1041 | scic_sds_controller_initialize_unsolicited_frame_queue(ihost); |
1054 | 1042 | ||
1055 | /* Start all of the ports on this controller */ | 1043 | /* Start all of the ports on this controller */ |
1056 | for (index = 0; index < scic->logical_port_entries; index++) { | 1044 | for (index = 0; index < ihost->logical_port_entries; index++) { |
1057 | struct isci_port *iport = &ihost->ports[index]; | 1045 | struct isci_port *iport = &ihost->ports[index]; |
1058 | 1046 | ||
1059 | result = scic_sds_port_start(iport); | 1047 | result = scic_sds_port_start(iport); |
@@ -1061,11 +1049,11 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
1061 | return result; | 1049 | return result; |
1062 | } | 1050 | } |
1063 | 1051 | ||
1064 | scic_sds_controller_start_next_phy(scic); | 1052 | scic_sds_controller_start_next_phy(ihost); |
1065 | 1053 | ||
1066 | sci_mod_timer(&scic->timer, timeout); | 1054 | sci_mod_timer(&ihost->timer, timeout); |
1067 | 1055 | ||
1068 | sci_change_state(&scic->sm, SCIC_STARTING); | 1056 | sci_change_state(&ihost->sm, SCIC_STARTING); |
1069 | 1057 | ||
1070 | return SCI_SUCCESS; | 1058 | return SCI_SUCCESS; |
1071 | } | 1059 | } |
@@ -1073,35 +1061,35 @@ static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
1073 | void isci_host_scan_start(struct Scsi_Host *shost) | 1061 | void isci_host_scan_start(struct Scsi_Host *shost) |
1074 | { | 1062 | { |
1075 | struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; | 1063 | struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; |
1076 | unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci); | 1064 | unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost); |
1077 | 1065 | ||
1078 | set_bit(IHOST_START_PENDING, &ihost->flags); | 1066 | set_bit(IHOST_START_PENDING, &ihost->flags); |
1079 | 1067 | ||
1080 | spin_lock_irq(&ihost->scic_lock); | 1068 | spin_lock_irq(&ihost->scic_lock); |
1081 | scic_controller_start(&ihost->sci, tmo); | 1069 | scic_controller_start(ihost, tmo); |
1082 | scic_controller_enable_interrupts(&ihost->sci); | 1070 | scic_controller_enable_interrupts(ihost); |
1083 | spin_unlock_irq(&ihost->scic_lock); | 1071 | spin_unlock_irq(&ihost->scic_lock); |
1084 | } | 1072 | } |
1085 | 1073 | ||
1086 | static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) | 1074 | static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) |
1087 | { | 1075 | { |
1088 | isci_host_change_state(ihost, isci_stopped); | 1076 | isci_host_change_state(ihost, isci_stopped); |
1089 | scic_controller_disable_interrupts(&ihost->sci); | 1077 | scic_controller_disable_interrupts(ihost); |
1090 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); | 1078 | clear_bit(IHOST_STOP_PENDING, &ihost->flags); |
1091 | wake_up(&ihost->eventq); | 1079 | wake_up(&ihost->eventq); |
1092 | } | 1080 | } |
1093 | 1081 | ||
1094 | static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic) | 1082 | static void scic_sds_controller_completion_handler(struct isci_host *ihost) |
1095 | { | 1083 | { |
1096 | /* Empty out the completion queue */ | 1084 | /* Empty out the completion queue */ |
1097 | if (scic_sds_controller_completion_queue_has_entries(scic)) | 1085 | if (scic_sds_controller_completion_queue_has_entries(ihost)) |
1098 | scic_sds_controller_process_completions(scic); | 1086 | scic_sds_controller_process_completions(ihost); |
1099 | 1087 | ||
1100 | /* Clear the interrupt and enable all interrupts again */ | 1088 | /* Clear the interrupt and enable all interrupts again */ |
1101 | writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); | 1089 | writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); |
1102 | /* Could we write the value of SMU_ISR_COMPLETION? */ | 1090 | /* Could we write the value of SMU_ISR_COMPLETION? */ |
1103 | writel(0xFF000000, &scic->smu_registers->interrupt_mask); | 1091 | writel(0xFF000000, &ihost->smu_registers->interrupt_mask); |
1104 | writel(0, &scic->smu_registers->interrupt_mask); | 1092 | writel(0, &ihost->smu_registers->interrupt_mask); |
1105 | } | 1093 | } |
1106 | 1094 | ||
1107 | /** | 1095 | /** |
@@ -1114,7 +1102,7 @@ static void scic_sds_controller_completion_handler(struct scic_sds_controller *s
1114 | */ | 1102 | */ |
1115 | static void isci_host_completion_routine(unsigned long data) | 1103 | static void isci_host_completion_routine(unsigned long data) |
1116 | { | 1104 | { |
1117 | struct isci_host *isci_host = (struct isci_host *)data; | 1105 | struct isci_host *ihost = (struct isci_host *)data; |
1118 | struct list_head completed_request_list; | 1106 | struct list_head completed_request_list; |
1119 | struct list_head errored_request_list; | 1107 | struct list_head errored_request_list; |
1120 | struct list_head *current_position; | 1108 | struct list_head *current_position; |
@@ -1126,20 +1114,20 @@ static void isci_host_completion_routine(unsigned long data)
1126 | INIT_LIST_HEAD(&completed_request_list); | 1114 | INIT_LIST_HEAD(&completed_request_list); |
1127 | INIT_LIST_HEAD(&errored_request_list); | 1115 | INIT_LIST_HEAD(&errored_request_list); |
1128 | 1116 | ||
1129 | spin_lock_irq(&isci_host->scic_lock); | 1117 | spin_lock_irq(&ihost->scic_lock); |
1130 | 1118 | ||
1131 | scic_sds_controller_completion_handler(&isci_host->sci); | 1119 | scic_sds_controller_completion_handler(ihost); |
1132 | 1120 | ||
1133 | /* Take the lists of completed I/Os from the host. */ | 1121 | /* Take the lists of completed I/Os from the host. */ |
1134 | 1122 | ||
1135 | list_splice_init(&isci_host->requests_to_complete, | 1123 | list_splice_init(&ihost->requests_to_complete, |
1136 | &completed_request_list); | 1124 | &completed_request_list); |
1137 | 1125 | ||
1138 | /* Take the list of errored I/Os from the host. */ | 1126 | /* Take the list of errored I/Os from the host. */ |
1139 | list_splice_init(&isci_host->requests_to_errorback, | 1127 | list_splice_init(&ihost->requests_to_errorback, |
1140 | &errored_request_list); | 1128 | &errored_request_list); |
1141 | 1129 | ||
1142 | spin_unlock_irq(&isci_host->scic_lock); | 1130 | spin_unlock_irq(&ihost->scic_lock); |
1143 | 1131 | ||
1144 | /* Process any completions in the lists. */ | 1132 | /* Process any completions in the lists. */ |
1145 | list_for_each_safe(current_position, next_position, | 1133 | list_for_each_safe(current_position, next_position, |
@@ -1150,7 +1138,7 @@ static void isci_host_completion_routine(unsigned long data)
1150 | task = isci_request_access_task(request); | 1138 | task = isci_request_access_task(request); |
1151 | 1139 | ||
1152 | /* Normal notification (task_done) */ | 1140 | /* Normal notification (task_done) */ |
1153 | dev_dbg(&isci_host->pdev->dev, | 1141 | dev_dbg(&ihost->pdev->dev, |
1154 | "%s: Normal - request/task = %p/%p\n", | 1142 | "%s: Normal - request/task = %p/%p\n", |
1155 | __func__, | 1143 | __func__, |
1156 | request, | 1144 | request, |
@@ -1169,9 +1157,9 @@ static void isci_host_completion_routine(unsigned long data)
1169 | } | 1157 | } |
1170 | } | 1158 | } |
1171 | 1159 | ||
1172 | spin_lock_irq(&isci_host->scic_lock); | 1160 | spin_lock_irq(&ihost->scic_lock); |
1173 | isci_free_tag(isci_host, request->io_tag); | 1161 | isci_free_tag(ihost, request->io_tag); |
1174 | spin_unlock_irq(&isci_host->scic_lock); | 1162 | spin_unlock_irq(&ihost->scic_lock); |
1175 | } | 1163 | } |
1176 | list_for_each_entry_safe(request, next_request, &errored_request_list, | 1164 | list_for_each_entry_safe(request, next_request, &errored_request_list, |
1177 | completed_node) { | 1165 | completed_node) { |
@@ -1179,7 +1167,7 @@ static void isci_host_completion_routine(unsigned long data)
1179 | task = isci_request_access_task(request); | 1167 | task = isci_request_access_task(request); |
1180 | 1168 | ||
1181 | /* Use sas_task_abort */ | 1169 | /* Use sas_task_abort */ |
1182 | dev_warn(&isci_host->pdev->dev, | 1170 | dev_warn(&ihost->pdev->dev, |
1183 | "%s: Error - request/task = %p/%p\n", | 1171 | "%s: Error - request/task = %p/%p\n", |
1184 | __func__, | 1172 | __func__, |
1185 | request, | 1173 | request, |
@@ -1202,13 +1190,13 @@ static void isci_host_completion_routine(unsigned long data)
1202 | * it. | 1190 | * it. |
1203 | */ | 1191 | */ |
1204 | 1192 | ||
1205 | spin_lock_irq(&isci_host->scic_lock); | 1193 | spin_lock_irq(&ihost->scic_lock); |
1206 | /* Remove the request from the remote device's list | 1194 | /* Remove the request from the remote device's list |
1207 | * of pending requests. | 1195 | * of pending requests. |
1208 | */ | 1196 | */ |
1209 | list_del_init(&request->dev_node); | 1197 | list_del_init(&request->dev_node); |
1210 | isci_free_tag(isci_host, request->io_tag); | 1198 | isci_free_tag(ihost, request->io_tag); |
1211 | spin_unlock_irq(&isci_host->scic_lock); | 1199 | spin_unlock_irq(&ihost->scic_lock); |
1212 | } | 1200 | } |
1213 | } | 1201 | } |
1214 | 1202 | ||
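The completion routine above detaches the completed and errored request lists while holding scic_lock and only then walks them, so the per-request completion work never runs with the host lock held. A minimal standalone model of that splice-under-lock / process-unlocked pattern (plain user-space C, hypothetical names, not driver code):

/* Detach all pending work while holding the lock, then process it unlocked. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;                    /* guarded by 'lock' */

static void drain_pending(void)
{
	struct node *batch, *n;

	pthread_mutex_lock(&lock);
	batch = pending;                        /* splice: take the whole list */
	pending = NULL;                         /* leave an empty list behind */
	pthread_mutex_unlock(&lock);

	for (n = batch; n; n = n->next)         /* slow work runs unlocked */
		printf("completing request %d\n", n->id);
}

int main(void)
{
	struct node a = { .next = NULL, .id = 1 };

	pthread_mutex_lock(&lock);
	pending = &a;
	pthread_mutex_unlock(&lock);

	drain_pending();
	return 0;
}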
@@ -1232,18 +1220,18 @@ static void isci_host_completion_routine(unsigned long data) | |||
1232 | * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the | 1220 | * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the |
1233 | * controller is in neither the STARTED nor the STOPPED state. | 1221 | * controller is in neither the STARTED nor the STOPPED state. |
1234 | */ | 1222 | */ |
1235 | static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, | 1223 | static enum sci_status scic_controller_stop(struct isci_host *ihost, |
1236 | u32 timeout) | 1224 | u32 timeout) |
1237 | { | 1225 | { |
1238 | if (scic->sm.current_state_id != SCIC_READY) { | 1226 | if (ihost->sm.current_state_id != SCIC_READY) { |
1239 | dev_warn(scic_to_dev(scic), | 1227 | dev_warn(&ihost->pdev->dev, |
1240 | "SCIC Controller stop operation requested in " | 1228 | "SCIC Controller stop operation requested in " |
1241 | "invalid state\n"); | 1229 | "invalid state\n"); |
1242 | return SCI_FAILURE_INVALID_STATE; | 1230 | return SCI_FAILURE_INVALID_STATE; |
1243 | } | 1231 | } |
1244 | 1232 | ||
1245 | sci_mod_timer(&scic->timer, timeout); | 1233 | sci_mod_timer(&ihost->timer, timeout); |
1246 | sci_change_state(&scic->sm, SCIC_STOPPING); | 1234 | sci_change_state(&ihost->sm, SCIC_STOPPING); |
1247 | return SCI_SUCCESS; | 1235 | return SCI_SUCCESS; |
1248 | } | 1236 | } |
1249 | 1237 | ||
@@ -1259,9 +1247,9 @@ static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, | |||
1259 | * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if | 1247 | * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if |
1260 | * the controller reset operation is unable to complete. | 1248 | * the controller reset operation is unable to complete. |
1261 | */ | 1249 | */ |
1262 | static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) | 1250 | static enum sci_status scic_controller_reset(struct isci_host *ihost) |
1263 | { | 1251 | { |
1264 | switch (scic->sm.current_state_id) { | 1252 | switch (ihost->sm.current_state_id) { |
1265 | case SCIC_RESET: | 1253 | case SCIC_RESET: |
1266 | case SCIC_READY: | 1254 | case SCIC_READY: |
1267 | case SCIC_STOPPED: | 1255 | case SCIC_STOPPED: |
@@ -1270,10 +1258,10 @@ static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) | |||
1270 | * The reset operation is not a graceful cleanup, just | 1258 | * The reset operation is not a graceful cleanup, just |
1271 | * perform the state transition. | 1259 | * perform the state transition. |
1272 | */ | 1260 | */ |
1273 | sci_change_state(&scic->sm, SCIC_RESETTING); | 1261 | sci_change_state(&ihost->sm, SCIC_RESETTING); |
1274 | return SCI_SUCCESS; | 1262 | return SCI_SUCCESS; |
1275 | default: | 1263 | default: |
1276 | dev_warn(scic_to_dev(scic), | 1264 | dev_warn(&ihost->pdev->dev, |
1277 | "SCIC Controller reset operation requested in " | 1265 | "SCIC Controller reset operation requested in " |
1278 | "invalid state\n"); | 1266 | "invalid state\n"); |
1279 | return SCI_FAILURE_INVALID_STATE; | 1267 | return SCI_FAILURE_INVALID_STATE; |
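Per the comments above, a stop request is honoured only from the READY state, while a reset is accepted from RESET, READY or STOPPED (plus any cases not visible in this hunk). A tiny standalone sketch of that gating, with illustrative enum values rather than the driver's SCIC_* constants:

#include <stdbool.h>

enum ctrl_state { CTRL_RESET, CTRL_STARTING, CTRL_READY, CTRL_STOPPING, CTRL_STOPPED };

static bool stop_allowed(enum ctrl_state s)
{
	return s == CTRL_READY;                 /* otherwise SCI_FAILURE_INVALID_STATE */
}

static bool reset_allowed(enum ctrl_state s)
{
	switch (s) {
	case CTRL_RESET:
	case CTRL_READY:
	case CTRL_STOPPED:
		return true;                    /* hard reset, no graceful cleanup */
	default:
		return false;
	}
}

int main(void)
{
	return (stop_allowed(CTRL_READY) && reset_allowed(CTRL_STOPPED)) ? 0 : 1;
}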
@@ -1298,14 +1286,14 @@ void isci_host_deinit(struct isci_host *ihost) | |||
1298 | set_bit(IHOST_STOP_PENDING, &ihost->flags); | 1286 | set_bit(IHOST_STOP_PENDING, &ihost->flags); |
1299 | 1287 | ||
1300 | spin_lock_irq(&ihost->scic_lock); | 1288 | spin_lock_irq(&ihost->scic_lock); |
1301 | scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT); | 1289 | scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT); |
1302 | spin_unlock_irq(&ihost->scic_lock); | 1290 | spin_unlock_irq(&ihost->scic_lock); |
1303 | 1291 | ||
1304 | wait_for_stop(ihost); | 1292 | wait_for_stop(ihost); |
1305 | scic_controller_reset(&ihost->sci); | 1293 | scic_controller_reset(ihost); |
1306 | 1294 | ||
1307 | /* Cancel any/all outstanding port timers */ | 1295 | /* Cancel any/all outstanding port timers */ |
1308 | for (i = 0; i < ihost->sci.logical_port_entries; i++) { | 1296 | for (i = 0; i < ihost->logical_port_entries; i++) { |
1309 | struct isci_port *iport = &ihost->ports[i]; | 1297 | struct isci_port *iport = &ihost->ports[i]; |
1310 | del_timer_sync(&iport->timer.timer); | 1298 | del_timer_sync(&iport->timer.timer); |
1311 | } | 1299 | } |
@@ -1316,13 +1304,13 @@ void isci_host_deinit(struct isci_host *ihost) | |||
1316 | del_timer_sync(&iphy->sata_timer.timer); | 1304 | del_timer_sync(&iphy->sata_timer.timer); |
1317 | } | 1305 | } |
1318 | 1306 | ||
1319 | del_timer_sync(&ihost->sci.port_agent.timer.timer); | 1307 | del_timer_sync(&ihost->port_agent.timer.timer); |
1320 | 1308 | ||
1321 | del_timer_sync(&ihost->sci.power_control.timer.timer); | 1309 | del_timer_sync(&ihost->power_control.timer.timer); |
1322 | 1310 | ||
1323 | del_timer_sync(&ihost->sci.timer.timer); | 1311 | del_timer_sync(&ihost->timer.timer); |
1324 | 1312 | ||
1325 | del_timer_sync(&ihost->sci.phy_timer.timer); | 1313 | del_timer_sync(&ihost->phy_timer.timer); |
1326 | } | 1314 | } |
1327 | 1315 | ||
1328 | static void __iomem *scu_base(struct isci_host *isci_host) | 1316 | static void __iomem *scu_base(struct isci_host *isci_host) |
@@ -1369,16 +1357,16 @@ static void isci_user_parameters_get( | |||
1369 | 1357 | ||
1370 | static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) | 1358 | static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm) |
1371 | { | 1359 | { |
1372 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1360 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1373 | 1361 | ||
1374 | sci_change_state(&scic->sm, SCIC_RESET); | 1362 | sci_change_state(&ihost->sm, SCIC_RESET); |
1375 | } | 1363 | } |
1376 | 1364 | ||
1377 | static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) | 1365 | static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm) |
1378 | { | 1366 | { |
1379 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1367 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1380 | 1368 | ||
1381 | sci_del_timer(&scic->timer); | 1369 | sci_del_timer(&ihost->timer); |
1382 | } | 1370 | } |
1383 | 1371 | ||
1384 | #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 | 1372 | #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 |
@@ -1405,10 +1393,10 @@ static inline void scic_sds_controller_starting_state_exit(struct sci_base_state | |||
1405 | * SCI_SUCCESS The user successfully updated the interrupt coalescence. | 1393 | * SCI_SUCCESS The user successfully updated the interrupt coalescence. |
1406 | * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. | 1394 | * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. |
1407 | */ | 1395 | */ |
1408 | static enum sci_status scic_controller_set_interrupt_coalescence( | 1396 | static enum sci_status |
1409 | struct scic_sds_controller *scic_controller, | 1397 | scic_controller_set_interrupt_coalescence(struct isci_host *ihost, |
1410 | u32 coalesce_number, | 1398 | u32 coalesce_number, |
1411 | u32 coalesce_timeout) | 1399 | u32 coalesce_timeout) |
1412 | { | 1400 | { |
1413 | u8 timeout_encode = 0; | 1401 | u8 timeout_encode = 0; |
1414 | u32 min = 0; | 1402 | u32 min = 0; |
@@ -1491,11 +1479,11 @@ static enum sci_status scic_controller_set_interrupt_coalescence( | |||
1491 | 1479 | ||
1492 | writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | | 1480 | writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | |
1493 | SMU_ICC_GEN_VAL(TIMER, timeout_encode), | 1481 | SMU_ICC_GEN_VAL(TIMER, timeout_encode), |
1494 | &scic_controller->smu_registers->interrupt_coalesce_control); | 1482 | &ihost->smu_registers->interrupt_coalesce_control); |
1495 | 1483 | ||
1496 | 1484 | ||
1497 | scic_controller->interrupt_coalesce_number = (u16)coalesce_number; | 1485 | ihost->interrupt_coalesce_number = (u16)coalesce_number; |
1498 | scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100; | 1486 | ihost->interrupt_coalesce_timeout = coalesce_timeout / 100; |
1499 | 1487 | ||
1500 | return SCI_SUCCESS; | 1488 | return SCI_SUCCESS; |
1501 | } | 1489 | } |
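The coalescence timeout is encoded against the 853 ns base defined above; the encoding loop itself is outside this hunk. As a rough, hedged illustration only (not the driver's exact units or bounds), one way to derive such an encode is to find the smallest doubling of the base that covers the requested timeout:

#include <stdint.h>
#include <stdio.h>

#define TIMEOUT_BASE_NS 853u

/* Hypothetical helper: smallest n such that 853 ns * 2^n >= req_ns. */
static int encode_timeout_ns(uint64_t req_ns)
{
	uint64_t span = TIMEOUT_BASE_NS;
	int encode = 0;

	if (req_ns == 0)
		return 0;                       /* zero means coalescing disabled */

	while (span < req_ns && encode < 31) {  /* 31 is an arbitrary cap here */
		span <<= 1;                     /* each step doubles the range */
		encode++;
	}
	return encode;
}

int main(void)
{
	printf("250 us -> encode %d\n", encode_timeout_ns(250 * 1000));
	return 0;
}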
@@ -1503,26 +1491,25 @@ static enum sci_status scic_controller_set_interrupt_coalescence( | |||
1503 | 1491 | ||
1504 | static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) | 1492 | static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm) |
1505 | { | 1493 | { |
1506 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1494 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1507 | 1495 | ||
1508 | /* set the default interrupt coalescence number and timeout value. */ | 1496 | /* set the default interrupt coalescence number and timeout value. */ |
1509 | scic_controller_set_interrupt_coalescence(scic, 0x10, 250); | 1497 | scic_controller_set_interrupt_coalescence(ihost, 0x10, 250); |
1510 | } | 1498 | } |
1511 | 1499 | ||
1512 | static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) | 1500 | static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm) |
1513 | { | 1501 | { |
1514 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1502 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1515 | 1503 | ||
1516 | /* disable interrupt coalescence. */ | 1504 | /* disable interrupt coalescence. */ |
1517 | scic_controller_set_interrupt_coalescence(scic, 0, 0); | 1505 | scic_controller_set_interrupt_coalescence(ihost, 0, 0); |
1518 | } | 1506 | } |
1519 | 1507 | ||
1520 | static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic) | 1508 | static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost) |
1521 | { | 1509 | { |
1522 | u32 index; | 1510 | u32 index; |
1523 | enum sci_status status; | 1511 | enum sci_status status; |
1524 | enum sci_status phy_status; | 1512 | enum sci_status phy_status; |
1525 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1526 | 1513 | ||
1527 | status = SCI_SUCCESS; | 1514 | status = SCI_SUCCESS; |
1528 | 1515 | ||
@@ -1533,7 +1520,7 @@ static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller | |||
1533 | phy_status != SCI_FAILURE_INVALID_STATE) { | 1520 | phy_status != SCI_FAILURE_INVALID_STATE) { |
1534 | status = SCI_FAILURE; | 1521 | status = SCI_FAILURE; |
1535 | 1522 | ||
1536 | dev_warn(scic_to_dev(scic), | 1523 | dev_warn(&ihost->pdev->dev, |
1537 | "%s: Controller stop operation failed to stop " | 1524 | "%s: Controller stop operation failed to stop " |
1538 | "phy %d because of status %d.\n", | 1525 | "phy %d because of status %d.\n", |
1539 | __func__, | 1526 | __func__, |
@@ -1544,14 +1531,13 @@ static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller | |||
1544 | return status; | 1531 | return status; |
1545 | } | 1532 | } |
1546 | 1533 | ||
1547 | static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic) | 1534 | static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost) |
1548 | { | 1535 | { |
1549 | u32 index; | 1536 | u32 index; |
1550 | enum sci_status port_status; | 1537 | enum sci_status port_status; |
1551 | enum sci_status status = SCI_SUCCESS; | 1538 | enum sci_status status = SCI_SUCCESS; |
1552 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1553 | 1539 | ||
1554 | for (index = 0; index < scic->logical_port_entries; index++) { | 1540 | for (index = 0; index < ihost->logical_port_entries; index++) { |
1555 | struct isci_port *iport = &ihost->ports[index]; | 1541 | struct isci_port *iport = &ihost->ports[index]; |
1556 | 1542 | ||
1557 | port_status = scic_sds_port_stop(iport); | 1543 | port_status = scic_sds_port_stop(iport); |
@@ -1560,7 +1546,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller | |||
1560 | (port_status != SCI_FAILURE_INVALID_STATE)) { | 1546 | (port_status != SCI_FAILURE_INVALID_STATE)) { |
1561 | status = SCI_FAILURE; | 1547 | status = SCI_FAILURE; |
1562 | 1548 | ||
1563 | dev_warn(scic_to_dev(scic), | 1549 | dev_warn(&ihost->pdev->dev, |
1564 | "%s: Controller stop operation failed to " | 1550 | "%s: Controller stop operation failed to " |
1565 | "stop port %d because of status %d.\n", | 1551 | "stop port %d because of status %d.\n", |
1566 | __func__, | 1552 | __func__, |
@@ -1572,7 +1558,7 @@ static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller | |||
1572 | return status; | 1558 | return status; |
1573 | } | 1559 | } |
1574 | 1560 | ||
1575 | static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic) | 1561 | static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost) |
1576 | { | 1562 | { |
1577 | u32 index; | 1563 | u32 index; |
1578 | enum sci_status status; | 1564 | enum sci_status status; |
@@ -1580,19 +1566,19 @@ static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controll | |||
1580 | 1566 | ||
1581 | status = SCI_SUCCESS; | 1567 | status = SCI_SUCCESS; |
1582 | 1568 | ||
1583 | for (index = 0; index < scic->remote_node_entries; index++) { | 1569 | for (index = 0; index < ihost->remote_node_entries; index++) { |
1584 | if (scic->device_table[index] != NULL) { | 1570 | if (ihost->device_table[index] != NULL) { |
1585 | /* / @todo What timeout value do we want to provide to this request? */ | 1571 | /* / @todo What timeout value do we want to provide to this request? */ |
1586 | device_status = scic_remote_device_stop(scic->device_table[index], 0); | 1572 | device_status = scic_remote_device_stop(ihost->device_table[index], 0); |
1587 | 1573 | ||
1588 | if ((device_status != SCI_SUCCESS) && | 1574 | if ((device_status != SCI_SUCCESS) && |
1589 | (device_status != SCI_FAILURE_INVALID_STATE)) { | 1575 | (device_status != SCI_FAILURE_INVALID_STATE)) { |
1590 | dev_warn(scic_to_dev(scic), | 1576 | dev_warn(&ihost->pdev->dev, |
1591 | "%s: Controller stop operation failed " | 1577 | "%s: Controller stop operation failed " |
1592 | "to stop device 0x%p because of " | 1578 | "to stop device 0x%p because of " |
1593 | "status %d.\n", | 1579 | "status %d.\n", |
1594 | __func__, | 1580 | __func__, |
1595 | scic->device_table[index], device_status); | 1581 | ihost->device_table[index], device_status); |
1596 | } | 1582 | } |
1597 | } | 1583 | } |
1598 | } | 1584 | } |
@@ -1602,19 +1588,19 @@ static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controll | |||
1602 | 1588 | ||
1603 | static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) | 1589 | static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm) |
1604 | { | 1590 | { |
1605 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1591 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1606 | 1592 | ||
1607 | /* Stop all of the components for this controller */ | 1593 | /* Stop all of the components for this controller */ |
1608 | scic_sds_controller_stop_phys(scic); | 1594 | scic_sds_controller_stop_phys(ihost); |
1609 | scic_sds_controller_stop_ports(scic); | 1595 | scic_sds_controller_stop_ports(ihost); |
1610 | scic_sds_controller_stop_devices(scic); | 1596 | scic_sds_controller_stop_devices(ihost); |
1611 | } | 1597 | } |
1612 | 1598 | ||
1613 | static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) | 1599 | static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm) |
1614 | { | 1600 | { |
1615 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1601 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1616 | 1602 | ||
1617 | sci_del_timer(&scic->timer); | 1603 | sci_del_timer(&ihost->timer); |
1618 | } | 1604 | } |
1619 | 1605 | ||
1620 | 1606 | ||
@@ -1623,30 +1609,30 @@ static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machin | |||
1623 | * | 1609 | * |
1624 | * This method will reset the controller hardware. | 1610 | * This method will reset the controller hardware. |
1625 | */ | 1611 | */ |
1626 | static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic) | 1612 | static void scic_sds_controller_reset_hardware(struct isci_host *ihost) |
1627 | { | 1613 | { |
1628 | /* Disable interrupts so we don't take any spurious interrupts */ | 1614 | /* Disable interrupts so we don't take any spurious interrupts */ |
1629 | scic_controller_disable_interrupts(scic); | 1615 | scic_controller_disable_interrupts(ihost); |
1630 | 1616 | ||
1631 | /* Reset the SCU */ | 1617 | /* Reset the SCU */ |
1632 | writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control); | 1618 | writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control); |
1633 | 1619 | ||
1634 | /* Delay for 1ms before clearing the CQP and UFQPR. */ | 1620 | /* Delay for 1ms before clearing the CQP and UFQPR. */ |
1635 | udelay(1000); | 1621 | udelay(1000); |
1636 | 1622 | ||
1637 | /* The write to the CQGR clears the CQP */ | 1623 | /* The write to the CQGR clears the CQP */ |
1638 | writel(0x00000000, &scic->smu_registers->completion_queue_get); | 1624 | writel(0x00000000, &ihost->smu_registers->completion_queue_get); |
1639 | 1625 | ||
1640 | /* The write to the UFQGP clears the UFQPR */ | 1626 | /* The write to the UFQGP clears the UFQPR */ |
1641 | writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer); | 1627 | writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
1642 | } | 1628 | } |
1643 | 1629 | ||
1644 | static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) | 1630 | static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm) |
1645 | { | 1631 | { |
1646 | struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm); | 1632 | struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); |
1647 | 1633 | ||
1648 | scic_sds_controller_reset_hardware(scic); | 1634 | scic_sds_controller_reset_hardware(ihost); |
1649 | sci_change_state(&scic->sm, SCIC_RESET); | 1635 | sci_change_state(&ihost->sm, SCIC_RESET); |
1650 | } | 1636 | } |
1651 | 1637 | ||
1652 | static const struct sci_base_state scic_sds_controller_state_table[] = { | 1638 | static const struct sci_base_state scic_sds_controller_state_table[] = { |
@@ -1674,58 +1660,56 @@ static const struct sci_base_state scic_sds_controller_state_table[] = { | |||
1674 | [SCIC_FAILED] = {} | 1660 | [SCIC_FAILED] = {} |
1675 | }; | 1661 | }; |
1676 | 1662 | ||
1677 | static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic) | 1663 | static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost) |
1678 | { | 1664 | { |
1679 | /* these defaults are overridden by the platform / firmware */ | 1665 | /* these defaults are overridden by the platform / firmware */ |
1680 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1681 | u16 index; | 1666 | u16 index; |
1682 | 1667 | ||
1683 | /* Default to APC mode. */ | 1668 | /* Default to APC mode. */ |
1684 | scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; | 1669 | ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; |
1685 | 1670 | ||
1686 | /* Default to APC mode. */ | 1671 | /* Default to APC mode. */ |
1687 | scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; | 1672 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; |
1688 | 1673 | ||
1689 | /* Default to no SSC operation. */ | 1674 | /* Default to no SSC operation. */ |
1690 | scic->oem_parameters.sds1.controller.do_enable_ssc = false; | 1675 | ihost->oem_parameters.sds1.controller.do_enable_ssc = false; |
1691 | 1676 | ||
1692 | /* Initialize all of the port parameter information to narrow ports. */ | 1677 | /* Initialize all of the port parameter information to narrow ports. */ |
1693 | for (index = 0; index < SCI_MAX_PORTS; index++) { | 1678 | for (index = 0; index < SCI_MAX_PORTS; index++) { |
1694 | scic->oem_parameters.sds1.ports[index].phy_mask = 0; | 1679 | ihost->oem_parameters.sds1.ports[index].phy_mask = 0; |
1695 | } | 1680 | } |
1696 | 1681 | ||
1697 | /* Initialize all of the phy parameter information. */ | 1682 | /* Initialize all of the phy parameter information. */ |
1698 | for (index = 0; index < SCI_MAX_PHYS; index++) { | 1683 | for (index = 0; index < SCI_MAX_PHYS; index++) { |
1699 | /* Default to 6G (i.e. Gen 3) for now. */ | 1684 | /* Default to 6G (i.e. Gen 3) for now. */ |
1700 | scic->user_parameters.sds1.phys[index].max_speed_generation = 3; | 1685 | ihost->user_parameters.sds1.phys[index].max_speed_generation = 3; |
1701 | 1686 | ||
1702 | /* the frequencies cannot be 0 */ | 1687 | /* the frequencies cannot be 0 */ |
1703 | scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; | 1688 | ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; |
1704 | scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; | 1689 | ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; |
1705 | scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; | 1690 | ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; |
1706 | 1691 | ||
1707 | /* | 1692 | /* |
1708 | * Previous Vitesse-based expanders had an arbitration issue that | 1693 | * Previous Vitesse-based expanders had an arbitration issue that |
1709 | * is worked around by having the upper 32-bits of SAS address | 1694 | * is worked around by having the upper 32-bits of SAS address |
1710 | * with a value greater than the Vitesse company identifier. | 1695 | * with a value greater than the Vitesse company identifier. |
1711 | * Hence, usage of 0x5FCFFFFF. */ | 1696 | * Hence, usage of 0x5FCFFFFF. */ |
1712 | scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; | 1697 | ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; |
1713 | scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; | 1698 | ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; |
1714 | } | 1699 | } |
1715 | 1700 | ||
1716 | scic->user_parameters.sds1.stp_inactivity_timeout = 5; | 1701 | ihost->user_parameters.sds1.stp_inactivity_timeout = 5; |
1717 | scic->user_parameters.sds1.ssp_inactivity_timeout = 5; | 1702 | ihost->user_parameters.sds1.ssp_inactivity_timeout = 5; |
1718 | scic->user_parameters.sds1.stp_max_occupancy_timeout = 5; | 1703 | ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5; |
1719 | scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20; | 1704 | ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20; |
1720 | scic->user_parameters.sds1.no_outbound_task_timeout = 20; | 1705 | ihost->user_parameters.sds1.no_outbound_task_timeout = 20; |
1721 | } | 1706 | } |
1722 | 1707 | ||
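The defaults above build each phy's SAS address from a fixed high dword of 0x5FCFFFFF (kept above the Vitesse company identifier, per the workaround comment) and a low dword of 1 plus the controller instance. A small standalone illustration of that layout (helper name hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t default_sas_addr(unsigned int controller_id)
{
	uint32_t high = 0x5FCFFFFF;             /* stays above the Vitesse identifier */
	uint32_t low  = 0x1 + controller_id;    /* unique per controller */

	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	printf("controller 0 default SAS address: 0x%016" PRIx64 "\n",
	       default_sas_addr(0));
	return 0;
}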
1723 | static void controller_timeout(unsigned long data) | 1708 | static void controller_timeout(unsigned long data) |
1724 | { | 1709 | { |
1725 | struct sci_timer *tmr = (struct sci_timer *)data; | 1710 | struct sci_timer *tmr = (struct sci_timer *)data; |
1726 | struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer); | 1711 | struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); |
1727 | struct isci_host *ihost = scic_to_ihost(scic); | 1712 | struct sci_base_state_machine *sm = &ihost->sm; |
1728 | struct sci_base_state_machine *sm = &scic->sm; | ||
1729 | unsigned long flags; | 1713 | unsigned long flags; |
1730 | 1714 | ||
1731 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1715 | spin_lock_irqsave(&ihost->scic_lock, flags); |
@@ -1734,12 +1718,12 @@ static void controller_timeout(unsigned long data) | |||
1734 | goto done; | 1718 | goto done; |
1735 | 1719 | ||
1736 | if (sm->current_state_id == SCIC_STARTING) | 1720 | if (sm->current_state_id == SCIC_STARTING) |
1737 | scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT); | 1721 | scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); |
1738 | else if (sm->current_state_id == SCIC_STOPPING) { | 1722 | else if (sm->current_state_id == SCIC_STOPPING) { |
1739 | sci_change_state(sm, SCIC_FAILED); | 1723 | sci_change_state(sm, SCIC_FAILED); |
1740 | isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); | 1724 | isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); |
1741 | } else /* / @todo Now what do we want to do in this case? */ | 1725 | } else /* / @todo Now what do we want to do in this case? */ |
1742 | dev_err(scic_to_dev(scic), | 1726 | dev_err(&ihost->pdev->dev, |
1743 | "%s: Controller timer fired when controller was not " | 1727 | "%s: Controller timer fired when controller was not " |
1744 | "in a state being timed.\n", | 1728 | "in a state being timed.\n", |
1745 | __func__); | 1729 | __func__); |
@@ -1764,24 +1748,23 @@ done: | |||
1764 | * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the | 1748 | * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the |
1765 | * controller does not support the supplied initialization data version. | 1749 | * controller does not support the supplied initialization data version. |
1766 | */ | 1750 | */ |
1767 | static enum sci_status scic_controller_construct(struct scic_sds_controller *scic, | 1751 | static enum sci_status scic_controller_construct(struct isci_host *ihost, |
1768 | void __iomem *scu_base, | 1752 | void __iomem *scu_base, |
1769 | void __iomem *smu_base) | 1753 | void __iomem *smu_base) |
1770 | { | 1754 | { |
1771 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1772 | u8 i; | 1755 | u8 i; |
1773 | 1756 | ||
1774 | sci_init_sm(&scic->sm, scic_sds_controller_state_table, SCIC_INITIAL); | 1757 | sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL); |
1775 | 1758 | ||
1776 | scic->scu_registers = scu_base; | 1759 | ihost->scu_registers = scu_base; |
1777 | scic->smu_registers = smu_base; | 1760 | ihost->smu_registers = smu_base; |
1778 | 1761 | ||
1779 | scic_sds_port_configuration_agent_construct(&scic->port_agent); | 1762 | scic_sds_port_configuration_agent_construct(&ihost->port_agent); |
1780 | 1763 | ||
1781 | /* Construct the ports for this controller */ | 1764 | /* Construct the ports for this controller */ |
1782 | for (i = 0; i < SCI_MAX_PORTS; i++) | 1765 | for (i = 0; i < SCI_MAX_PORTS; i++) |
1783 | scic_sds_port_construct(&ihost->ports[i], i, scic); | 1766 | scic_sds_port_construct(&ihost->ports[i], i, ihost); |
1784 | scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, scic); | 1767 | scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost); |
1785 | 1768 | ||
1786 | /* Construct the phys for this controller */ | 1769 | /* Construct the phys for this controller */ |
1787 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 1770 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
@@ -1790,14 +1773,14 @@ static enum sci_status scic_controller_construct(struct scic_sds_controller *sci | |||
1790 | &ihost->ports[SCI_MAX_PORTS], i); | 1773 | &ihost->ports[SCI_MAX_PORTS], i); |
1791 | } | 1774 | } |
1792 | 1775 | ||
1793 | scic->invalid_phy_mask = 0; | 1776 | ihost->invalid_phy_mask = 0; |
1794 | 1777 | ||
1795 | sci_init_timer(&scic->timer, controller_timeout); | 1778 | sci_init_timer(&ihost->timer, controller_timeout); |
1796 | 1779 | ||
1797 | /* Initialize the User and OEM parameters to default values. */ | 1780 | /* Initialize the User and OEM parameters to default values. */ |
1798 | scic_sds_controller_set_default_config_parameters(scic); | 1781 | scic_sds_controller_set_default_config_parameters(ihost); |
1799 | 1782 | ||
1800 | return scic_controller_reset(scic); | 1783 | return scic_controller_reset(ihost); |
1801 | } | 1784 | } |
1802 | 1785 | ||
1803 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) | 1786 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) |
@@ -1834,10 +1817,10 @@ int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) | |||
1834 | return 0; | 1817 | return 0; |
1835 | } | 1818 | } |
1836 | 1819 | ||
1837 | static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, | 1820 | static enum sci_status scic_oem_parameters_set(struct isci_host *ihost, |
1838 | union scic_oem_parameters *scic_parms) | 1821 | union scic_oem_parameters *scic_parms) |
1839 | { | 1822 | { |
1840 | u32 state = scic->sm.current_state_id; | 1823 | u32 state = ihost->sm.current_state_id; |
1841 | 1824 | ||
1842 | if (state == SCIC_RESET || | 1825 | if (state == SCIC_RESET || |
1843 | state == SCIC_INITIALIZING || | 1826 | state == SCIC_INITIALIZING || |
@@ -1845,7 +1828,7 @@ static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, | |||
1845 | 1828 | ||
1846 | if (scic_oem_parameters_validate(&scic_parms->sds1)) | 1829 | if (scic_oem_parameters_validate(&scic_parms->sds1)) |
1847 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 1830 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
1848 | scic->oem_parameters.sds1 = scic_parms->sds1; | 1831 | ihost->oem_parameters.sds1 = scic_parms->sds1; |
1849 | 1832 | ||
1850 | return SCI_SUCCESS; | 1833 | return SCI_SUCCESS; |
1851 | } | 1834 | } |
@@ -1854,17 +1837,16 @@ static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, | |||
1854 | } | 1837 | } |
1855 | 1838 | ||
1856 | void scic_oem_parameters_get( | 1839 | void scic_oem_parameters_get( |
1857 | struct scic_sds_controller *scic, | 1840 | struct isci_host *ihost, |
1858 | union scic_oem_parameters *scic_parms) | 1841 | union scic_oem_parameters *scic_parms) |
1859 | { | 1842 | { |
1860 | memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms)); | 1843 | memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms)); |
1861 | } | 1844 | } |
1862 | 1845 | ||
1863 | static void power_control_timeout(unsigned long data) | 1846 | static void power_control_timeout(unsigned long data) |
1864 | { | 1847 | { |
1865 | struct sci_timer *tmr = (struct sci_timer *)data; | 1848 | struct sci_timer *tmr = (struct sci_timer *)data; |
1866 | struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer); | 1849 | struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); |
1867 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1868 | struct isci_phy *iphy; | 1850 | struct isci_phy *iphy; |
1869 | unsigned long flags; | 1851 | unsigned long flags; |
1870 | u8 i; | 1852 | u8 i; |
@@ -1874,29 +1856,29 @@ static void power_control_timeout(unsigned long data) | |||
1874 | if (tmr->cancel) | 1856 | if (tmr->cancel) |
1875 | goto done; | 1857 | goto done; |
1876 | 1858 | ||
1877 | scic->power_control.phys_granted_power = 0; | 1859 | ihost->power_control.phys_granted_power = 0; |
1878 | 1860 | ||
1879 | if (scic->power_control.phys_waiting == 0) { | 1861 | if (ihost->power_control.phys_waiting == 0) { |
1880 | scic->power_control.timer_started = false; | 1862 | ihost->power_control.timer_started = false; |
1881 | goto done; | 1863 | goto done; |
1882 | } | 1864 | } |
1883 | 1865 | ||
1884 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 1866 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
1885 | 1867 | ||
1886 | if (scic->power_control.phys_waiting == 0) | 1868 | if (ihost->power_control.phys_waiting == 0) |
1887 | break; | 1869 | break; |
1888 | 1870 | ||
1889 | iphy = scic->power_control.requesters[i]; | 1871 | iphy = ihost->power_control.requesters[i]; |
1890 | if (iphy == NULL) | 1872 | if (iphy == NULL) |
1891 | continue; | 1873 | continue; |
1892 | 1874 | ||
1893 | if (scic->power_control.phys_granted_power >= | 1875 | if (ihost->power_control.phys_granted_power >= |
1894 | scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) | 1876 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) |
1895 | break; | 1877 | break; |
1896 | 1878 | ||
1897 | scic->power_control.requesters[i] = NULL; | 1879 | ihost->power_control.requesters[i] = NULL; |
1898 | scic->power_control.phys_waiting--; | 1880 | ihost->power_control.phys_waiting--; |
1899 | scic->power_control.phys_granted_power++; | 1881 | ihost->power_control.phys_granted_power++; |
1900 | scic_sds_phy_consume_power_handler(iphy); | 1882 | scic_sds_phy_consume_power_handler(iphy); |
1901 | } | 1883 | } |
1902 | 1884 | ||
@@ -1905,7 +1887,7 @@ static void power_control_timeout(unsigned long data) | |||
1905 | * timer in case another phy becomes ready. | 1887 | * timer in case another phy becomes ready. |
1906 | */ | 1888 | */ |
1907 | sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); | 1889 | sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); |
1908 | scic->power_control.timer_started = true; | 1890 | ihost->power_control.timer_started = true; |
1909 | 1891 | ||
1910 | done: | 1892 | done: |
1911 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1893 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
@@ -1918,31 +1900,31 @@ done: | |||
1918 | * | 1900 | * |
1919 | */ | 1901 | */ |
1920 | void scic_sds_controller_power_control_queue_insert( | 1902 | void scic_sds_controller_power_control_queue_insert( |
1921 | struct scic_sds_controller *scic, | 1903 | struct isci_host *ihost, |
1922 | struct isci_phy *iphy) | 1904 | struct isci_phy *iphy) |
1923 | { | 1905 | { |
1924 | BUG_ON(iphy == NULL); | 1906 | BUG_ON(iphy == NULL); |
1925 | 1907 | ||
1926 | if (scic->power_control.phys_granted_power < | 1908 | if (ihost->power_control.phys_granted_power < |
1927 | scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { | 1909 | ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) { |
1928 | scic->power_control.phys_granted_power++; | 1910 | ihost->power_control.phys_granted_power++; |
1929 | scic_sds_phy_consume_power_handler(iphy); | 1911 | scic_sds_phy_consume_power_handler(iphy); |
1930 | 1912 | ||
1931 | /* | 1913 | /* |
1932 | * stop and start the power_control timer. When the timer fires, the | 1914 | * stop and start the power_control timer. When the timer fires, the |
1933 | * no_of_phys_granted_power will be set to 0 | 1915 | * no_of_phys_granted_power will be set to 0 |
1934 | */ | 1916 | */ |
1935 | if (scic->power_control.timer_started) | 1917 | if (ihost->power_control.timer_started) |
1936 | sci_del_timer(&scic->power_control.timer); | 1918 | sci_del_timer(&ihost->power_control.timer); |
1937 | 1919 | ||
1938 | sci_mod_timer(&scic->power_control.timer, | 1920 | sci_mod_timer(&ihost->power_control.timer, |
1939 | SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); | 1921 | SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); |
1940 | scic->power_control.timer_started = true; | 1922 | ihost->power_control.timer_started = true; |
1941 | 1923 | ||
1942 | } else { | 1924 | } else { |
1943 | /* Add the phy in the waiting list */ | 1925 | /* Add the phy in the waiting list */ |
1944 | scic->power_control.requesters[iphy->phy_index] = iphy; | 1926 | ihost->power_control.requesters[iphy->phy_index] = iphy; |
1945 | scic->power_control.phys_waiting++; | 1927 | ihost->power_control.phys_waiting++; |
1946 | } | 1928 | } |
1947 | } | 1929 | } |
1948 | 1930 | ||
@@ -1953,16 +1935,16 @@ void scic_sds_controller_power_control_queue_insert( | |||
1953 | * | 1935 | * |
1954 | */ | 1936 | */ |
1955 | void scic_sds_controller_power_control_queue_remove( | 1937 | void scic_sds_controller_power_control_queue_remove( |
1956 | struct scic_sds_controller *scic, | 1938 | struct isci_host *ihost, |
1957 | struct isci_phy *iphy) | 1939 | struct isci_phy *iphy) |
1958 | { | 1940 | { |
1959 | BUG_ON(iphy == NULL); | 1941 | BUG_ON(iphy == NULL); |
1960 | 1942 | ||
1961 | if (scic->power_control.requesters[iphy->phy_index] != NULL) { | 1943 | if (ihost->power_control.requesters[iphy->phy_index] != NULL) { |
1962 | scic->power_control.phys_waiting--; | 1944 | ihost->power_control.phys_waiting--; |
1963 | } | 1945 | } |
1964 | 1946 | ||
1965 | scic->power_control.requesters[iphy->phy_index] = NULL; | 1947 | ihost->power_control.requesters[iphy->phy_index] = NULL; |
1966 | } | 1948 | } |
1967 | 1949 | ||
1968 | #define AFE_REGISTER_WRITE_DELAY 10 | 1950 | #define AFE_REGISTER_WRITE_DELAY 10 |
@@ -1970,50 +1952,50 @@ void scic_sds_controller_power_control_queue_remove( | |||
1970 | /* Initialize the AFE for this phy index. We need to read the AFE setup from | 1952 | /* Initialize the AFE for this phy index. We need to read the AFE setup from |
1971 | * the OEM parameters | 1953 | * the OEM parameters |
1972 | */ | 1954 | */ |
1973 | static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic) | 1955 | static void scic_sds_controller_afe_initialization(struct isci_host *ihost) |
1974 | { | 1956 | { |
1975 | const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; | 1957 | const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1; |
1976 | u32 afe_status; | 1958 | u32 afe_status; |
1977 | u32 phy_id; | 1959 | u32 phy_id; |
1978 | 1960 | ||
1979 | /* Clear DFX Status registers */ | 1961 | /* Clear DFX Status registers */ |
1980 | writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0); | 1962 | writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); |
1981 | udelay(AFE_REGISTER_WRITE_DELAY); | 1963 | udelay(AFE_REGISTER_WRITE_DELAY); |
1982 | 1964 | ||
1983 | if (is_b0()) { | 1965 | if (is_b0()) { |
1984 | /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement | 1966 | /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement |
1985 | * Timer, PM Stagger Timer */ | 1967 | * Timer, PM Stagger Timer */ |
1986 | writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2); | 1968 | writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); |
1987 | udelay(AFE_REGISTER_WRITE_DELAY); | 1969 | udelay(AFE_REGISTER_WRITE_DELAY); |
1988 | } | 1970 | } |
1989 | 1971 | ||
1990 | /* Configure bias currents to normal */ | 1972 | /* Configure bias currents to normal */ |
1991 | if (is_a0()) | 1973 | if (is_a0()) |
1992 | writel(0x00005500, &scic->scu_registers->afe.afe_bias_control); | 1974 | writel(0x00005500, &ihost->scu_registers->afe.afe_bias_control); |
1993 | else if (is_a2()) | 1975 | else if (is_a2()) |
1994 | writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control); | 1976 | writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); |
1995 | else if (is_b0() || is_c0()) | 1977 | else if (is_b0() || is_c0()) |
1996 | writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control); | 1978 | writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); |
1997 | 1979 | ||
1998 | udelay(AFE_REGISTER_WRITE_DELAY); | 1980 | udelay(AFE_REGISTER_WRITE_DELAY); |
1999 | 1981 | ||
2000 | /* Enable PLL */ | 1982 | /* Enable PLL */ |
2001 | if (is_b0() || is_c0()) | 1983 | if (is_b0() || is_c0()) |
2002 | writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0); | 1984 | writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); |
2003 | else | 1985 | else |
2004 | writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0); | 1986 | writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); |
2005 | 1987 | ||
2006 | udelay(AFE_REGISTER_WRITE_DELAY); | 1988 | udelay(AFE_REGISTER_WRITE_DELAY); |
2007 | 1989 | ||
2008 | /* Wait for the PLL to lock */ | 1990 | /* Wait for the PLL to lock */ |
2009 | do { | 1991 | do { |
2010 | afe_status = readl(&scic->scu_registers->afe.afe_common_block_status); | 1992 | afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); |
2011 | udelay(AFE_REGISTER_WRITE_DELAY); | 1993 | udelay(AFE_REGISTER_WRITE_DELAY); |
2012 | } while ((afe_status & 0x00001000) == 0); | 1994 | } while ((afe_status & 0x00001000) == 0); |
2013 | 1995 | ||
2014 | if (is_a0() || is_a2()) { | 1996 | if (is_a0() || is_a2()) { |
2015 | /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ | 1997 | /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ |
2016 | writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0); | 1998 | writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); |
2017 | udelay(AFE_REGISTER_WRITE_DELAY); | 1999 | udelay(AFE_REGISTER_WRITE_DELAY); |
2018 | } | 2000 | } |
2019 | 2001 | ||
@@ -2022,26 +2004,26 @@ static void scic_sds_controller_afe_initialization(struct scic_sds_controller *s | |||
2022 | 2004 | ||
2023 | if (is_b0()) { | 2005 | if (is_b0()) { |
2024 | /* Configure transmitter SSC parameters */ | 2006 | /* Configure transmitter SSC parameters */ |
2025 | writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); | 2007 | writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); |
2026 | udelay(AFE_REGISTER_WRITE_DELAY); | 2008 | udelay(AFE_REGISTER_WRITE_DELAY); |
2027 | } else if (is_c0()) { | 2009 | } else if (is_c0()) { |
2028 | /* Configure transmitter SSC parameters */ | 2010 | /* Configure transmitter SSC parameters */ |
2029 | writel(0x0003000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); | 2011 | writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); |
2030 | udelay(AFE_REGISTER_WRITE_DELAY); | 2012 | udelay(AFE_REGISTER_WRITE_DELAY); |
2031 | 2013 | ||
2032 | /* | 2014 | /* |
2033 | * All defaults, except the Receive Word Alignment/Comma Detect | 2015 | * All defaults, except the Receive Word Alignment/Comma Detect |
2034 | * Enable....(0xe800) */ | 2016 | * Enable....(0xe800) */ |
2035 | writel(0x00004500, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); | 2017 | writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); |
2036 | udelay(AFE_REGISTER_WRITE_DELAY); | 2018 | udelay(AFE_REGISTER_WRITE_DELAY); |
2037 | } else { | 2019 | } else { |
2038 | /* | 2020 | /* |
2039 | * All defaults, except the Receive Word Alignment/Comma Detect | 2021 | * All defaults, except the Receive Word Alignment/Comma Detect |
2040 | * Enable....(0xe800) */ | 2022 | * Enable....(0xe800) */ |
2041 | writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); | 2023 | writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); |
2042 | udelay(AFE_REGISTER_WRITE_DELAY); | 2024 | udelay(AFE_REGISTER_WRITE_DELAY); |
2043 | 2025 | ||
2044 | writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); | 2026 | writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); |
2045 | udelay(AFE_REGISTER_WRITE_DELAY); | 2027 | udelay(AFE_REGISTER_WRITE_DELAY); |
2046 | } | 2028 | } |
2047 | 2029 | ||
@@ -2049,106 +2031,105 @@ static void scic_sds_controller_afe_initialization(struct scic_sds_controller *s | |||
2049 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) | 2031 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
2050 | * & increase TX int & ext bias 20%....(0xe85c) */ | 2032 | * & increase TX int & ext bias 20%....(0xe85c) */ |
2051 | if (is_a0()) | 2033 | if (is_a0()) |
2052 | writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2034 | writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2053 | else if (is_a2()) | 2035 | else if (is_a2()) |
2054 | writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2036 | writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2055 | else if (is_b0()) { | 2037 | else if (is_b0()) { |
2056 | /* Power down TX and RX (PWRDNTX and PWRDNRX) */ | 2038 | /* Power down TX and RX (PWRDNTX and PWRDNRX) */ |
2057 | writel(0x000003D7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2039 | writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2058 | udelay(AFE_REGISTER_WRITE_DELAY); | 2040 | udelay(AFE_REGISTER_WRITE_DELAY); |
2059 | 2041 | ||
2060 | /* | 2042 | /* |
2061 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) | 2043 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
2062 | * & increase TX int & ext bias 20%....(0xe85c) */ | 2044 | * & increase TX int & ext bias 20%....(0xe85c) */ |
2063 | writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2045 | writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2064 | } else { | 2046 | } else { |
2065 | writel(0x000001E7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2047 | writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2066 | udelay(AFE_REGISTER_WRITE_DELAY); | 2048 | udelay(AFE_REGISTER_WRITE_DELAY); |
2067 | 2049 | ||
2068 | /* | 2050 | /* |
2069 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) | 2051 | * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) |
2070 | * & increase TX int & ext bias 20%....(0xe85c) */ | 2052 | * & increase TX int & ext bias 20%....(0xe85c) */ |
2071 | writel(0x000001E4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); | 2053 | writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); |
2072 | } | 2054 | } |
2073 | udelay(AFE_REGISTER_WRITE_DELAY); | 2055 | udelay(AFE_REGISTER_WRITE_DELAY); |
2074 | 2056 | ||
2075 | if (is_a0() || is_a2()) { | 2057 | if (is_a0() || is_a2()) { |
2076 | /* Enable TX equalization (0xe824) */ | 2058 | /* Enable TX equalization (0xe824) */ |
2077 | writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); | 2059 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2078 | udelay(AFE_REGISTER_WRITE_DELAY); | 2060 | udelay(AFE_REGISTER_WRITE_DELAY); |
2079 | } | 2061 | } |
2080 | 2062 | ||
2081 | /* | 2063 | /* |
2082 | * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), | 2064 | * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), |
2083 | * RDD=0x0(RX Detect Enabled) ....(0xe800) */ | 2065 | * RDD=0x0(RX Detect Enabled) ....(0xe800) */ |
2084 | writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); | 2066 | writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); |
2085 | udelay(AFE_REGISTER_WRITE_DELAY); | 2067 | udelay(AFE_REGISTER_WRITE_DELAY); |
2086 | 2068 | ||
2087 | /* Leave DFE/FFE on */ | 2069 | /* Leave DFE/FFE on */ |
2088 | if (is_a0()) | 2070 | if (is_a0()) |
2089 | writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); | 2071 | writel(0x3F09983F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2090 | else if (is_a2()) | 2072 | else if (is_a2()) |
2091 | writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); | 2073 | writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2092 | else if (is_b0()) { | 2074 | else if (is_b0()) { |
2093 | writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); | 2075 | writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2094 | udelay(AFE_REGISTER_WRITE_DELAY); | 2076 | udelay(AFE_REGISTER_WRITE_DELAY); |
2095 | /* Enable TX equalization (0xe824) */ | 2077 | /* Enable TX equalization (0xe824) */ |
2096 | writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); | 2078 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2097 | } else { | 2079 | } else { |
2098 | writel(0x0140DF0F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); | 2080 | writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); |
2099 | udelay(AFE_REGISTER_WRITE_DELAY); | 2081 | udelay(AFE_REGISTER_WRITE_DELAY); |
2100 | 2082 | ||
2101 | writel(0x3F6F103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); | 2083 | writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); |
2102 | udelay(AFE_REGISTER_WRITE_DELAY); | 2084 | udelay(AFE_REGISTER_WRITE_DELAY); |
2103 | 2085 | ||
2104 | /* Enable TX equalization (0xe824) */ | 2086 | /* Enable TX equalization (0xe824) */ |
2105 | writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); | 2087 | writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); |
2106 | } | 2088 | } |
2107 | 2089 | ||
2108 | udelay(AFE_REGISTER_WRITE_DELAY); | 2090 | udelay(AFE_REGISTER_WRITE_DELAY); |
2109 | 2091 | ||
2110 | writel(oem_phy->afe_tx_amp_control0, | 2092 | writel(oem_phy->afe_tx_amp_control0, |
2111 | &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); | 2093 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); |
2112 | udelay(AFE_REGISTER_WRITE_DELAY); | 2094 | udelay(AFE_REGISTER_WRITE_DELAY); |
2113 | 2095 | ||
2114 | writel(oem_phy->afe_tx_amp_control1, | 2096 | writel(oem_phy->afe_tx_amp_control1, |
2115 | &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); | 2097 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); |
2116 | udelay(AFE_REGISTER_WRITE_DELAY); | 2098 | udelay(AFE_REGISTER_WRITE_DELAY); |
2117 | 2099 | ||
2118 | writel(oem_phy->afe_tx_amp_control2, | 2100 | writel(oem_phy->afe_tx_amp_control2, |
2119 | &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); | 2101 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); |
2120 | udelay(AFE_REGISTER_WRITE_DELAY); | 2102 | udelay(AFE_REGISTER_WRITE_DELAY); |
2121 | 2103 | ||
2122 | writel(oem_phy->afe_tx_amp_control3, | 2104 | writel(oem_phy->afe_tx_amp_control3, |
2123 | &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); | 2105 | &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); |
2124 | udelay(AFE_REGISTER_WRITE_DELAY); | 2106 | udelay(AFE_REGISTER_WRITE_DELAY); |
2125 | } | 2107 | } |
2126 | 2108 | ||
2127 | /* Transfer control to the PEs */ | 2109 | /* Transfer control to the PEs */ |
2128 | writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0); | 2110 | writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); |
2129 | udelay(AFE_REGISTER_WRITE_DELAY); | 2111 | udelay(AFE_REGISTER_WRITE_DELAY); |
2130 | } | 2112 | } |
2131 | 2113 | ||
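Every AFE register write above is followed by the same AFE_REGISTER_WRITE_DELAY settling delay. A hedged sketch of a small wrapper that keeps such a sequence readable (hypothetical helper, with user-space stand-ins for writel()/udelay()):

#include <stdint.h>
#include <unistd.h>

#define AFE_WRITE_DELAY_US 10

static void afe_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;                             /* stands in for writel() */
	usleep(AFE_WRITE_DELAY_US);             /* stands in for udelay() */
}

int main(void)
{
	uint32_t fake_reg = 0;                  /* stands in for a mapped AFE register */

	afe_write(&fake_reg, 0x00030000);
	return fake_reg == 0x00030000 ? 0 : 1;
}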
2132 | static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic) | 2114 | static void scic_sds_controller_initialize_power_control(struct isci_host *ihost) |
2133 | { | 2115 | { |
2134 | sci_init_timer(&scic->power_control.timer, power_control_timeout); | 2116 | sci_init_timer(&ihost->power_control.timer, power_control_timeout); |
2135 | 2117 | ||
2136 | memset(scic->power_control.requesters, 0, | 2118 | memset(ihost->power_control.requesters, 0, |
2137 | sizeof(scic->power_control.requesters)); | 2119 | sizeof(ihost->power_control.requesters)); |
2138 | 2120 | ||
2139 | scic->power_control.phys_waiting = 0; | 2121 | ihost->power_control.phys_waiting = 0; |
2140 | scic->power_control.phys_granted_power = 0; | 2122 | ihost->power_control.phys_granted_power = 0; |
2141 | } | 2123 | } |
2142 | 2124 | ||
2143 | static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic) | 2125 | static enum sci_status scic_controller_initialize(struct isci_host *ihost) |
2144 | { | 2126 | { |
2145 | struct sci_base_state_machine *sm = &scic->sm; | 2127 | struct sci_base_state_machine *sm = &ihost->sm; |
2146 | struct isci_host *ihost = scic_to_ihost(scic); | ||
2147 | enum sci_status result = SCI_FAILURE; | 2128 | enum sci_status result = SCI_FAILURE; |
2148 | unsigned long i, state, val; | 2129 | unsigned long i, state, val; |
2149 | 2130 | ||
2150 | if (scic->sm.current_state_id != SCIC_RESET) { | 2131 | if (ihost->sm.current_state_id != SCIC_RESET) { |
2151 | dev_warn(scic_to_dev(scic), | 2132 | dev_warn(&ihost->pdev->dev, |
2152 | "SCIC Controller initialize operation requested " | 2133 | "SCIC Controller initialize operation requested " |
2153 | "in invalid state\n"); | 2134 | "in invalid state\n"); |
2154 | return SCI_FAILURE_INVALID_STATE; | 2135 | return SCI_FAILURE_INVALID_STATE; |
@@ -2156,23 +2137,23 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc | |||
2156 | 2137 | ||
2157 | sci_change_state(sm, SCIC_INITIALIZING); | 2138 | sci_change_state(sm, SCIC_INITIALIZING); |
2158 | 2139 | ||
2159 | sci_init_timer(&scic->phy_timer, phy_startup_timeout); | 2140 | sci_init_timer(&ihost->phy_timer, phy_startup_timeout); |
2160 | 2141 | ||
2161 | scic->next_phy_to_start = 0; | 2142 | ihost->next_phy_to_start = 0; |
2162 | scic->phy_startup_timer_pending = false; | 2143 | ihost->phy_startup_timer_pending = false; |
2163 | 2144 | ||
2164 | scic_sds_controller_initialize_power_control(scic); | 2145 | scic_sds_controller_initialize_power_control(ihost); |
2165 | 2146 | ||
2166 | /* | 2147 | /* |
2167 | * There is nothing to do here for B0 since we do not have to | 2148 | * There is nothing to do here for B0 since we do not have to |
2168 | * program the AFE registers. | 2149 | * program the AFE registers. |
2169 | * / @todo The AFE settings are supposed to be correct for the B0 but | 2150 | * / @todo The AFE settings are supposed to be correct for the B0 but |
2170 | * / presently they seem to be wrong. */ | 2151 | * / presently they seem to be wrong. */ |
2171 | scic_sds_controller_afe_initialization(scic); | 2152 | scic_sds_controller_afe_initialization(ihost); |
2172 | 2153 | ||
2173 | 2154 | ||
2174 | /* Take the hardware out of reset */ | 2155 | /* Take the hardware out of reset */ |
2175 | writel(0, &scic->smu_registers->soft_reset_control); | 2156 | writel(0, &ihost->smu_registers->soft_reset_control); |
2176 | 2157 | ||
2177 | /* | 2158 | /* |
2178 | * / @todo Provide meaningful error code for hardware failure | 2159 | * / @todo Provide meaningful error code for hardware failure |
@@ -2182,7 +2163,7 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc | |||
2182 | 2163 | ||
2183 | /* Loop until the hardware reports success */ | 2164 | /* Loop until the hardware reports success */ |
2184 | udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); | 2165 | udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); |
2185 | status = readl(&scic->smu_registers->control_status); | 2166 | status = readl(&ihost->smu_registers->control_status); |
2186 | 2167 | ||
2187 | if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) | 2168 | if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) |
2188 | break; | 2169 | break; |
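After releasing the soft reset, the hunk above polls control_status until SCU_RAM_INIT_COMPLETED is set, stalling SCU_CONTEXT_RAM_INIT_STALL_TIME between reads; the retry bound itself falls outside this hunk. A standalone model of such a bounded poll (hypothetical names and limit, simulated register):

#include <stdbool.h>
#include <stdint.h>

#define RAM_INIT_COMPLETED 0x1
#define RAM_INIT_RETRIES   100

static uint32_t read_control_status(void)
{
	static int calls;
	return ++calls > 3 ? RAM_INIT_COMPLETED : 0;    /* pretend hardware */
}

static bool wait_for_ram_init(void)
{
	for (int i = 0; i < RAM_INIT_RETRIES; i++) {
		if (read_control_status() & RAM_INIT_COMPLETED)
			return true;
		/* a real driver would stall here between polls */
	}
	return false;
}

int main(void)
{
	return wait_for_ram_init() ? 0 : 1;
}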
@@ -2193,32 +2174,32 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc | |||
2193 | /* | 2174 | /* |
2194 | * Determine the actual device capacities that the | 2175 | * Determine the actual device capacities that the |
2195 | * hardware will support */ | 2176 | * hardware will support */ |
2196 | val = readl(&scic->smu_registers->device_context_capacity); | 2177 | val = readl(&ihost->smu_registers->device_context_capacity); |
2197 | 2178 | ||
2198 | /* Record the smaller of the two capacity values */ | 2179 | /* Record the smaller of the two capacity values */ |
2199 | scic->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); | 2180 | ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); |
2200 | scic->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); | 2181 | ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); |
2201 | scic->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); | 2182 | ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); |
2202 | 2183 | ||
2203 | /* | 2184 | /* |
2204 | * Make all PEs that are unassigned match up with the | 2185 | * Make all PEs that are unassigned match up with the |
2205 | * logical ports | 2186 | * logical ports |
2206 | */ | 2187 | */ |
2207 | for (i = 0; i < scic->logical_port_entries; i++) { | 2188 | for (i = 0; i < ihost->logical_port_entries; i++) { |
2208 | struct scu_port_task_scheduler_group_registers __iomem | 2189 | struct scu_port_task_scheduler_group_registers __iomem |
2209 | *ptsg = &scic->scu_registers->peg0.ptsg; | 2190 | *ptsg = &ihost->scu_registers->peg0.ptsg; |
2210 | 2191 | ||
2211 | writel(i, &ptsg->protocol_engine[i]); | 2192 | writel(i, &ptsg->protocol_engine[i]); |
2212 | } | 2193 | } |
2213 | 2194 | ||
2214 | /* Initialize hardware PCI Relaxed ordering in DMA engines */ | 2195 | /* Initialize hardware PCI Relaxed ordering in DMA engines */ |
2215 | val = readl(&scic->scu_registers->sdma.pdma_configuration); | 2196 | val = readl(&ihost->scu_registers->sdma.pdma_configuration); |
2216 | val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); | 2197 | val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); |
2217 | writel(val, &scic->scu_registers->sdma.pdma_configuration); | 2198 | writel(val, &ihost->scu_registers->sdma.pdma_configuration); |
2218 | 2199 | ||
2219 | val = readl(&scic->scu_registers->sdma.cdma_configuration); | 2200 | val = readl(&ihost->scu_registers->sdma.cdma_configuration); |
2220 | val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); | 2201 | val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); |
2221 | writel(val, &scic->scu_registers->sdma.cdma_configuration); | 2202 | writel(val, &ihost->scu_registers->sdma.cdma_configuration); |
2222 | 2203 | ||
2223 | /* | 2204 | /* |
2224 | * Initialize the PHYs before the PORTs because the PHY registers | 2205 | * Initialize the PHYs before the PORTs because the PHY registers |
@@ -2226,23 +2207,23 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc | |||
2226 | */ | 2207 | */ |
2227 | for (i = 0; i < SCI_MAX_PHYS; i++) { | 2208 | for (i = 0; i < SCI_MAX_PHYS; i++) { |
2228 | result = scic_sds_phy_initialize(&ihost->phys[i], | 2209 | result = scic_sds_phy_initialize(&ihost->phys[i], |
2229 | &scic->scu_registers->peg0.pe[i].tl, | 2210 | &ihost->scu_registers->peg0.pe[i].tl, |
2230 | &scic->scu_registers->peg0.pe[i].ll); | 2211 | &ihost->scu_registers->peg0.pe[i].ll); |
2231 | if (result != SCI_SUCCESS) | 2212 | if (result != SCI_SUCCESS) |
2232 | goto out; | 2213 | goto out; |
2233 | } | 2214 | } |
2234 | 2215 | ||
2235 | for (i = 0; i < scic->logical_port_entries; i++) { | 2216 | for (i = 0; i < ihost->logical_port_entries; i++) { |
2236 | result = scic_sds_port_initialize(&ihost->ports[i], | 2217 | result = scic_sds_port_initialize(&ihost->ports[i], |
2237 | &scic->scu_registers->peg0.ptsg.port[i], | 2218 | &ihost->scu_registers->peg0.ptsg.port[i], |
2238 | &scic->scu_registers->peg0.ptsg.protocol_engine, | 2219 | &ihost->scu_registers->peg0.ptsg.protocol_engine, |
2239 | &scic->scu_registers->peg0.viit[i]); | 2220 | &ihost->scu_registers->peg0.viit[i]); |
2240 | 2221 | ||
2241 | if (result != SCI_SUCCESS) | 2222 | if (result != SCI_SUCCESS) |
2242 | goto out; | 2223 | goto out; |
2243 | } | 2224 | } |
2244 | 2225 | ||
2245 | result = scic_sds_port_configuration_agent_initialize(scic, &scic->port_agent); | 2226 | result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent); |
2246 | 2227 | ||
2247 | out: | 2228 | out: |
2248 | /* Advance the controller state machine */ | 2229 | /* Advance the controller state machine */ |
@@ -2256,10 +2237,10 @@ static enum sci_status scic_controller_initialize(struct scic_sds_controller *sc | |||
2256 | } | 2237 | } |
2257 | 2238 | ||
2258 | static enum sci_status scic_user_parameters_set( | 2239 | static enum sci_status scic_user_parameters_set( |
2259 | struct scic_sds_controller *scic, | 2240 | struct isci_host *ihost, |
2260 | union scic_user_parameters *scic_parms) | 2241 | union scic_user_parameters *scic_parms) |
2261 | { | 2242 | { |
2262 | u32 state = scic->sm.current_state_id; | 2243 | u32 state = ihost->sm.current_state_id; |
2263 | 2244 | ||
2264 | if (state == SCIC_RESET || | 2245 | if (state == SCIC_RESET || |
2265 | state == SCIC_INITIALIZING || | 2246 | state == SCIC_INITIALIZING || |
@@ -2301,7 +2282,7 @@ static enum sci_status scic_user_parameters_set( | |||
2301 | (scic_parms->sds1.no_outbound_task_timeout == 0)) | 2282 | (scic_parms->sds1.no_outbound_task_timeout == 0)) |
2302 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; | 2283 | return SCI_FAILURE_INVALID_PARAMETER_VALUE; |
2303 | 2284 | ||
2304 | memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms)); | 2285 | memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms)); |
2305 | 2286 | ||
2306 | return SCI_SUCCESS; | 2287 | return SCI_SUCCESS; |
2307 | } | 2288 | } |
@@ -2309,40 +2290,40 @@ static enum sci_status scic_user_parameters_set( | |||
2309 | return SCI_FAILURE_INVALID_STATE; | 2290 | return SCI_FAILURE_INVALID_STATE; |
2310 | } | 2291 | } |
2311 | 2292 | ||
2312 | static int scic_controller_mem_init(struct scic_sds_controller *scic) | 2293 | static int scic_controller_mem_init(struct isci_host *ihost) |
2313 | { | 2294 | { |
2314 | struct device *dev = scic_to_dev(scic); | 2295 | struct device *dev = &ihost->pdev->dev; |
2315 | dma_addr_t dma; | 2296 | dma_addr_t dma; |
2316 | size_t size; | 2297 | size_t size; |
2317 | int err; | 2298 | int err; |
2318 | 2299 | ||
2319 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); | 2300 | size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); |
2320 | scic->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); | 2301 | ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); |
2321 | if (!scic->completion_queue) | 2302 | if (!ihost->completion_queue) |
2322 | return -ENOMEM; | 2303 | return -ENOMEM; |
2323 | 2304 | ||
2324 | writel(lower_32_bits(dma), &scic->smu_registers->completion_queue_lower); | 2305 | writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); |
2325 | writel(upper_32_bits(dma), &scic->smu_registers->completion_queue_upper); | 2306 | writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); |
2326 | 2307 | ||
2327 | size = scic->remote_node_entries * sizeof(union scu_remote_node_context); | 2308 | size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); |
2328 | scic->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, | 2309 | ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, |
2329 | GFP_KERNEL); | 2310 | GFP_KERNEL); |
2330 | if (!scic->remote_node_context_table) | 2311 | if (!ihost->remote_node_context_table) |
2331 | return -ENOMEM; | 2312 | return -ENOMEM; |
2332 | 2313 | ||
2333 | writel(lower_32_bits(dma), &scic->smu_registers->remote_node_context_lower); | 2314 | writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); |
2334 | writel(upper_32_bits(dma), &scic->smu_registers->remote_node_context_upper); | 2315 | writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); |
2335 | 2316 | ||
2336 | size = scic->task_context_entries * sizeof(struct scu_task_context), | 2317 | size = ihost->task_context_entries * sizeof(struct scu_task_context), |
2337 | scic->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); | 2318 | ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); |
2338 | if (!scic->task_context_table) | 2319 | if (!ihost->task_context_table) |
2339 | return -ENOMEM; | 2320 | return -ENOMEM; |
2340 | 2321 | ||
2341 | scic->task_context_dma = dma; | 2322 | ihost->task_context_dma = dma; |
2342 | writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower); | 2323 | writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); |
2343 | writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper); | 2324 | writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); |
2344 | 2325 | ||
2345 | err = scic_sds_unsolicited_frame_control_construct(scic); | 2326 | err = scic_sds_unsolicited_frame_control_construct(ihost); |
2346 | if (err) | 2327 | if (err) |
2347 | return err; | 2328 | return err; |
2348 | 2329 | ||
@@ -2350,112 +2331,112 @@ static int scic_controller_mem_init(struct scic_sds_controller *scic) | |||
2350 | * Inform the silicon as to the location of the UF headers and | 2331 | * Inform the silicon as to the location of the UF headers and |
2351 | * address table. | 2332 | * address table. |
2352 | */ | 2333 | */ |
2353 | writel(lower_32_bits(scic->uf_control.headers.physical_address), | 2334 | writel(lower_32_bits(ihost->uf_control.headers.physical_address), |
2354 | &scic->scu_registers->sdma.uf_header_base_address_lower); | 2335 | &ihost->scu_registers->sdma.uf_header_base_address_lower); |
2355 | writel(upper_32_bits(scic->uf_control.headers.physical_address), | 2336 | writel(upper_32_bits(ihost->uf_control.headers.physical_address), |
2356 | &scic->scu_registers->sdma.uf_header_base_address_upper); | 2337 | &ihost->scu_registers->sdma.uf_header_base_address_upper); |
2357 | 2338 | ||
2358 | writel(lower_32_bits(scic->uf_control.address_table.physical_address), | 2339 | writel(lower_32_bits(ihost->uf_control.address_table.physical_address), |
2359 | &scic->scu_registers->sdma.uf_address_table_lower); | 2340 | &ihost->scu_registers->sdma.uf_address_table_lower); |
2360 | writel(upper_32_bits(scic->uf_control.address_table.physical_address), | 2341 | writel(upper_32_bits(ihost->uf_control.address_table.physical_address), |
2361 | &scic->scu_registers->sdma.uf_address_table_upper); | 2342 | &ihost->scu_registers->sdma.uf_address_table_upper); |
2362 | 2343 | ||
2363 | return 0; | 2344 | return 0; |
2364 | } | 2345 | } |
2365 | 2346 | ||
2366 | int isci_host_init(struct isci_host *isci_host) | 2347 | int isci_host_init(struct isci_host *ihost) |
2367 | { | 2348 | { |
2368 | int err = 0, i; | 2349 | int err = 0, i; |
2369 | enum sci_status status; | 2350 | enum sci_status status; |
2370 | union scic_oem_parameters oem; | 2351 | union scic_oem_parameters oem; |
2371 | union scic_user_parameters scic_user_params; | 2352 | union scic_user_parameters scic_user_params; |
2372 | struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev); | 2353 | struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); |
2373 | 2354 | ||
2374 | spin_lock_init(&isci_host->state_lock); | 2355 | spin_lock_init(&ihost->state_lock); |
2375 | spin_lock_init(&isci_host->scic_lock); | 2356 | spin_lock_init(&ihost->scic_lock); |
2376 | init_waitqueue_head(&isci_host->eventq); | 2357 | init_waitqueue_head(&ihost->eventq); |
2377 | 2358 | ||
2378 | isci_host_change_state(isci_host, isci_starting); | 2359 | isci_host_change_state(ihost, isci_starting); |
2379 | 2360 | ||
2380 | status = scic_controller_construct(&isci_host->sci, scu_base(isci_host), | 2361 | status = scic_controller_construct(ihost, scu_base(ihost), |
2381 | smu_base(isci_host)); | 2362 | smu_base(ihost)); |
2382 | 2363 | ||
2383 | if (status != SCI_SUCCESS) { | 2364 | if (status != SCI_SUCCESS) { |
2384 | dev_err(&isci_host->pdev->dev, | 2365 | dev_err(&ihost->pdev->dev, |
2385 | "%s: scic_controller_construct failed - status = %x\n", | 2366 | "%s: scic_controller_construct failed - status = %x\n", |
2386 | __func__, | 2367 | __func__, |
2387 | status); | 2368 | status); |
2388 | return -ENODEV; | 2369 | return -ENODEV; |
2389 | } | 2370 | } |
2390 | 2371 | ||
2391 | isci_host->sas_ha.dev = &isci_host->pdev->dev; | 2372 | ihost->sas_ha.dev = &ihost->pdev->dev; |
2392 | isci_host->sas_ha.lldd_ha = isci_host; | 2373 | ihost->sas_ha.lldd_ha = ihost; |
2393 | 2374 | ||
2394 | /* | 2375 | /* |
2395 | * grab initial values stored in the controller object for OEM and USER | 2376 | * grab initial values stored in the controller object for OEM and USER |
2396 | * parameters | 2377 | * parameters |
2397 | */ | 2378 | */ |
2398 | isci_user_parameters_get(isci_host, &scic_user_params); | 2379 | isci_user_parameters_get(ihost, &scic_user_params); |
2399 | status = scic_user_parameters_set(&isci_host->sci, | 2380 | status = scic_user_parameters_set(ihost, |
2400 | &scic_user_params); | 2381 | &scic_user_params); |
2401 | if (status != SCI_SUCCESS) { | 2382 | if (status != SCI_SUCCESS) { |
2402 | dev_warn(&isci_host->pdev->dev, | 2383 | dev_warn(&ihost->pdev->dev, |
2403 | "%s: scic_user_parameters_set failed\n", | 2384 | "%s: scic_user_parameters_set failed\n", |
2404 | __func__); | 2385 | __func__); |
2405 | return -ENODEV; | 2386 | return -ENODEV; |
2406 | } | 2387 | } |
2407 | 2388 | ||
2408 | scic_oem_parameters_get(&isci_host->sci, &oem); | 2389 | scic_oem_parameters_get(ihost, &oem); |
2409 | 2390 | ||
2410 | /* grab any OEM parameters specified in orom */ | 2391 | /* grab any OEM parameters specified in orom */ |
2411 | if (pci_info->orom) { | 2392 | if (pci_info->orom) { |
2412 | status = isci_parse_oem_parameters(&oem, | 2393 | status = isci_parse_oem_parameters(&oem, |
2413 | pci_info->orom, | 2394 | pci_info->orom, |
2414 | isci_host->id); | 2395 | ihost->id); |
2415 | if (status != SCI_SUCCESS) { | 2396 | if (status != SCI_SUCCESS) { |
2416 | dev_warn(&isci_host->pdev->dev, | 2397 | dev_warn(&ihost->pdev->dev, |
2417 | "parsing firmware oem parameters failed\n"); | 2398 | "parsing firmware oem parameters failed\n"); |
2418 | return -EINVAL; | 2399 | return -EINVAL; |
2419 | } | 2400 | } |
2420 | } | 2401 | } |
2421 | 2402 | ||
2422 | status = scic_oem_parameters_set(&isci_host->sci, &oem); | 2403 | status = scic_oem_parameters_set(ihost, &oem); |
2423 | if (status != SCI_SUCCESS) { | 2404 | if (status != SCI_SUCCESS) { |
2424 | dev_warn(&isci_host->pdev->dev, | 2405 | dev_warn(&ihost->pdev->dev, |
2425 | "%s: scic_oem_parameters_set failed\n", | 2406 | "%s: scic_oem_parameters_set failed\n", |
2426 | __func__); | 2407 | __func__); |
2427 | return -ENODEV; | 2408 | return -ENODEV; |
2428 | } | 2409 | } |
2429 | 2410 | ||
2430 | tasklet_init(&isci_host->completion_tasklet, | 2411 | tasklet_init(&ihost->completion_tasklet, |
2431 | isci_host_completion_routine, (unsigned long)isci_host); | 2412 | isci_host_completion_routine, (unsigned long)ihost); |
2432 | 2413 | ||
2433 | INIT_LIST_HEAD(&isci_host->requests_to_complete); | 2414 | INIT_LIST_HEAD(&ihost->requests_to_complete); |
2434 | INIT_LIST_HEAD(&isci_host->requests_to_errorback); | 2415 | INIT_LIST_HEAD(&ihost->requests_to_errorback); |
2435 | 2416 | ||
2436 | spin_lock_irq(&isci_host->scic_lock); | 2417 | spin_lock_irq(&ihost->scic_lock); |
2437 | status = scic_controller_initialize(&isci_host->sci); | 2418 | status = scic_controller_initialize(ihost); |
2438 | spin_unlock_irq(&isci_host->scic_lock); | 2419 | spin_unlock_irq(&ihost->scic_lock); |
2439 | if (status != SCI_SUCCESS) { | 2420 | if (status != SCI_SUCCESS) { |
2440 | dev_warn(&isci_host->pdev->dev, | 2421 | dev_warn(&ihost->pdev->dev, |
2441 | "%s: scic_controller_initialize failed -" | 2422 | "%s: scic_controller_initialize failed -" |
2442 | " status = 0x%x\n", | 2423 | " status = 0x%x\n", |
2443 | __func__, status); | 2424 | __func__, status); |
2444 | return -ENODEV; | 2425 | return -ENODEV; |
2445 | } | 2426 | } |
2446 | 2427 | ||
2447 | err = scic_controller_mem_init(&isci_host->sci); | 2428 | err = scic_controller_mem_init(ihost); |
2448 | if (err) | 2429 | if (err) |
2449 | return err; | 2430 | return err; |
2450 | 2431 | ||
2451 | for (i = 0; i < SCI_MAX_PORTS; i++) | 2432 | for (i = 0; i < SCI_MAX_PORTS; i++) |
2452 | isci_port_init(&isci_host->ports[i], isci_host, i); | 2433 | isci_port_init(&ihost->ports[i], ihost, i); |
2453 | 2434 | ||
2454 | for (i = 0; i < SCI_MAX_PHYS; i++) | 2435 | for (i = 0; i < SCI_MAX_PHYS; i++) |
2455 | isci_phy_init(&isci_host->phys[i], isci_host, i); | 2436 | isci_phy_init(&ihost->phys[i], ihost, i); |
2456 | 2437 | ||
2457 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | 2438 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { |
2458 | struct isci_remote_device *idev = &isci_host->devices[i]; | 2439 | struct isci_remote_device *idev = &ihost->devices[i]; |
2459 | 2440 | ||
2460 | INIT_LIST_HEAD(&idev->reqs_in_process); | 2441 | INIT_LIST_HEAD(&idev->reqs_in_process); |
2461 | INIT_LIST_HEAD(&idev->node); | 2442 | INIT_LIST_HEAD(&idev->node); |
@@ -2465,63 +2446,62 @@ int isci_host_init(struct isci_host *isci_host) | |||
2465 | struct isci_request *ireq; | 2446 | struct isci_request *ireq; |
2466 | dma_addr_t dma; | 2447 | dma_addr_t dma; |
2467 | 2448 | ||
2468 | ireq = dmam_alloc_coherent(&isci_host->pdev->dev, | 2449 | ireq = dmam_alloc_coherent(&ihost->pdev->dev, |
2469 | sizeof(struct isci_request), &dma, | 2450 | sizeof(struct isci_request), &dma, |
2470 | GFP_KERNEL); | 2451 | GFP_KERNEL); |
2471 | if (!ireq) | 2452 | if (!ireq) |
2472 | return -ENOMEM; | 2453 | return -ENOMEM; |
2473 | 2454 | ||
2474 | ireq->tc = &isci_host->sci.task_context_table[i]; | 2455 | ireq->tc = &ihost->task_context_table[i]; |
2475 | ireq->owning_controller = &isci_host->sci; | 2456 | ireq->owning_controller = ihost; |
2476 | spin_lock_init(&ireq->state_lock); | 2457 | spin_lock_init(&ireq->state_lock); |
2477 | ireq->request_daddr = dma; | 2458 | ireq->request_daddr = dma; |
2478 | ireq->isci_host = isci_host; | 2459 | ireq->isci_host = ihost; |
2479 | 2460 | ihost->reqs[i] = ireq; | |
2480 | isci_host->reqs[i] = ireq; | ||
2481 | } | 2461 | } |
2482 | 2462 | ||
2483 | return 0; | 2463 | return 0; |
2484 | } | 2464 | } |
2485 | 2465 | ||
2486 | void scic_sds_controller_link_up(struct scic_sds_controller *scic, | 2466 | void scic_sds_controller_link_up(struct isci_host *ihost, |
2487 | struct isci_port *iport, struct isci_phy *iphy) | 2467 | struct isci_port *iport, struct isci_phy *iphy) |
2488 | { | 2468 | { |
2489 | switch (scic->sm.current_state_id) { | 2469 | switch (ihost->sm.current_state_id) { |
2490 | case SCIC_STARTING: | 2470 | case SCIC_STARTING: |
2491 | sci_del_timer(&scic->phy_timer); | 2471 | sci_del_timer(&ihost->phy_timer); |
2492 | scic->phy_startup_timer_pending = false; | 2472 | ihost->phy_startup_timer_pending = false; |
2493 | scic->port_agent.link_up_handler(scic, &scic->port_agent, | 2473 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, |
2494 | iport, iphy); | 2474 | iport, iphy); |
2495 | scic_sds_controller_start_next_phy(scic); | 2475 | scic_sds_controller_start_next_phy(ihost); |
2496 | break; | 2476 | break; |
2497 | case SCIC_READY: | 2477 | case SCIC_READY: |
2498 | scic->port_agent.link_up_handler(scic, &scic->port_agent, | 2478 | ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, |
2499 | iport, iphy); | 2479 | iport, iphy); |
2500 | break; | 2480 | break; |
2501 | default: | 2481 | default: |
2502 | dev_dbg(scic_to_dev(scic), | 2482 | dev_dbg(&ihost->pdev->dev, |
2503 | "%s: SCIC Controller linkup event from phy %d in " | 2483 | "%s: SCIC Controller linkup event from phy %d in " |
2504 | "unexpected state %d\n", __func__, iphy->phy_index, | 2484 | "unexpected state %d\n", __func__, iphy->phy_index, |
2505 | scic->sm.current_state_id); | 2485 | ihost->sm.current_state_id); |
2506 | } | 2486 | } |
2507 | } | 2487 | } |
2508 | 2488 | ||
2509 | void scic_sds_controller_link_down(struct scic_sds_controller *scic, | 2489 | void scic_sds_controller_link_down(struct isci_host *ihost, |
2510 | struct isci_port *iport, struct isci_phy *iphy) | 2490 | struct isci_port *iport, struct isci_phy *iphy) |
2511 | { | 2491 | { |
2512 | switch (scic->sm.current_state_id) { | 2492 | switch (ihost->sm.current_state_id) { |
2513 | case SCIC_STARTING: | 2493 | case SCIC_STARTING: |
2514 | case SCIC_READY: | 2494 | case SCIC_READY: |
2515 | scic->port_agent.link_down_handler(scic, &scic->port_agent, | 2495 | ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, |
2516 | iport, iphy); | 2496 | iport, iphy); |
2517 | break; | 2497 | break; |
2518 | default: | 2498 | default: |
2519 | dev_dbg(scic_to_dev(scic), | 2499 | dev_dbg(&ihost->pdev->dev, |
2520 | "%s: SCIC Controller linkdown event from phy %d in " | 2500 | "%s: SCIC Controller linkdown event from phy %d in " |
2521 | "unexpected state %d\n", | 2501 | "unexpected state %d\n", |
2522 | __func__, | 2502 | __func__, |
2523 | iphy->phy_index, | 2503 | iphy->phy_index, |
2524 | scic->sm.current_state_id); | 2504 | ihost->sm.current_state_id); |
2525 | } | 2505 | } |
2526 | } | 2506 | } |
2527 | 2507 | ||
@@ -2530,14 +2510,13 @@ void scic_sds_controller_link_down(struct scic_sds_controller *scic, | |||
2530 | * controller are still in the stopping state. | 2510 | * controller are still in the stopping state. |
2531 | * | 2511 | * |
2532 | */ | 2512 | */ |
2533 | static bool scic_sds_controller_has_remote_devices_stopping( | 2513 | static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost) |
2534 | struct scic_sds_controller *controller) | ||
2535 | { | 2514 | { |
2536 | u32 index; | 2515 | u32 index; |
2537 | 2516 | ||
2538 | for (index = 0; index < controller->remote_node_entries; index++) { | 2517 | for (index = 0; index < ihost->remote_node_entries; index++) { |
2539 | if ((controller->device_table[index] != NULL) && | 2518 | if ((ihost->device_table[index] != NULL) && |
2540 | (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) | 2519 | (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) |
2541 | return true; | 2520 | return true; |
2542 | } | 2521 | } |
2543 | 2522 | ||
@@ -2548,20 +2527,20 @@ static bool scic_sds_controller_has_remote_devices_stopping( | |||
2548 | * This method is called by the remote device to inform the controller | 2527 | * This method is called by the remote device to inform the controller |
2549 | * object that the remote device has stopped. | 2528 | * object that the remote device has stopped. |
2550 | */ | 2529 | */ |
2551 | void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, | 2530 | void scic_sds_controller_remote_device_stopped(struct isci_host *ihost, |
2552 | struct isci_remote_device *idev) | 2531 | struct isci_remote_device *idev) |
2553 | { | 2532 | { |
2554 | if (scic->sm.current_state_id != SCIC_STOPPING) { | 2533 | if (ihost->sm.current_state_id != SCIC_STOPPING) { |
2555 | dev_dbg(scic_to_dev(scic), | 2534 | dev_dbg(&ihost->pdev->dev, |
2556 | "SCIC Controller 0x%p remote device stopped event " | 2535 | "SCIC Controller 0x%p remote device stopped event " |
2557 | "from device 0x%p in unexpected state %d\n", | 2536 | "from device 0x%p in unexpected state %d\n", |
2558 | scic, idev, | 2537 | ihost, idev, |
2559 | scic->sm.current_state_id); | 2538 | ihost->sm.current_state_id); |
2560 | return; | 2539 | return; |
2561 | } | 2540 | } |
2562 | 2541 | ||
2563 | if (!scic_sds_controller_has_remote_devices_stopping(scic)) { | 2542 | if (!scic_sds_controller_has_remote_devices_stopping(ihost)) { |
2564 | sci_change_state(&scic->sm, SCIC_STOPPED); | 2543 | sci_change_state(&ihost->sm, SCIC_STOPPED); |
2565 | } | 2544 | } |
2566 | } | 2545 | } |
2567 | 2546 | ||
@@ -2573,32 +2552,32 @@ void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic, | |||
2573 | * | 2552 | * |
2574 | */ | 2553 | */ |
2575 | void scic_sds_controller_post_request( | 2554 | void scic_sds_controller_post_request( |
2576 | struct scic_sds_controller *scic, | 2555 | struct isci_host *ihost, |
2577 | u32 request) | 2556 | u32 request) |
2578 | { | 2557 | { |
2579 | dev_dbg(scic_to_dev(scic), | 2558 | dev_dbg(&ihost->pdev->dev, |
2580 | "%s: SCIC Controller 0x%p post request 0x%08x\n", | 2559 | "%s: SCIC Controller 0x%p post request 0x%08x\n", |
2581 | __func__, | 2560 | __func__, |
2582 | scic, | 2561 | ihost, |
2583 | request); | 2562 | request); |
2584 | 2563 | ||
2585 | writel(request, &scic->smu_registers->post_context_port); | 2564 | writel(request, &ihost->smu_registers->post_context_port); |
2586 | } | 2565 | } |
2587 | 2566 | ||
2588 | struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag) | 2567 | struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag) |
2589 | { | 2568 | { |
2590 | u16 task_index; | 2569 | u16 task_index; |
2591 | u16 task_sequence; | 2570 | u16 task_sequence; |
2592 | 2571 | ||
2593 | task_index = ISCI_TAG_TCI(io_tag); | 2572 | task_index = ISCI_TAG_TCI(io_tag); |
2594 | 2573 | ||
2595 | if (task_index < scic->task_context_entries) { | 2574 | if (task_index < ihost->task_context_entries) { |
2596 | struct isci_request *ireq = scic_to_ihost(scic)->reqs[task_index]; | 2575 | struct isci_request *ireq = ihost->reqs[task_index]; |
2597 | 2576 | ||
2598 | if (test_bit(IREQ_ACTIVE, &ireq->flags)) { | 2577 | if (test_bit(IREQ_ACTIVE, &ireq->flags)) { |
2599 | task_sequence = ISCI_TAG_SEQ(io_tag); | 2578 | task_sequence = ISCI_TAG_SEQ(io_tag); |
2600 | 2579 | ||
2601 | if (task_sequence == scic->io_request_sequence[task_index]) | 2580 | if (task_sequence == ihost->io_request_sequence[task_index]) |
2602 | return ireq; | 2581 | return ireq; |
2603 | } | 2582 | } |
2604 | } | 2583 | } |
@@ -2621,7 +2600,7 @@ struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 i | |||
2621 | * node index available. | 2600 | * node index available. |
2622 | */ | 2601 | */ |
2623 | enum sci_status scic_sds_controller_allocate_remote_node_context( | 2602 | enum sci_status scic_sds_controller_allocate_remote_node_context( |
2624 | struct scic_sds_controller *scic, | 2603 | struct isci_host *ihost, |
2625 | struct isci_remote_device *idev, | 2604 | struct isci_remote_device *idev, |
2626 | u16 *node_id) | 2605 | u16 *node_id) |
2627 | { | 2606 | { |
@@ -2629,11 +2608,11 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( | |||
2629 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); | 2608 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); |
2630 | 2609 | ||
2631 | node_index = scic_sds_remote_node_table_allocate_remote_node( | 2610 | node_index = scic_sds_remote_node_table_allocate_remote_node( |
2632 | &scic->available_remote_nodes, remote_node_count | 2611 | &ihost->available_remote_nodes, remote_node_count |
2633 | ); | 2612 | ); |
2634 | 2613 | ||
2635 | if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { | 2614 | if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { |
2636 | scic->device_table[node_index] = idev; | 2615 | ihost->device_table[node_index] = idev; |
2637 | 2616 | ||
2638 | *node_id = node_index; | 2617 | *node_id = node_index; |
2639 | 2618 | ||
@@ -2653,17 +2632,17 @@ enum sci_status scic_sds_controller_allocate_remote_node_context( | |||
2653 | * | 2632 | * |
2654 | */ | 2633 | */ |
2655 | void scic_sds_controller_free_remote_node_context( | 2634 | void scic_sds_controller_free_remote_node_context( |
2656 | struct scic_sds_controller *scic, | 2635 | struct isci_host *ihost, |
2657 | struct isci_remote_device *idev, | 2636 | struct isci_remote_device *idev, |
2658 | u16 node_id) | 2637 | u16 node_id) |
2659 | { | 2638 | { |
2660 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); | 2639 | u32 remote_node_count = scic_sds_remote_device_node_count(idev); |
2661 | 2640 | ||
2662 | if (scic->device_table[node_id] == idev) { | 2641 | if (ihost->device_table[node_id] == idev) { |
2663 | scic->device_table[node_id] = NULL; | 2642 | ihost->device_table[node_id] = NULL; |
2664 | 2643 | ||
2665 | scic_sds_remote_node_table_release_remote_node_index( | 2644 | scic_sds_remote_node_table_release_remote_node_index( |
2666 | &scic->available_remote_nodes, remote_node_count, node_id | 2645 | &ihost->available_remote_nodes, remote_node_count, node_id |
2667 | ); | 2646 | ); |
2668 | } | 2647 | } |
2669 | } | 2648 | } |
@@ -2677,14 +2656,14 @@ void scic_sds_controller_free_remote_node_context( | |||
2677 | * union scu_remote_node_context* | 2656 | * union scu_remote_node_context* |
2678 | */ | 2657 | */ |
2679 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( | 2658 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( |
2680 | struct scic_sds_controller *scic, | 2659 | struct isci_host *ihost, |
2681 | u16 node_id | 2660 | u16 node_id |
2682 | ) { | 2661 | ) { |
2683 | if ( | 2662 | if ( |
2684 | (node_id < scic->remote_node_entries) | 2663 | (node_id < ihost->remote_node_entries) |
2685 | && (scic->device_table[node_id] != NULL) | 2664 | && (ihost->device_table[node_id] != NULL) |
2686 | ) { | 2665 | ) { |
2687 | return &scic->remote_node_context_table[node_id]; | 2666 | return &ihost->remote_node_context_table[node_id]; |
2688 | } | 2667 | } |
2689 | 2668 | ||
2690 | return NULL; | 2669 | return NULL; |
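Together with scic_sds_controller_get_remote_node_context_buffer(), the allocate/free helpers above cover the remote node index (RNi) lifecycle. A hedged sketch of a caller, ignoring error paths and assuming the device object is managed elsewhere:

	u16 rni;

	if (scic_sds_controller_allocate_remote_node_context(ihost, idev, &rni) == SCI_SUCCESS) {
		union scu_remote_node_context *rnc =
			scic_sds_controller_get_remote_node_context_buffer(ihost, rni);

		/* ... program rnc for idev and run I/O against the node ... */

		scic_sds_controller_free_remote_node_context(ihost, idev, rni);
	}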
@@ -2722,13 +2701,13 @@ void scic_sds_controller_copy_sata_response( | |||
2722 | * | 2701 | * |
2723 | */ | 2702 | */ |
2724 | void scic_sds_controller_release_frame( | 2703 | void scic_sds_controller_release_frame( |
2725 | struct scic_sds_controller *scic, | 2704 | struct isci_host *ihost, |
2726 | u32 frame_index) | 2705 | u32 frame_index) |
2727 | { | 2706 | { |
2728 | if (scic_sds_unsolicited_frame_control_release_frame( | 2707 | if (scic_sds_unsolicited_frame_control_release_frame( |
2729 | &scic->uf_control, frame_index) == true) | 2708 | &ihost->uf_control, frame_index) == true) |
2730 | writel(scic->uf_control.get, | 2709 | writel(ihost->uf_control.get, |
2731 | &scic->scu_registers->sdma.unsolicited_frame_get_pointer); | 2710 | &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); |
2732 | } | 2711 | } |
2733 | 2712 | ||
2734 | void isci_tci_free(struct isci_host *ihost, u16 tci) | 2713 | void isci_tci_free(struct isci_host *ihost, u16 tci) |
@@ -2757,7 +2736,7 @@ u16 isci_alloc_tag(struct isci_host *ihost) | |||
2757 | { | 2736 | { |
2758 | if (isci_tci_space(ihost)) { | 2737 | if (isci_tci_space(ihost)) { |
2759 | u16 tci = isci_tci_alloc(ihost); | 2738 | u16 tci = isci_tci_alloc(ihost); |
2760 | u8 seq = ihost->sci.io_request_sequence[tci]; | 2739 | u8 seq = ihost->io_request_sequence[tci]; |
2761 | 2740 | ||
2762 | return ISCI_TAG(seq, tci); | 2741 | return ISCI_TAG(seq, tci); |
2763 | } | 2742 | } |
@@ -2767,7 +2746,6 @@ u16 isci_alloc_tag(struct isci_host *ihost) | |||
2767 | 2746 | ||
2768 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) | 2747 | enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) |
2769 | { | 2748 | { |
2770 | struct scic_sds_controller *scic = &ihost->sci; | ||
2771 | u16 tci = ISCI_TAG_TCI(io_tag); | 2749 | u16 tci = ISCI_TAG_TCI(io_tag); |
2772 | u16 seq = ISCI_TAG_SEQ(io_tag); | 2750 | u16 seq = ISCI_TAG_SEQ(io_tag); |
2773 | 2751 | ||
@@ -2775,8 +2753,8 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) | |||
2775 | if (isci_tci_active(ihost) == 0) | 2753 | if (isci_tci_active(ihost) == 0) |
2776 | return SCI_FAILURE_INVALID_IO_TAG; | 2754 | return SCI_FAILURE_INVALID_IO_TAG; |
2777 | 2755 | ||
2778 | if (seq == scic->io_request_sequence[tci]) { | 2756 | if (seq == ihost->io_request_sequence[tci]) { |
2779 | scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); | 2757 | ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); |
2780 | 2758 | ||
2781 | isci_tci_free(ihost, tci); | 2759 | isci_tci_free(ihost, tci); |
2782 | 2760 | ||
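The per-tci sequence number is what guards against stale tags: isci_free_tag() advances ihost->io_request_sequence[tci], so a later lookup with the old tag fails the ISCI_TAG_SEQ() comparison in scic_request_by_tag(). An illustration only, with locking and error handling omitted:

	u16 tag = isci_alloc_tag(ihost);	/* tag == ISCI_TAG(seq, tci) */
	struct isci_request *ireq;

	ireq = scic_request_by_tag(ihost, tag);	/* matches while the request is active */
	isci_free_tag(ihost, tag);		/* sequence for this tci is advanced */
	ireq = scic_request_by_tag(ihost, tag);	/* stale tag no longer matches */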
@@ -2797,23 +2775,23 @@ enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) | |||
2797 | * @io_tag: This parameter specifies a previously allocated IO tag that the | 2775 | * @io_tag: This parameter specifies a previously allocated IO tag that the |
2798 | * user desires to be utilized for this request. | 2776 | * user desires to be utilized for this request. |
2799 | */ | 2777 | */ |
2800 | enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, | 2778 | enum sci_status scic_controller_start_io(struct isci_host *ihost, |
2801 | struct isci_remote_device *idev, | 2779 | struct isci_remote_device *idev, |
2802 | struct isci_request *ireq) | 2780 | struct isci_request *ireq) |
2803 | { | 2781 | { |
2804 | enum sci_status status; | 2782 | enum sci_status status; |
2805 | 2783 | ||
2806 | if (scic->sm.current_state_id != SCIC_READY) { | 2784 | if (ihost->sm.current_state_id != SCIC_READY) { |
2807 | dev_warn(scic_to_dev(scic), "invalid state to start I/O"); | 2785 | dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); |
2808 | return SCI_FAILURE_INVALID_STATE; | 2786 | return SCI_FAILURE_INVALID_STATE; |
2809 | } | 2787 | } |
2810 | 2788 | ||
2811 | status = scic_sds_remote_device_start_io(scic, idev, ireq); | 2789 | status = scic_sds_remote_device_start_io(ihost, idev, ireq); |
2812 | if (status != SCI_SUCCESS) | 2790 | if (status != SCI_SUCCESS) |
2813 | return status; | 2791 | return status; |
2814 | 2792 | ||
2815 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2793 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2816 | scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); | 2794 | scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); |
2817 | return SCI_SUCCESS; | 2795 | return SCI_SUCCESS; |
2818 | } | 2796 | } |
2819 | 2797 | ||
@@ -2834,14 +2812,14 @@ enum sci_status scic_controller_start_io(struct scic_sds_controller *scic, | |||
2834 | * for the request. Determine the failure situations and return values. | 2812 | * for the request. Determine the failure situations and return values. |
2835 | */ | 2813 | */ |
2836 | enum sci_status scic_controller_terminate_request( | 2814 | enum sci_status scic_controller_terminate_request( |
2837 | struct scic_sds_controller *scic, | 2815 | struct isci_host *ihost, |
2838 | struct isci_remote_device *idev, | 2816 | struct isci_remote_device *idev, |
2839 | struct isci_request *ireq) | 2817 | struct isci_request *ireq) |
2840 | { | 2818 | { |
2841 | enum sci_status status; | 2819 | enum sci_status status; |
2842 | 2820 | ||
2843 | if (scic->sm.current_state_id != SCIC_READY) { | 2821 | if (ihost->sm.current_state_id != SCIC_READY) { |
2844 | dev_warn(scic_to_dev(scic), | 2822 | dev_warn(&ihost->pdev->dev, |
2845 | "invalid state to terminate request\n"); | 2823 | "invalid state to terminate request\n"); |
2846 | return SCI_FAILURE_INVALID_STATE; | 2824 | return SCI_FAILURE_INVALID_STATE; |
2847 | } | 2825 | } |
@@ -2854,7 +2832,7 @@ enum sci_status scic_controller_terminate_request( | |||
2854 | * Utilize the original post context command and or in the POST_TC_ABORT | 2832 | * Utilize the original post context command and or in the POST_TC_ABORT |
2855 | * request sub-type. | 2833 | * request sub-type. |
2856 | */ | 2834 | */ |
2857 | scic_sds_controller_post_request(scic, | 2835 | scic_sds_controller_post_request(ihost, |
2858 | scic_sds_request_get_post_context(ireq) | | 2836 | scic_sds_request_get_post_context(ireq) | |
2859 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); | 2837 | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); |
2860 | return SCI_SUCCESS; | 2838 | return SCI_SUCCESS; |
@@ -2872,19 +2850,19 @@ enum sci_status scic_controller_terminate_request( | |||
2872 | * @io_request: the handle to the io request object to complete. | 2850 | * @io_request: the handle to the io request object to complete. |
2873 | */ | 2851 | */ |
2874 | enum sci_status scic_controller_complete_io( | 2852 | enum sci_status scic_controller_complete_io( |
2875 | struct scic_sds_controller *scic, | 2853 | struct isci_host *ihost, |
2876 | struct isci_remote_device *idev, | 2854 | struct isci_remote_device *idev, |
2877 | struct isci_request *ireq) | 2855 | struct isci_request *ireq) |
2878 | { | 2856 | { |
2879 | enum sci_status status; | 2857 | enum sci_status status; |
2880 | u16 index; | 2858 | u16 index; |
2881 | 2859 | ||
2882 | switch (scic->sm.current_state_id) { | 2860 | switch (ihost->sm.current_state_id) { |
2883 | case SCIC_STOPPING: | 2861 | case SCIC_STOPPING: |
2884 | /* XXX: Implement this function */ | 2862 | /* XXX: Implement this function */ |
2885 | return SCI_FAILURE; | 2863 | return SCI_FAILURE; |
2886 | case SCIC_READY: | 2864 | case SCIC_READY: |
2887 | status = scic_sds_remote_device_complete_io(scic, idev, ireq); | 2865 | status = scic_sds_remote_device_complete_io(ihost, idev, ireq); |
2888 | if (status != SCI_SUCCESS) | 2866 | if (status != SCI_SUCCESS) |
2889 | return status; | 2867 | return status; |
2890 | 2868 | ||
@@ -2892,7 +2870,7 @@ enum sci_status scic_controller_complete_io( | |||
2892 | clear_bit(IREQ_ACTIVE, &ireq->flags); | 2870 | clear_bit(IREQ_ACTIVE, &ireq->flags); |
2893 | return SCI_SUCCESS; | 2871 | return SCI_SUCCESS; |
2894 | default: | 2872 | default: |
2895 | dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); | 2873 | dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); |
2896 | return SCI_FAILURE_INVALID_STATE; | 2874 | return SCI_FAILURE_INVALID_STATE; |
2897 | } | 2875 | } |
2898 | 2876 | ||
@@ -2900,15 +2878,15 @@ enum sci_status scic_controller_complete_io( | |||
2900 | 2878 | ||
2901 | enum sci_status scic_controller_continue_io(struct isci_request *ireq) | 2879 | enum sci_status scic_controller_continue_io(struct isci_request *ireq) |
2902 | { | 2880 | { |
2903 | struct scic_sds_controller *scic = ireq->owning_controller; | 2881 | struct isci_host *ihost = ireq->owning_controller; |
2904 | 2882 | ||
2905 | if (scic->sm.current_state_id != SCIC_READY) { | 2883 | if (ihost->sm.current_state_id != SCIC_READY) { |
2906 | dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); | 2884 | dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); |
2907 | return SCI_FAILURE_INVALID_STATE; | 2885 | return SCI_FAILURE_INVALID_STATE; |
2908 | } | 2886 | } |
2909 | 2887 | ||
2910 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2888 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2911 | scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(ireq)); | 2889 | scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq)); |
2912 | return SCI_SUCCESS; | 2890 | return SCI_SUCCESS; |
2913 | } | 2891 | } |
2914 | 2892 | ||
@@ -2922,21 +2900,21 @@ enum sci_status scic_controller_continue_io(struct isci_request *ireq) | |||
2922 | * @task_request: the handle to the task request object to start. | 2900 | * @task_request: the handle to the task request object to start. |
2923 | */ | 2901 | */ |
2924 | enum sci_task_status scic_controller_start_task( | 2902 | enum sci_task_status scic_controller_start_task( |
2925 | struct scic_sds_controller *scic, | 2903 | struct isci_host *ihost, |
2926 | struct isci_remote_device *idev, | 2904 | struct isci_remote_device *idev, |
2927 | struct isci_request *ireq) | 2905 | struct isci_request *ireq) |
2928 | { | 2906 | { |
2929 | enum sci_status status; | 2907 | enum sci_status status; |
2930 | 2908 | ||
2931 | if (scic->sm.current_state_id != SCIC_READY) { | 2909 | if (ihost->sm.current_state_id != SCIC_READY) { |
2932 | dev_warn(scic_to_dev(scic), | 2910 | dev_warn(&ihost->pdev->dev, |
2933 | "%s: SCIC Controller starting task from invalid " | 2911 | "%s: SCIC Controller starting task from invalid " |
2934 | "state\n", | 2912 | "state\n", |
2935 | __func__); | 2913 | __func__); |
2936 | return SCI_TASK_FAILURE_INVALID_STATE; | 2914 | return SCI_TASK_FAILURE_INVALID_STATE; |
2937 | } | 2915 | } |
2938 | 2916 | ||
2939 | status = scic_sds_remote_device_start_task(scic, idev, ireq); | 2917 | status = scic_sds_remote_device_start_task(ihost, idev, ireq); |
2940 | switch (status) { | 2918 | switch (status) { |
2941 | case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: | 2919 | case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: |
2942 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2920 | set_bit(IREQ_ACTIVE, &ireq->flags); |
@@ -2950,7 +2928,7 @@ enum sci_task_status scic_controller_start_task( | |||
2950 | case SCI_SUCCESS: | 2928 | case SCI_SUCCESS: |
2951 | set_bit(IREQ_ACTIVE, &ireq->flags); | 2929 | set_bit(IREQ_ACTIVE, &ireq->flags); |
2952 | 2930 | ||
2953 | scic_sds_controller_post_request(scic, | 2931 | scic_sds_controller_post_request(ihost, |
2954 | scic_sds_request_get_post_context(ireq)); | 2932 | scic_sds_request_get_post_context(ireq)); |
2955 | break; | 2933 | break; |
2956 | default: | 2934 | default: |
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index ca2e3b0ee0dd..013f672a8fd7 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -106,7 +106,7 @@ struct scic_power_control { | |||
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct scic_sds_port_configuration_agent; | 108 | struct scic_sds_port_configuration_agent; |
109 | typedef void (*port_config_fn)(struct scic_sds_controller *, | 109 | typedef void (*port_config_fn)(struct isci_host *, |
110 | struct scic_sds_port_configuration_agent *, | 110 | struct scic_sds_port_configuration_agent *, |
111 | struct isci_port *, struct isci_phy *); | 111 | struct isci_port *, struct isci_phy *); |
112 | 112 | ||
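port_config_fn is the callback type the port agent uses for its link up/down handlers (the handler fields themselves are outside this hunk); after the rename the callbacks receive the isci_host directly. The host.c hunks above invoke them as:

	ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, iport, iphy);
	ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, iport, iphy);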
@@ -124,171 +124,66 @@ struct scic_sds_port_configuration_agent { | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | /** | 126 | /** |
127 | * struct scic_sds_controller - | 127 | * isci_host - primary host/controller object |
128 | * | 128 | * @timer: timeout start/stop operations |
129 | * This structure represents the SCU controller object. | 129 | * @device_table: rni (hw remote node index) to remote device lookup table |
130 | * @available_remote_nodes: rni allocator | ||
131 | * @power_control: manage device spin up | ||
132 | * @io_request_sequence: generation number for tci's (task contexts) | ||
133 | * @task_context_table: hw task context table | ||
134 | * @remote_node_context_table: hw remote node context table | ||
135 | * @completion_queue: hw-producer driver-consumer communication ring | ||
136 | * @completion_queue_get: tracks the driver 'head' of the ring to notify hw | ||
137 | * @logical_port_entries: min({driver|silicon}-supported-port-count) | ||
138 | * @remote_node_entries: min({driver|silicon}-supported-node-count) | ||
139 | * @task_context_entries: min({driver|silicon}-supported-task-count) | ||
140 | * @phy_timer: phy startup timer | ||
141 | * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for | ||
142 | * the phy index is set so further notifications are not | ||
143 | * made. Once the phy reports link up and is made part of a | ||
144 | * port then this bit is cleared. | ||
145 | |||
130 | */ | 146 | */ |
131 | struct scic_sds_controller { | 147 | struct isci_host { |
132 | /** | ||
133 | * This field contains the information for the base controller state | ||
134 | * machine. | ||
135 | */ | ||
136 | struct sci_base_state_machine sm; | 148 | struct sci_base_state_machine sm; |
137 | 149 | /* XXX can we time this externally */ | |
138 | /** | ||
139 | * Timer for controller start/stop operations. | ||
140 | */ | ||
141 | struct sci_timer timer; | 150 | struct sci_timer timer; |
142 | 151 | /* XXX drop reference module params directly */ | |
143 | /** | ||
144 | * This field contains the user parameters to be utilized for this | ||
145 | * core controller object. | ||
146 | */ | ||
147 | union scic_user_parameters user_parameters; | 152 | union scic_user_parameters user_parameters; |
148 | 153 | /* XXX no need to be a union */ | |
149 | /** | ||
150 | * This field contains the OEM parameters to be utilized for this | ||
151 | * core controller object. | ||
152 | */ | ||
153 | union scic_oem_parameters oem_parameters; | 154 | union scic_oem_parameters oem_parameters; |
154 | |||
155 | /** | ||
156 | * This field contains the port configuration agent for this controller. | ||
157 | */ | ||
158 | struct scic_sds_port_configuration_agent port_agent; | 155 | struct scic_sds_port_configuration_agent port_agent; |
159 | |||
160 | /** | ||
161 | * This field is the array of device objects that are currently constructed | ||
162 | * for this controller object. This table is used as a fast lookup of device | ||
163 | * objects that need to handle device completion notifications from the | ||
164 | * hardware. The table is RNi based. | ||
165 | */ | ||
166 | struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; | 156 | struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; |
167 | |||
168 | /** | ||
169 | * This field is the free RNi data structure | ||
170 | */ | ||
171 | struct scic_remote_node_table available_remote_nodes; | 157 | struct scic_remote_node_table available_remote_nodes; |
172 | |||
173 | /** | ||
174 | * This filed is the struct scic_power_control data used to controll when direct | ||
175 | * attached devices can consume power. | ||
176 | */ | ||
177 | struct scic_power_control power_control; | 158 | struct scic_power_control power_control; |
178 | |||
179 | /* sequence number per tci */ | ||
180 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; | 159 | u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; |
181 | |||
182 | /** | ||
183 | * This field is a pointer to the memory allocated by the driver for the task | ||
184 | * context table. This data is shared between the hardware and software. | ||
185 | */ | ||
186 | struct scu_task_context *task_context_table; | 160 | struct scu_task_context *task_context_table; |
187 | dma_addr_t task_context_dma; | 161 | dma_addr_t task_context_dma; |
188 | |||
189 | /** | ||
190 | * This field is a pointer to the memory allocated by the driver for the | ||
191 | * remote node context table. This table is shared between the hardware and | ||
192 | * software. | ||
193 | */ | ||
194 | union scu_remote_node_context *remote_node_context_table; | 162 | union scu_remote_node_context *remote_node_context_table; |
195 | |||
196 | /** | ||
197 | * This field is a pointer to the completion queue. This memory is | ||
198 | * written to by the hardware and read by the software. | ||
199 | */ | ||
200 | u32 *completion_queue; | 163 | u32 *completion_queue; |
201 | |||
202 | /** | ||
203 | * This field is the software copy of the completion queue get pointer. The | ||
204 | * controller object writes this value to the hardware after processing the | ||
205 | * completion entries. | ||
206 | */ | ||
207 | u32 completion_queue_get; | 164 | u32 completion_queue_get; |
208 | |||
209 | /** | ||
210 | * This field is the minimum of the number of hardware supported port entries | ||
211 | * and the software requested port entries. | ||
212 | */ | ||
213 | u32 logical_port_entries; | 165 | u32 logical_port_entries; |
214 | |||
215 | /** | ||
216 | * This field is the minimum number of devices supported by the hardware and | ||
217 | * the number of devices requested by the software. | ||
218 | */ | ||
219 | u32 remote_node_entries; | 166 | u32 remote_node_entries; |
220 | |||
221 | /** | ||
222 | * This field is the minimum number of IO requests supported by the hardware | ||
223 | * and the number of IO requests requested by the software. | ||
224 | */ | ||
225 | u32 task_context_entries; | 167 | u32 task_context_entries; |
226 | |||
227 | /** | ||
228 | * This object contains all of the unsolicited frame specific | ||
229 | * data utilized by the core controller. | ||
230 | */ | ||
231 | struct scic_sds_unsolicited_frame_control uf_control; | 168 | struct scic_sds_unsolicited_frame_control uf_control; |
232 | 169 | ||
233 | /* Phy Startup Data */ | 170 | /* phy startup */ |
234 | /** | ||
235 | * Timer for controller phy request startup. On controller start the | ||
236 | * controller will start each PHY individually in order of phy index. | ||
237 | */ | ||
238 | struct sci_timer phy_timer; | 171 | struct sci_timer phy_timer; |
239 | 172 | /* XXX kill */ | |
240 | /** | ||
241 | * This field is set when the phy_timer is running and is cleared when | ||
242 | * the phy_timer is stopped. | ||
243 | */ | ||
244 | bool phy_startup_timer_pending; | 173 | bool phy_startup_timer_pending; |
245 | |||
246 | /** | ||
247 | * This field is the index of the next phy start. It is initialized to 0 and | ||
248 | * increments for each phy index that is started. | ||
249 | */ | ||
250 | u32 next_phy_to_start; | 174 | u32 next_phy_to_start; |
251 | |||
252 | /** | ||
253 | * This field controls the invalid link up notifications to the SCI_USER. If | ||
254 | * an invalid_link_up notification is reported a bit for the PHY index is set | ||
255 | * so further notifications are not made. Once the PHY object reports link up | ||
256 | * and is made part of a port then this bit for the PHY index is cleared. | ||
257 | */ | ||
258 | u8 invalid_phy_mask; | 175 | u8 invalid_phy_mask; |
259 | 176 | ||
260 | /* | 177 | /* TODO attempt dynamic interrupt coalescing scheme */ |
261 | * This field saves the current interrupt coalescing number of the controller. | ||
262 | */ | ||
263 | u16 interrupt_coalesce_number; | 178 | u16 interrupt_coalesce_number; |
264 | |||
265 | /* | ||
266 | * This field saves the current interrupt coalescing timeout value in microseconds. | ||
267 | */ | ||
268 | u32 interrupt_coalesce_timeout; | 179 | u32 interrupt_coalesce_timeout; |
269 | |||
270 | /** | ||
271 | * This field is a pointer to the memory mapped register space for the | ||
272 | * struct smu_registers. | ||
273 | */ | ||
274 | struct smu_registers __iomem *smu_registers; | 180 | struct smu_registers __iomem *smu_registers; |
275 | |||
276 | /** | ||
277 | * This field is a pointer to the memory mapped register space for the | ||
278 | * struct scu_registers. | ||
279 | */ | ||
280 | struct scu_registers __iomem *scu_registers; | 181 | struct scu_registers __iomem *scu_registers; |
281 | 182 | ||
282 | }; | ||
283 | |||
284 | struct isci_host { | ||
285 | struct scic_sds_controller sci; | ||
286 | u16 tci_head; | 183 | u16 tci_head; |
287 | u16 tci_tail; | 184 | u16 tci_tail; |
288 | u16 tci_pool[SCI_MAX_IO_REQUESTS]; | 185 | u16 tci_pool[SCI_MAX_IO_REQUESTS]; |
289 | 186 | ||
290 | union scic_oem_parameters oem_parameters; | ||
291 | |||
292 | int id; /* unique within a given pci device */ | 187 | int id; /* unique within a given pci device */ |
293 | struct isci_phy phys[SCI_MAX_PHYS]; | 188 | struct isci_phy phys[SCI_MAX_PHYS]; |
294 | struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ | 189 | struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ |
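With scic_sds_controller folded into struct isci_host, fields formerly reached through the embedded ->sci member become direct members. A representative before/after taken from the isci_host_init() hunk above:

	ireq->tc = &isci_host->sci.task_context_table[i];	/* old: via the embedded controller */
	ireq->tc = &ihost->task_context_table[i];		/* new: direct isci_host member */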
@@ -464,14 +359,6 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) | |||
464 | return dev->port->ha->lldd_ha; | 359 | return dev->port->ha->lldd_ha; |
465 | } | 360 | } |
466 | 361 | ||
467 | static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic) | ||
468 | { | ||
469 | /* XXX delete after merging scic_sds_controller and isci_host */ | ||
470 | struct isci_host *ihost = container_of(scic, typeof(*ihost), sci); | ||
471 | |||
472 | return ihost; | ||
473 | } | ||
474 | |||
475 | /** | 362 | /** |
476 | * scic_sds_controller_get_protocol_engine_group() - | 363 | * scic_sds_controller_get_protocol_engine_group() - |
477 | * | 364 | * |
@@ -518,11 +405,6 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i | |||
518 | #define scic_sds_controller_clear_invalid_phy(controller, phy) \ | 405 | #define scic_sds_controller_clear_invalid_phy(controller, phy) \ |
519 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) | 406 | ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) |
520 | 407 | ||
521 | static inline struct device *scic_to_dev(struct scic_sds_controller *scic) | ||
522 | { | ||
523 | return &scic_to_ihost(scic)->pdev->dev; | ||
524 | } | ||
525 | |||
526 | static inline struct device *sciphy_to_dev(struct isci_phy *iphy) | 408 | static inline struct device *sciphy_to_dev(struct isci_phy *iphy) |
527 | { | 409 | { |
528 | 410 | ||
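With the structures merged, the scic_to_ihost() and scic_to_dev() shims above become unnecessary; call sites use the ihost pointer and its pdev directly, as in this pair from the start_io conversion in host.c:

	dev_warn(scic_to_dev(scic), "invalid state to start I/O");	/* old */
	dev_warn(&ihost->pdev->dev, "invalid state to start I/O");	/* new */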
@@ -578,54 +460,54 @@ static inline bool is_c0(void) | |||
578 | return isci_si_rev > ISCI_SI_REVB0; | 460 | return isci_si_rev > ISCI_SI_REVB0; |
579 | } | 461 | } |
580 | 462 | ||
581 | void scic_sds_controller_post_request(struct scic_sds_controller *scic, | 463 | void scic_sds_controller_post_request(struct isci_host *ihost, |
582 | u32 request); | 464 | u32 request); |
583 | void scic_sds_controller_release_frame(struct scic_sds_controller *scic, | 465 | void scic_sds_controller_release_frame(struct isci_host *ihost, |
584 | u32 frame_index); | 466 | u32 frame_index); |
585 | void scic_sds_controller_copy_sata_response(void *response_buffer, | 467 | void scic_sds_controller_copy_sata_response(void *response_buffer, |
586 | void *frame_header, | 468 | void *frame_header, |
587 | void *frame_buffer); | 469 | void *frame_buffer); |
588 | enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic, | 470 | enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost, |
589 | struct isci_remote_device *idev, | 471 | struct isci_remote_device *idev, |
590 | u16 *node_id); | 472 | u16 *node_id); |
591 | void scic_sds_controller_free_remote_node_context( | 473 | void scic_sds_controller_free_remote_node_context( |
592 | struct scic_sds_controller *scic, | 474 | struct isci_host *ihost, |
593 | struct isci_remote_device *idev, | 475 | struct isci_remote_device *idev, |
594 | u16 node_id); | 476 | u16 node_id); |
595 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( | 477 | union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( |
596 | struct scic_sds_controller *scic, | 478 | struct isci_host *ihost, |
597 | u16 node_id); | 479 | u16 node_id); |
598 | 480 | ||
599 | struct isci_request *scic_request_by_tag(struct scic_sds_controller *scic, | 481 | struct isci_request *scic_request_by_tag(struct isci_host *ihost, |
600 | u16 io_tag); | 482 | u16 io_tag); |
601 | 483 | ||
602 | void scic_sds_controller_power_control_queue_insert( | 484 | void scic_sds_controller_power_control_queue_insert( |
603 | struct scic_sds_controller *scic, | 485 | struct isci_host *ihost, |
604 | struct isci_phy *iphy); | 486 | struct isci_phy *iphy); |
605 | 487 | ||
606 | void scic_sds_controller_power_control_queue_remove( | 488 | void scic_sds_controller_power_control_queue_remove( |
607 | struct scic_sds_controller *scic, | 489 | struct isci_host *ihost, |
608 | struct isci_phy *iphy); | 490 | struct isci_phy *iphy); |
609 | 491 | ||
610 | void scic_sds_controller_link_up( | 492 | void scic_sds_controller_link_up( |
611 | struct scic_sds_controller *scic, | 493 | struct isci_host *ihost, |
612 | struct isci_port *iport, | 494 | struct isci_port *iport, |
613 | struct isci_phy *iphy); | 495 | struct isci_phy *iphy); |
614 | 496 | ||
615 | void scic_sds_controller_link_down( | 497 | void scic_sds_controller_link_down( |
616 | struct scic_sds_controller *scic, | 498 | struct isci_host *ihost, |
617 | struct isci_port *iport, | 499 | struct isci_port *iport, |
618 | struct isci_phy *iphy); | 500 | struct isci_phy *iphy); |
619 | 501 | ||
620 | void scic_sds_controller_remote_device_stopped( | 502 | void scic_sds_controller_remote_device_stopped( |
621 | struct scic_sds_controller *scic, | 503 | struct isci_host *ihost, |
622 | struct isci_remote_device *idev); | 504 | struct isci_remote_device *idev); |
623 | 505 | ||
624 | void scic_sds_controller_copy_task_context( | 506 | void scic_sds_controller_copy_task_context( |
625 | struct scic_sds_controller *scic, | 507 | struct isci_host *ihost, |
626 | struct isci_request *ireq); | 508 | struct isci_request *ireq); |
627 | 509 | ||
628 | void scic_sds_controller_register_setup(struct scic_sds_controller *scic); | 510 | void scic_sds_controller_register_setup(struct isci_host *ihost); |
629 | 511 | ||
630 | enum sci_status scic_controller_continue_io(struct isci_request *ireq); | 512 | enum sci_status scic_controller_continue_io(struct isci_request *ireq); |
631 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); | 513 | int isci_host_scan_finished(struct Scsi_Host *, unsigned long); |
@@ -655,25 +537,25 @@ void isci_host_remote_device_start_complete( | |||
655 | enum sci_status); | 537 | enum sci_status); |
656 | 538 | ||
657 | void scic_controller_disable_interrupts( | 539 | void scic_controller_disable_interrupts( |
658 | struct scic_sds_controller *scic); | 540 | struct isci_host *ihost); |
659 | 541 | ||
660 | enum sci_status scic_controller_start_io( | 542 | enum sci_status scic_controller_start_io( |
661 | struct scic_sds_controller *scic, | 543 | struct isci_host *ihost, |
662 | struct isci_remote_device *idev, | 544 | struct isci_remote_device *idev, |
663 | struct isci_request *ireq); | 545 | struct isci_request *ireq); |
664 | 546 | ||
665 | enum sci_task_status scic_controller_start_task( | 547 | enum sci_task_status scic_controller_start_task( |
666 | struct scic_sds_controller *scic, | 548 | struct isci_host *ihost, |
667 | struct isci_remote_device *idev, | 549 | struct isci_remote_device *idev, |
668 | struct isci_request *ireq); | 550 | struct isci_request *ireq); |
669 | 551 | ||
670 | enum sci_status scic_controller_terminate_request( | 552 | enum sci_status scic_controller_terminate_request( |
671 | struct scic_sds_controller *scic, | 553 | struct isci_host *ihost, |
672 | struct isci_remote_device *idev, | 554 | struct isci_remote_device *idev, |
673 | struct isci_request *ireq); | 555 | struct isci_request *ireq); |
674 | 556 | ||
675 | enum sci_status scic_controller_complete_io( | 557 | enum sci_status scic_controller_complete_io( |
676 | struct scic_sds_controller *scic, | 558 | struct isci_host *ihost, |
677 | struct isci_remote_device *idev, | 559 | struct isci_remote_device *idev, |
678 | struct isci_request *ireq); | 560 | struct isci_request *ireq); |
679 | 561 | ||
@@ -681,6 +563,6 @@ void scic_sds_port_configuration_agent_construct( | |||
681 | struct scic_sds_port_configuration_agent *port_agent); | 563 | struct scic_sds_port_configuration_agent *port_agent); |
682 | 564 | ||
683 | enum sci_status scic_sds_port_configuration_agent_initialize( | 565 | enum sci_status scic_sds_port_configuration_agent_initialize( |
684 | struct scic_sds_controller *controller, | 566 | struct isci_host *ihost, |
685 | struct scic_sds_port_configuration_agent *port_agent); | 567 | struct scic_sds_port_configuration_agent *port_agent); |
686 | #endif | 568 | #endif |
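The host.h changes above complete the interface-level rename: every controller-core entry point that used to take a struct scic_sds_controller * now takes the struct isci_host * directly. For orientation, the conversion helper this makes unnecessary was essentially a container_of() hop from the embedded controller state back to its host. A minimal sketch of that retired helper, assuming the member is the 'sci' field visible in the '&isci_host->sci' and '&ihost->sci' call sites removed below:

/* sketch: recover the isci_host that embeds a given scic_sds_controller
 * (member name 'sci' assumed from the call sites in this patch) */
static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
{
	return container_of(scic, struct isci_host, sci);
}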
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index bbfb6e563207..68ca1a4f30af 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -548,13 +548,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic | |||
548 | 548 | ||
549 | static void __devexit isci_pci_remove(struct pci_dev *pdev) | 549 | static void __devexit isci_pci_remove(struct pci_dev *pdev) |
550 | { | 550 | { |
551 | struct isci_host *isci_host; | 551 | struct isci_host *ihost; |
552 | int i; | 552 | int i; |
553 | 553 | ||
554 | for_each_isci_host(i, isci_host, pdev) { | 554 | for_each_isci_host(i, ihost, pdev) { |
555 | isci_unregister(isci_host); | 555 | isci_unregister(ihost); |
556 | isci_host_deinit(isci_host); | 556 | isci_host_deinit(ihost); |
557 | scic_controller_disable_interrupts(&isci_host->sci); | 557 | scic_controller_disable_interrupts(ihost); |
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
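With the interface collapsed onto isci_host, the PCI remove path no longer needs the '&isci_host->sci' detour; the host pointer from the iteration macro is handed straight to the controller-core calls. A minimal sketch of the resulting teardown loop, matching isci_pci_remove() above:

for_each_isci_host(i, ihost, pdev) {
	isci_unregister(ihost);
	isci_host_deinit(ihost);
	/* previously scic_controller_disable_interrupts(&isci_host->sci) */
	scic_controller_disable_interrupts(ihost);
}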
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index fd0e9734e5d0..ca96b5ad0d52 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c | |||
@@ -112,13 +112,13 @@ static enum sci_status | |||
112 | scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | 112 | scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, |
113 | struct scu_link_layer_registers __iomem *link_layer_registers) | 113 | struct scu_link_layer_registers __iomem *link_layer_registers) |
114 | { | 114 | { |
115 | struct scic_sds_controller *scic = | 115 | struct isci_host *ihost = |
116 | iphy->owning_port->owning_controller; | 116 | iphy->owning_port->owning_controller; |
117 | int phy_idx = iphy->phy_index; | 117 | int phy_idx = iphy->phy_index; |
118 | struct sci_phy_user_params *phy_user = | 118 | struct sci_phy_user_params *phy_user = |
119 | &scic->user_parameters.sds1.phys[phy_idx]; | 119 | &ihost->user_parameters.sds1.phys[phy_idx]; |
120 | struct sci_phy_oem_params *phy_oem = | 120 | struct sci_phy_oem_params *phy_oem = |
121 | &scic->oem_parameters.sds1.phys[phy_idx]; | 121 | &ihost->oem_parameters.sds1.phys[phy_idx]; |
122 | u32 phy_configuration; | 122 | u32 phy_configuration; |
123 | struct scic_phy_cap phy_cap; | 123 | struct scic_phy_cap phy_cap; |
124 | u32 parity_check = 0; | 124 | u32 parity_check = 0; |
@@ -169,7 +169,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | |||
169 | phy_cap.gen3_no_ssc = 1; | 169 | phy_cap.gen3_no_ssc = 1; |
170 | phy_cap.gen2_no_ssc = 1; | 170 | phy_cap.gen2_no_ssc = 1; |
171 | phy_cap.gen1_no_ssc = 1; | 171 | phy_cap.gen1_no_ssc = 1; |
172 | if (scic->oem_parameters.sds1.controller.do_enable_ssc == true) { | 172 | if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) { |
173 | phy_cap.gen3_ssc = 1; | 173 | phy_cap.gen3_ssc = 1; |
174 | phy_cap.gen2_ssc = 1; | 174 | phy_cap.gen2_ssc = 1; |
175 | phy_cap.gen1_ssc = 1; | 175 | phy_cap.gen1_ssc = 1; |
@@ -216,7 +216,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy, | |||
216 | &iphy->link_layer_registers->afe_lookup_table_control); | 216 | &iphy->link_layer_registers->afe_lookup_table_control); |
217 | 217 | ||
218 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, | 218 | llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, |
219 | (u8)scic->user_parameters.sds1.no_outbound_task_timeout); | 219 | (u8)ihost->user_parameters.sds1.no_outbound_task_timeout); |
220 | 220 | ||
221 | switch(phy_user->max_speed_generation) { | 221 | switch(phy_user->max_speed_generation) { |
222 | case SCIC_SDS_PARM_GEN3_SPEED: | 222 | case SCIC_SDS_PARM_GEN3_SPEED: |
@@ -255,7 +255,7 @@ static void phy_sata_timeout(unsigned long data) | |||
255 | { | 255 | { |
256 | struct sci_timer *tmr = (struct sci_timer *)data; | 256 | struct sci_timer *tmr = (struct sci_timer *)data; |
257 | struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); | 257 | struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); |
258 | struct isci_host *ihost = scic_to_ihost(iphy->owning_port->owning_controller); | 258 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
259 | unsigned long flags; | 259 | unsigned long flags; |
260 | 260 | ||
261 | spin_lock_irqsave(&ihost->scic_lock, flags); | 261 | spin_lock_irqsave(&ihost->scic_lock, flags); |
@@ -890,7 +890,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
890 | u32 frame_index) | 890 | u32 frame_index) |
891 | { | 891 | { |
892 | enum scic_sds_phy_states state = iphy->sm.current_state_id; | 892 | enum scic_sds_phy_states state = iphy->sm.current_state_id; |
893 | struct scic_sds_controller *scic = iphy->owning_port->owning_controller; | 893 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
894 | enum sci_status result; | 894 | enum sci_status result; |
895 | unsigned long flags; | 895 | unsigned long flags; |
896 | 896 | ||
@@ -899,7 +899,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
899 | u32 *frame_words; | 899 | u32 *frame_words; |
900 | struct sas_identify_frame iaf; | 900 | struct sas_identify_frame iaf; |
901 | 901 | ||
902 | result = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 902 | result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
903 | frame_index, | 903 | frame_index, |
904 | (void **)&frame_words); | 904 | (void **)&frame_words); |
905 | 905 | ||
@@ -933,7 +933,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
933 | "unexpected frame id %x\n", | 933 | "unexpected frame id %x\n", |
934 | __func__, frame_index); | 934 | __func__, frame_index); |
935 | 935 | ||
936 | scic_sds_controller_release_frame(scic, frame_index); | 936 | scic_sds_controller_release_frame(ihost, frame_index); |
937 | return result; | 937 | return result; |
938 | } | 938 | } |
939 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { | 939 | case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: { |
@@ -950,7 +950,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
950 | 950 | ||
951 | if ((frame_header->fis_type == FIS_REGD2H) && | 951 | if ((frame_header->fis_type == FIS_REGD2H) && |
952 | !(frame_header->status & ATA_BUSY)) { | 952 | !(frame_header->status & ATA_BUSY)) { |
953 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 953 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
954 | frame_index, | 954 | frame_index, |
955 | (void **)&fis_frame_data); | 955 | (void **)&fis_frame_data); |
956 | 956 | ||
@@ -971,7 +971,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy, | |||
971 | __func__, frame_index); | 971 | __func__, frame_index); |
972 | 972 | ||
973 | /* Regardless of the result we are done with this frame */ | 973 | /* Regardless of the result we are done with this frame */ |
974 | scic_sds_controller_release_frame(scic, frame_index); | 974 | scic_sds_controller_release_frame(ihost, frame_index); |
975 | 975 | ||
976 | return result; | 976 | return result; |
977 | } | 977 | } |
@@ -994,33 +994,33 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m | |||
994 | static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) | 994 | static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm) |
995 | { | 995 | { |
996 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 996 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
997 | struct scic_sds_controller *scic = iphy->owning_port->owning_controller; | 997 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
998 | 998 | ||
999 | scic_sds_controller_power_control_queue_insert(scic, iphy); | 999 | scic_sds_controller_power_control_queue_insert(ihost, iphy); |
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) | 1002 | static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm) |
1003 | { | 1003 | { |
1004 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1004 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1005 | struct scic_sds_controller *scic = iphy->owning_port->owning_controller; | 1005 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1006 | 1006 | ||
1007 | scic_sds_controller_power_control_queue_remove(scic, iphy); | 1007 | scic_sds_controller_power_control_queue_remove(ihost, iphy); |
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) | 1010 | static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm) |
1011 | { | 1011 | { |
1012 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1012 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1013 | struct scic_sds_controller *scic = iphy->owning_port->owning_controller; | 1013 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1014 | 1014 | ||
1015 | scic_sds_controller_power_control_queue_insert(scic, iphy); | 1015 | scic_sds_controller_power_control_queue_insert(ihost, iphy); |
1016 | } | 1016 | } |
1017 | 1017 | ||
1018 | static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) | 1018 | static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm) |
1019 | { | 1019 | { |
1020 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); | 1020 | struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm); |
1021 | struct scic_sds_controller *scic = iphy->owning_port->owning_controller; | 1021 | struct isci_host *ihost = iphy->owning_port->owning_controller; |
1022 | 1022 | ||
1023 | scic_sds_controller_power_control_queue_remove(scic, iphy); | 1023 | scic_sds_controller_power_control_queue_remove(ihost, iphy); |
1024 | } | 1024 | } |
1025 | 1025 | ||
1026 | static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) | 1026 | static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm) |
@@ -1313,7 +1313,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index) | |||
1313 | u64 sci_sas_addr; | 1313 | u64 sci_sas_addr; |
1314 | __be64 sas_addr; | 1314 | __be64 sas_addr; |
1315 | 1315 | ||
1316 | scic_oem_parameters_get(&ihost->sci, &oem); | 1316 | scic_oem_parameters_get(ihost, &oem); |
1317 | sci_sas_addr = oem.sds1.phys[index].sas_address.high; | 1317 | sci_sas_addr = oem.sds1.phys[index].sas_address.high; |
1318 | sci_sas_addr <<= 32; | 1318 | sci_sas_addr <<= 32; |
1319 | sci_sas_addr |= oem.sds1.phys[index].sas_address.low; | 1319 | sci_sas_addr |= oem.sds1.phys[index].sas_address.low; |
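The phy.c hunks apply the same rule in the deferred paths: a timer callback recovers its isci_phy with container_of() and then reaches the host in a single step through owning_port->owning_controller, with no intermediate controller pointer to convert. A minimal sketch of that callback shape (locking shown, the actual timeout handling elided):

static void phy_sata_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	/* the timer is embedded in the phy, so walk back to it ... */
	struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
	/* ... and the owning controller is now the isci_host itself */
	struct isci_host *ihost = iphy->owning_port->owning_controller;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	/* handle the SATA signature-FIS timeout here */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}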
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index df37b1bf7d11..c434d5a0effa 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c | |||
@@ -365,11 +365,11 @@ static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *i | |||
365 | "%s: isci_port = %p\n", __func__, isci_port); | 365 | "%s: isci_port = %p\n", __func__, isci_port); |
366 | } | 366 | } |
367 | 367 | ||
368 | static void isci_port_stop_complete(struct scic_sds_controller *scic, | 368 | static void isci_port_stop_complete(struct isci_host *ihost, |
369 | struct isci_port *iport, | 369 | struct isci_port *iport, |
370 | enum sci_status completion_status) | 370 | enum sci_status completion_status) |
371 | { | 371 | { |
372 | dev_dbg(&scic_to_ihost(scic)->pdev->dev, "Port stop complete\n"); | 372 | dev_dbg(&ihost->pdev->dev, "Port stop complete\n"); |
373 | } | 373 | } |
374 | 374 | ||
375 | /** | 375 | /** |
@@ -541,8 +541,7 @@ static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport, | |||
541 | /* Make sure that this phy is part of this port */ | 541 | /* Make sure that this phy is part of this port */ |
542 | if (iport->phy_table[iphy->phy_index] == iphy && | 542 | if (iport->phy_table[iphy->phy_index] == iphy && |
543 | phy_get_non_dummy_port(iphy) == iport) { | 543 | phy_get_non_dummy_port(iphy) == iport) { |
544 | struct scic_sds_controller *scic = iport->owning_controller; | 544 | struct isci_host *ihost = iport->owning_controller; |
545 | struct isci_host *ihost = scic_to_ihost(scic); | ||
546 | 545 | ||
547 | /* Yep it is assigned to this port so remove it */ | 546 | /* Yep it is assigned to this port so remove it */ |
548 | scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); | 547 | scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); |
@@ -654,10 +653,10 @@ static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) | |||
654 | */ | 653 | */ |
655 | static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) | 654 | static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) |
656 | { | 655 | { |
657 | struct scic_sds_controller *scic = iport->owning_controller; | 656 | struct isci_host *ihost = iport->owning_controller; |
658 | struct scu_task_context *task_context; | 657 | struct scu_task_context *task_context; |
659 | 658 | ||
660 | task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)]; | 659 | task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; |
661 | memset(task_context, 0, sizeof(struct scu_task_context)); | 660 | memset(task_context, 0, sizeof(struct scu_task_context)); |
662 | 661 | ||
663 | task_context->initiator_request = 1; | 662 | task_context->initiator_request = 1; |
@@ -674,13 +673,13 @@ static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag) | |||
674 | 673 | ||
675 | static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) | 674 | static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport) |
676 | { | 675 | { |
677 | struct scic_sds_controller *scic = iport->owning_controller; | 676 | struct isci_host *ihost = iport->owning_controller; |
678 | 677 | ||
679 | if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) | 678 | if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) |
680 | isci_free_tag(scic_to_ihost(scic), iport->reserved_tag); | 679 | isci_free_tag(ihost, iport->reserved_tag); |
681 | 680 | ||
682 | if (iport->reserved_rni != SCU_DUMMY_INDEX) | 681 | if (iport->reserved_rni != SCU_DUMMY_INDEX) |
683 | scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes, | 682 | scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, |
684 | 1, iport->reserved_rni); | 683 | 1, iport->reserved_rni); |
685 | 684 | ||
686 | iport->reserved_rni = SCU_DUMMY_INDEX; | 685 | iport->reserved_rni = SCU_DUMMY_INDEX; |
@@ -749,15 +748,14 @@ static void scic_sds_port_activate_phy(struct isci_port *iport, | |||
749 | struct isci_phy *iphy, | 748 | struct isci_phy *iphy, |
750 | bool do_notify_user) | 749 | bool do_notify_user) |
751 | { | 750 | { |
752 | struct scic_sds_controller *scic = iport->owning_controller; | 751 | struct isci_host *ihost = iport->owning_controller; |
753 | struct isci_host *ihost = scic_to_ihost(scic); | ||
754 | 752 | ||
755 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) | 753 | if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) |
756 | scic_sds_phy_resume(iphy); | 754 | scic_sds_phy_resume(iphy); |
757 | 755 | ||
758 | iport->active_phy_mask |= 1 << iphy->phy_index; | 756 | iport->active_phy_mask |= 1 << iphy->phy_index; |
759 | 757 | ||
760 | scic_sds_controller_clear_invalid_phy(scic, iphy); | 758 | scic_sds_controller_clear_invalid_phy(ihost, iphy); |
761 | 759 | ||
762 | if (do_notify_user == true) | 760 | if (do_notify_user == true) |
763 | isci_port_link_up(ihost, iport, iphy); | 761 | isci_port_link_up(ihost, iport, iphy); |
@@ -767,8 +765,7 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, | |||
767 | struct isci_phy *iphy, | 765 | struct isci_phy *iphy, |
768 | bool do_notify_user) | 766 | bool do_notify_user) |
769 | { | 767 | { |
770 | struct scic_sds_controller *scic = scic_sds_port_get_controller(iport); | 768 | struct isci_host *ihost = scic_sds_port_get_controller(iport); |
771 | struct isci_host *ihost = scic_to_ihost(scic); | ||
772 | 769 | ||
773 | iport->active_phy_mask &= ~(1 << iphy->phy_index); | 770 | iport->active_phy_mask &= ~(1 << iphy->phy_index); |
774 | 771 | ||
@@ -793,16 +790,16 @@ void scic_sds_port_deactivate_phy(struct isci_port *iport, | |||
793 | static void scic_sds_port_invalid_link_up(struct isci_port *iport, | 790 | static void scic_sds_port_invalid_link_up(struct isci_port *iport, |
794 | struct isci_phy *iphy) | 791 | struct isci_phy *iphy) |
795 | { | 792 | { |
796 | struct scic_sds_controller *scic = iport->owning_controller; | 793 | struct isci_host *ihost = iport->owning_controller; |
797 | 794 | ||
798 | /* | 795 | /* |
799 | * Check to see if we have already reported this link as bad and if | 796 | * Check to see if we have already reported this link as bad and if |
800 | * not go ahead and tell the SCI_USER that we have discovered an | 797 | * not go ahead and tell the SCI_USER that we have discovered an |
801 | * invalid link. | 798 | * invalid link. |
802 | */ | 799 | */ |
803 | if ((scic->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { | 800 | if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { |
804 | scic_sds_controller_set_invalid_phy(scic, iphy); | 801 | scic_sds_controller_set_invalid_phy(ihost, iphy); |
805 | dev_warn(&scic_to_ihost(scic)->pdev->dev, "Invalid link up!\n"); | 802 | dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); |
806 | } | 803 | } |
807 | } | 804 | } |
808 | 805 | ||
@@ -931,7 +928,7 @@ static void port_timeout(unsigned long data) | |||
931 | { | 928 | { |
932 | struct sci_timer *tmr = (struct sci_timer *)data; | 929 | struct sci_timer *tmr = (struct sci_timer *)data; |
933 | struct isci_port *iport = container_of(tmr, typeof(*iport), timer); | 930 | struct isci_port *iport = container_of(tmr, typeof(*iport), timer); |
934 | struct isci_host *ihost = scic_to_ihost(iport->owning_controller); | 931 | struct isci_host *ihost = iport->owning_controller; |
935 | unsigned long flags; | 932 | unsigned long flags; |
936 | u32 current_state; | 933 | u32 current_state; |
937 | 934 | ||
@@ -1041,19 +1038,19 @@ static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport) | |||
1041 | */ | 1038 | */ |
1042 | static void scic_sds_port_post_dummy_request(struct isci_port *iport) | 1039 | static void scic_sds_port_post_dummy_request(struct isci_port *iport) |
1043 | { | 1040 | { |
1044 | struct scic_sds_controller *scic = iport->owning_controller; | 1041 | struct isci_host *ihost = iport->owning_controller; |
1045 | u16 tag = iport->reserved_tag; | 1042 | u16 tag = iport->reserved_tag; |
1046 | struct scu_task_context *tc; | 1043 | struct scu_task_context *tc; |
1047 | u32 command; | 1044 | u32 command; |
1048 | 1045 | ||
1049 | tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; | 1046 | tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; |
1050 | tc->abort = 0; | 1047 | tc->abort = 0; |
1051 | 1048 | ||
1052 | command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 1049 | command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
1053 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | | 1050 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | |
1054 | ISCI_TAG_TCI(tag); | 1051 | ISCI_TAG_TCI(tag); |
1055 | 1052 | ||
1056 | scic_sds_controller_post_request(scic, command); | 1053 | scic_sds_controller_post_request(ihost, command); |
1057 | } | 1054 | } |
1058 | 1055 | ||
1059 | /** | 1056 | /** |
@@ -1065,19 +1062,19 @@ static void scic_sds_port_post_dummy_request(struct isci_port *iport) | |||
1065 | */ | 1062 | */ |
1066 | static void scic_sds_port_abort_dummy_request(struct isci_port *iport) | 1063 | static void scic_sds_port_abort_dummy_request(struct isci_port *iport) |
1067 | { | 1064 | { |
1068 | struct scic_sds_controller *scic = iport->owning_controller; | 1065 | struct isci_host *ihost = iport->owning_controller; |
1069 | u16 tag = iport->reserved_tag; | 1066 | u16 tag = iport->reserved_tag; |
1070 | struct scu_task_context *tc; | 1067 | struct scu_task_context *tc; |
1071 | u32 command; | 1068 | u32 command; |
1072 | 1069 | ||
1073 | tc = &scic->task_context_table[ISCI_TAG_TCI(tag)]; | 1070 | tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; |
1074 | tc->abort = 1; | 1071 | tc->abort = 1; |
1075 | 1072 | ||
1076 | command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | | 1073 | command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | |
1077 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | | 1074 | iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | |
1078 | ISCI_TAG_TCI(tag); | 1075 | ISCI_TAG_TCI(tag); |
1079 | 1076 | ||
1080 | scic_sds_controller_post_request(scic, command); | 1077 | scic_sds_controller_post_request(ihost, command); |
1081 | } | 1078 | } |
1082 | 1079 | ||
1083 | /** | 1080 | /** |
@@ -1115,8 +1112,7 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state | |||
1115 | { | 1112 | { |
1116 | u32 index; | 1113 | u32 index; |
1117 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1114 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1118 | struct scic_sds_controller *scic = iport->owning_controller; | 1115 | struct isci_host *ihost = iport->owning_controller; |
1119 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1120 | 1116 | ||
1121 | isci_port_ready(ihost, iport); | 1117 | isci_port_ready(ihost, iport); |
1122 | 1118 | ||
@@ -1141,13 +1137,13 @@ static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state | |||
1141 | 1137 | ||
1142 | static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | 1138 | static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) |
1143 | { | 1139 | { |
1144 | struct scic_sds_controller *scic = iport->owning_controller; | 1140 | struct isci_host *ihost = iport->owning_controller; |
1145 | u8 phys_index = iport->physical_port_index; | 1141 | u8 phys_index = iport->physical_port_index; |
1146 | union scu_remote_node_context *rnc; | 1142 | union scu_remote_node_context *rnc; |
1147 | u16 rni = iport->reserved_rni; | 1143 | u16 rni = iport->reserved_rni; |
1148 | u32 command; | 1144 | u32 command; |
1149 | 1145 | ||
1150 | rnc = &scic->remote_node_context_table[rni]; | 1146 | rnc = &ihost->remote_node_context_table[rni]; |
1151 | 1147 | ||
1152 | rnc->ssp.is_valid = false; | 1148 | rnc->ssp.is_valid = false; |
1153 | 1149 | ||
@@ -1155,13 +1151,13 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | |||
1155 | * controller and give it ample time to act before posting the rnc | 1151 | * controller and give it ample time to act before posting the rnc |
1156 | * invalidate | 1152 | * invalidate |
1157 | */ | 1153 | */ |
1158 | readl(&scic->smu_registers->interrupt_status); /* flush */ | 1154 | readl(&ihost->smu_registers->interrupt_status); /* flush */ |
1159 | udelay(10); | 1155 | udelay(10); |
1160 | 1156 | ||
1161 | command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | | 1157 | command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | |
1162 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1158 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1163 | 1159 | ||
1164 | scic_sds_controller_post_request(scic, command); | 1160 | scic_sds_controller_post_request(ihost, command); |
1165 | } | 1161 | } |
1166 | 1162 | ||
1167 | /** | 1163 | /** |
@@ -1175,8 +1171,7 @@ static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport) | |||
1175 | static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) | 1171 | static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) |
1176 | { | 1172 | { |
1177 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1173 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1178 | struct scic_sds_controller *scic = iport->owning_controller; | 1174 | struct isci_host *ihost = iport->owning_controller; |
1179 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1180 | 1175 | ||
1181 | /* | 1176 | /* |
1182 | * Kill the dummy task for this port if it has not yet posted | 1177 | * Kill the dummy task for this port if it has not yet posted |
@@ -1194,8 +1189,7 @@ static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_ | |||
1194 | static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) | 1189 | static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) |
1195 | { | 1190 | { |
1196 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1191 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1197 | struct scic_sds_controller *scic = iport->owning_controller; | 1192 | struct isci_host *ihost = iport->owning_controller; |
1198 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1199 | 1193 | ||
1200 | if (iport->active_phy_mask == 0) { | 1194 | if (iport->active_phy_mask == 0) { |
1201 | isci_port_not_ready(ihost, iport); | 1195 | isci_port_not_ready(ihost, iport); |
@@ -1218,7 +1212,7 @@ static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_ | |||
1218 | 1212 | ||
1219 | enum sci_status scic_sds_port_start(struct isci_port *iport) | 1213 | enum sci_status scic_sds_port_start(struct isci_port *iport) |
1220 | { | 1214 | { |
1221 | struct scic_sds_controller *scic = iport->owning_controller; | 1215 | struct isci_host *ihost = iport->owning_controller; |
1222 | enum sci_status status = SCI_SUCCESS; | 1216 | enum sci_status status = SCI_SUCCESS; |
1223 | enum scic_sds_port_states state; | 1217 | enum scic_sds_port_states state; |
1224 | u32 phy_mask; | 1218 | u32 phy_mask; |
@@ -1241,7 +1235,7 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) | |||
1241 | 1235 | ||
1242 | if (iport->reserved_rni == SCU_DUMMY_INDEX) { | 1236 | if (iport->reserved_rni == SCU_DUMMY_INDEX) { |
1243 | u16 rni = scic_sds_remote_node_table_allocate_remote_node( | 1237 | u16 rni = scic_sds_remote_node_table_allocate_remote_node( |
1244 | &scic->available_remote_nodes, 1); | 1238 | &ihost->available_remote_nodes, 1); |
1245 | 1239 | ||
1246 | if (rni != SCU_DUMMY_INDEX) | 1240 | if (rni != SCU_DUMMY_INDEX) |
1247 | scic_sds_port_construct_dummy_rnc(iport, rni); | 1241 | scic_sds_port_construct_dummy_rnc(iport, rni); |
@@ -1251,7 +1245,6 @@ enum sci_status scic_sds_port_start(struct isci_port *iport) | |||
1251 | } | 1245 | } |
1252 | 1246 | ||
1253 | if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { | 1247 | if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { |
1254 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1255 | u16 tag; | 1248 | u16 tag; |
1256 | 1249 | ||
1257 | tag = isci_alloc_tag(ihost); | 1250 | tag = isci_alloc_tag(ihost); |
@@ -1634,30 +1627,30 @@ scic_sds_port_disable_port_task_scheduler(struct isci_port *iport) | |||
1634 | 1627 | ||
1635 | static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) | 1628 | static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport) |
1636 | { | 1629 | { |
1637 | struct scic_sds_controller *scic = iport->owning_controller; | 1630 | struct isci_host *ihost = iport->owning_controller; |
1638 | u8 phys_index = iport->physical_port_index; | 1631 | u8 phys_index = iport->physical_port_index; |
1639 | union scu_remote_node_context *rnc; | 1632 | union scu_remote_node_context *rnc; |
1640 | u16 rni = iport->reserved_rni; | 1633 | u16 rni = iport->reserved_rni; |
1641 | u32 command; | 1634 | u32 command; |
1642 | 1635 | ||
1643 | rnc = &scic->remote_node_context_table[rni]; | 1636 | rnc = &ihost->remote_node_context_table[rni]; |
1644 | rnc->ssp.is_valid = true; | 1637 | rnc->ssp.is_valid = true; |
1645 | 1638 | ||
1646 | command = SCU_CONTEXT_COMMAND_POST_RNC_32 | | 1639 | command = SCU_CONTEXT_COMMAND_POST_RNC_32 | |
1647 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1640 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1648 | 1641 | ||
1649 | scic_sds_controller_post_request(scic, command); | 1642 | scic_sds_controller_post_request(ihost, command); |
1650 | 1643 | ||
1651 | /* ensure hardware has seen the post rnc command and give it | 1644 | /* ensure hardware has seen the post rnc command and give it |
1652 | * ample time to act before sending the suspend | 1645 | * ample time to act before sending the suspend |
1653 | */ | 1646 | */ |
1654 | readl(&scic->smu_registers->interrupt_status); /* flush */ | 1647 | readl(&ihost->smu_registers->interrupt_status); /* flush */ |
1655 | udelay(10); | 1648 | udelay(10); |
1656 | 1649 | ||
1657 | command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | | 1650 | command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | |
1658 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; | 1651 | phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; |
1659 | 1652 | ||
1660 | scic_sds_controller_post_request(scic, command); | 1653 | scic_sds_controller_post_request(ihost, command); |
1661 | } | 1654 | } |
1662 | 1655 | ||
1663 | static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) | 1656 | static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm) |
@@ -1684,8 +1677,7 @@ static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm) | |||
1684 | static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) | 1677 | static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm) |
1685 | { | 1678 | { |
1686 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); | 1679 | struct isci_port *iport = container_of(sm, typeof(*iport), sm); |
1687 | struct scic_sds_controller *scic = iport->owning_controller; | 1680 | struct isci_host *ihost = iport->owning_controller; |
1688 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1689 | u32 prev_state; | 1681 | u32 prev_state; |
1690 | 1682 | ||
1691 | prev_state = iport->sm.previous_state_id; | 1683 | prev_state = iport->sm.previous_state_id; |
@@ -1758,7 +1750,7 @@ static const struct sci_base_state scic_sds_port_state_table[] = { | |||
1758 | }; | 1750 | }; |
1759 | 1751 | ||
1760 | void scic_sds_port_construct(struct isci_port *iport, u8 index, | 1752 | void scic_sds_port_construct(struct isci_port *iport, u8 index, |
1761 | struct scic_sds_controller *scic) | 1753 | struct isci_host *ihost) |
1762 | { | 1754 | { |
1763 | sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); | 1755 | sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED); |
1764 | 1756 | ||
@@ -1767,7 +1759,7 @@ void scic_sds_port_construct(struct isci_port *iport, u8 index, | |||
1767 | iport->active_phy_mask = 0; | 1759 | iport->active_phy_mask = 0; |
1768 | iport->ready_exit = false; | 1760 | iport->ready_exit = false; |
1769 | 1761 | ||
1770 | iport->owning_controller = scic; | 1762 | iport->owning_controller = ihost; |
1771 | 1763 | ||
1772 | iport->started_request_count = 0; | 1764 | iport->started_request_count = 0; |
1773 | iport->assigned_device_count = 0; | 1765 | iport->assigned_device_count = 0; |
@@ -1810,8 +1802,7 @@ void scic_sds_port_broadcast_change_received( | |||
1810 | struct isci_port *iport, | 1802 | struct isci_port *iport, |
1811 | struct isci_phy *iphy) | 1803 | struct isci_phy *iphy) |
1812 | { | 1804 | { |
1813 | struct scic_sds_controller *scic = iport->owning_controller; | 1805 | struct isci_host *ihost = iport->owning_controller; |
1814 | struct isci_host *ihost = scic_to_ihost(scic); | ||
1815 | 1806 | ||
1816 | /* notify the user. */ | 1807 | /* notify the user. */ |
1817 | isci_port_bc_change_received(ihost, iport, iphy); | 1808 | isci_port_bc_change_received(ihost, iport, iphy); |
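Most of the port.c churn is the same mechanical collapse repeated per function: wherever a routine previously fetched the controller and then converted it, it now reads the host pointer straight off the port. A before/after sketch of the recurring pattern:

/* before: two locals and a container_of()-based conversion */
struct scic_sds_controller *scic = iport->owning_controller;
struct isci_host *ihost = scic_to_ihost(scic);

/* after: owning_controller is already the isci_host */
struct isci_host *ihost = iport->owning_controller;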
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index b9bc89bf6519..9a9be7b47b4a 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h | |||
@@ -115,7 +115,7 @@ struct isci_port { | |||
115 | u32 assigned_device_count; | 115 | u32 assigned_device_count; |
116 | u32 not_ready_reason; | 116 | u32 not_ready_reason; |
117 | struct isci_phy *phy_table[SCI_MAX_PHYS]; | 117 | struct isci_phy *phy_table[SCI_MAX_PHYS]; |
118 | struct scic_sds_controller *owning_controller; | 118 | struct isci_host *owning_controller; |
119 | struct sci_timer timer; | 119 | struct sci_timer timer; |
120 | struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers; | 120 | struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers; |
121 | /* XXX rework: only one register, no need to replicate per-port */ | 121 | /* XXX rework: only one register, no need to replicate per-port */ |
@@ -243,7 +243,7 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport | |||
243 | void scic_sds_port_construct( | 243 | void scic_sds_port_construct( |
244 | struct isci_port *iport, | 244 | struct isci_port *iport, |
245 | u8 port_index, | 245 | u8 port_index, |
246 | struct scic_sds_controller *scic); | 246 | struct isci_host *ihost); |
247 | 247 | ||
248 | enum sci_status scic_sds_port_initialize( | 248 | enum sci_status scic_sds_port_initialize( |
249 | struct isci_port *iport, | 249 | struct isci_port *iport, |
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index bb62d2a25217..a0a135d54e95 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -113,7 +113,7 @@ static s32 sci_sas_address_compare( | |||
113 | * NULL if there is no matching port for the phy. | 113 | * NULL if there is no matching port for the phy. |
114 | */ | 114 | */ |
115 | static struct isci_port *scic_sds_port_configuration_agent_find_port( | 115 | static struct isci_port *scic_sds_port_configuration_agent_find_port( |
116 | struct scic_sds_controller *scic, | 116 | struct isci_host *ihost, |
117 | struct isci_phy *iphy) | 117 | struct isci_phy *iphy) |
118 | { | 118 | { |
119 | u8 i; | 119 | u8 i; |
@@ -130,8 +130,7 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( | |||
130 | scic_sds_phy_get_sas_address(iphy, &phy_sas_address); | 130 | scic_sds_phy_get_sas_address(iphy, &phy_sas_address); |
131 | scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); | 131 | scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address); |
132 | 132 | ||
133 | for (i = 0; i < scic->logical_port_entries; i++) { | 133 | for (i = 0; i < ihost->logical_port_entries; i++) { |
134 | struct isci_host *ihost = scic_to_ihost(scic); | ||
135 | struct isci_port *iport = &ihost->ports[i]; | 134 | struct isci_port *iport = &ihost->ports[i]; |
136 | 135 | ||
137 | scic_sds_port_get_sas_address(iport, &port_sas_address); | 136 | scic_sds_port_get_sas_address(iport, &port_sas_address); |
@@ -158,10 +157,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port( | |||
158 | * the port configuration is not valid for this port configuration agent. | 157 | * the port configuration is not valid for this port configuration agent. |
159 | */ | 158 | */ |
160 | static enum sci_status scic_sds_port_configuration_agent_validate_ports( | 159 | static enum sci_status scic_sds_port_configuration_agent_validate_ports( |
161 | struct scic_sds_controller *controller, | 160 | struct isci_host *ihost, |
162 | struct scic_sds_port_configuration_agent *port_agent) | 161 | struct scic_sds_port_configuration_agent *port_agent) |
163 | { | 162 | { |
164 | struct isci_host *ihost = scic_to_ihost(controller); | ||
165 | struct sci_sas_address first_address; | 163 | struct sci_sas_address first_address; |
166 | struct sci_sas_address second_address; | 164 | struct sci_sas_address second_address; |
167 | 165 | ||
@@ -239,17 +237,11 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports( | |||
239 | * Manual port configuration agent routines | 237 | * Manual port configuration agent routines |
240 | * ****************************************************************************** */ | 238 | * ****************************************************************************** */ |
241 | 239 | ||
242 | /** | 240 | /* verify all of the phys in the same port are using the same SAS address */ |
243 | * | 241 | static enum sci_status |
244 | * | 242 | scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost, |
245 | * This routine will verify that all of the phys in the same port are using the | 243 | struct scic_sds_port_configuration_agent *port_agent) |
246 | * same SAS address. | ||
247 | */ | ||
248 | static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( | ||
249 | struct scic_sds_controller *controller, | ||
250 | struct scic_sds_port_configuration_agent *port_agent) | ||
251 | { | 244 | { |
252 | struct isci_host *ihost = scic_to_ihost(controller); | ||
253 | u32 phy_mask; | 245 | u32 phy_mask; |
254 | u32 assigned_phy_mask; | 246 | u32 assigned_phy_mask; |
255 | struct sci_sas_address sas_address; | 247 | struct sci_sas_address sas_address; |
@@ -262,7 +254,7 @@ static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( | |||
262 | sas_address.low = 0; | 254 | sas_address.low = 0; |
263 | 255 | ||
264 | for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { | 256 | for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) { |
265 | phy_mask = controller->oem_parameters.sds1.ports[port_index].phy_mask; | 257 | phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask; |
266 | 258 | ||
267 | if (!phy_mask) | 259 | if (!phy_mask) |
268 | continue; | 260 | continue; |
@@ -324,7 +316,7 @@ static enum sci_status scic_sds_mpc_agent_validate_phy_configuration( | |||
324 | phy_index++; | 316 | phy_index++; |
325 | } | 317 | } |
326 | 318 | ||
327 | return scic_sds_port_configuration_agent_validate_ports(controller, port_agent); | 319 | return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); |
328 | } | 320 | } |
329 | 321 | ||
330 | static void mpc_agent_timeout(unsigned long data) | 322 | static void mpc_agent_timeout(unsigned long data) |
@@ -332,14 +324,12 @@ static void mpc_agent_timeout(unsigned long data) | |||
332 | u8 index; | 324 | u8 index; |
333 | struct sci_timer *tmr = (struct sci_timer *)data; | 325 | struct sci_timer *tmr = (struct sci_timer *)data; |
334 | struct scic_sds_port_configuration_agent *port_agent; | 326 | struct scic_sds_port_configuration_agent *port_agent; |
335 | struct scic_sds_controller *scic; | ||
336 | struct isci_host *ihost; | 327 | struct isci_host *ihost; |
337 | unsigned long flags; | 328 | unsigned long flags; |
338 | u16 configure_phy_mask; | 329 | u16 configure_phy_mask; |
339 | 330 | ||
340 | port_agent = container_of(tmr, typeof(*port_agent), timer); | 331 | port_agent = container_of(tmr, typeof(*port_agent), timer); |
341 | scic = container_of(port_agent, typeof(*scic), port_agent); | 332 | ihost = container_of(port_agent, typeof(*ihost), port_agent); |
342 | ihost = scic_to_ihost(scic); | ||
343 | 333 | ||
344 | spin_lock_irqsave(&ihost->scic_lock, flags); | 334 | spin_lock_irqsave(&ihost->scic_lock, flags); |
345 | 335 | ||
@@ -355,7 +345,7 @@ static void mpc_agent_timeout(unsigned long data) | |||
355 | struct isci_phy *iphy = &ihost->phys[index]; | 345 | struct isci_phy *iphy = &ihost->phys[index]; |
356 | 346 | ||
357 | if (configure_phy_mask & (1 << index)) { | 347 | if (configure_phy_mask & (1 << index)) { |
358 | port_agent->link_up_handler(scic, port_agent, | 348 | port_agent->link_up_handler(ihost, port_agent, |
359 | phy_get_non_dummy_port(iphy), | 349 | phy_get_non_dummy_port(iphy), |
360 | iphy); | 350 | iphy); |
361 | } | 351 | } |
@@ -365,7 +355,7 @@ done: | |||
365 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 355 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
366 | } | 356 | } |
367 | 357 | ||
368 | static void scic_sds_mpc_agent_link_up(struct scic_sds_controller *controller, | 358 | static void scic_sds_mpc_agent_link_up(struct isci_host *ihost, |
369 | struct scic_sds_port_configuration_agent *port_agent, | 359 | struct scic_sds_port_configuration_agent *port_agent, |
370 | struct isci_port *iport, | 360 | struct isci_port *iport, |
371 | struct isci_phy *iphy) | 361 | struct isci_phy *iphy) |
@@ -401,7 +391,7 @@ static void scic_sds_mpc_agent_link_up(struct scic_sds_controller *controller, | |||
401 | * link down notification from a phy that has no associated port? | 391 | * link down notification from a phy that has no associated port? |
402 | */ | 392 | */ |
403 | static void scic_sds_mpc_agent_link_down( | 393 | static void scic_sds_mpc_agent_link_down( |
404 | struct scic_sds_controller *scic, | 394 | struct isci_host *ihost, |
405 | struct scic_sds_port_configuration_agent *port_agent, | 395 | struct scic_sds_port_configuration_agent *port_agent, |
406 | struct isci_port *iport, | 396 | struct isci_port *iport, |
407 | struct isci_phy *iphy) | 397 | struct isci_phy *iphy) |
@@ -438,26 +428,17 @@ static void scic_sds_mpc_agent_link_down( | |||
438 | } | 428 | } |
439 | } | 429 | } |
440 | 430 | ||
441 | /* | 431 | /* verify phys are assigned a valid SAS address for automatic port |
442 | * ****************************************************************************** | 432 | * configuration mode. |
443 | * Automatic port configuration agent routines | ||
444 | * ****************************************************************************** */ | ||
445 | |||
446 | /** | ||
447 | * | ||
448 | * | ||
449 | * This routine will verify that the phys are assigned a valid SAS address for | ||
450 | * automatic port configuration mode. | ||
451 | */ | 433 | */ |
452 | static enum sci_status scic_sds_apc_agent_validate_phy_configuration( | 434 | static enum sci_status |
453 | struct scic_sds_controller *controller, | 435 | scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost, |
454 | struct scic_sds_port_configuration_agent *port_agent) | 436 | struct scic_sds_port_configuration_agent *port_agent) |
455 | { | 437 | { |
456 | u8 phy_index; | 438 | u8 phy_index; |
457 | u8 port_index; | 439 | u8 port_index; |
458 | struct sci_sas_address sas_address; | 440 | struct sci_sas_address sas_address; |
459 | struct sci_sas_address phy_assigned_address; | 441 | struct sci_sas_address phy_assigned_address; |
460 | struct isci_host *ihost = scic_to_ihost(controller); | ||
461 | 442 | ||
462 | phy_index = 0; | 443 | phy_index = 0; |
463 | 444 | ||
@@ -484,10 +465,10 @@ static enum sci_status scic_sds_apc_agent_validate_phy_configuration( | |||
484 | } | 465 | } |
485 | } | 466 | } |
486 | 467 | ||
487 | return scic_sds_port_configuration_agent_validate_ports(controller, port_agent); | 468 | return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent); |
488 | } | 469 | } |
489 | 470 | ||
490 | static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *controller, | 471 | static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost, |
491 | struct scic_sds_port_configuration_agent *port_agent, | 472 | struct scic_sds_port_configuration_agent *port_agent, |
492 | struct isci_phy *iphy, | 473 | struct isci_phy *iphy, |
493 | bool start_timer) | 474 | bool start_timer) |
@@ -496,9 +477,8 @@ static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *contr | |||
496 | enum sci_status status; | 477 | enum sci_status status; |
497 | struct isci_port *iport; | 478 | struct isci_port *iport; |
498 | enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; | 479 | enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY; |
499 | struct isci_host *ihost = scic_to_ihost(controller); | ||
500 | 480 | ||
501 | iport = scic_sds_port_configuration_agent_find_port(controller, iphy); | 481 | iport = scic_sds_port_configuration_agent_find_port(ihost, iphy); |
502 | 482 | ||
503 | if (iport) { | 483 | if (iport) { |
504 | if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) | 484 | if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) |
@@ -619,7 +599,7 @@ static void scic_sds_apc_agent_configure_ports(struct scic_sds_controller *contr | |||
619 | * notifications. Is it possible to get a link down notification from a phy | 599 | * notifications. Is it possible to get a link down notification from a phy |
620 | * that has no associated port? | 600 | * that has no associated port? |
621 | */ | 601 | */ |
622 | static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, | 602 | static void scic_sds_apc_agent_link_up(struct isci_host *ihost, |
623 | struct scic_sds_port_configuration_agent *port_agent, | 603 | struct scic_sds_port_configuration_agent *port_agent, |
624 | struct isci_port *iport, | 604 | struct isci_port *iport, |
625 | struct isci_phy *iphy) | 605 | struct isci_phy *iphy) |
@@ -629,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, | |||
629 | if (!iport) { | 609 | if (!iport) { |
630 | /* the phy is not part of this port */ | 610 | /* the phy is not part of this port */ |
631 | port_agent->phy_ready_mask |= 1 << phy_index; | 611 | port_agent->phy_ready_mask |= 1 << phy_index; |
632 | scic_sds_apc_agent_configure_ports(scic, port_agent, iphy, true); | 612 | scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true); |
633 | } else { | 613 | } else { |
634 | /* the phy is already part of the port */ | 614 | /* the phy is already part of the port */ |
635 | u32 port_state = iport->sm.current_state_id; | 615 | u32 port_state = iport->sm.current_state_id; |
@@ -658,7 +638,7 @@ static void scic_sds_apc_agent_link_up(struct scic_sds_controller *scic, | |||
658 | * port? | 638 | * port? |
659 | */ | 639 | */ |
660 | static void scic_sds_apc_agent_link_down( | 640 | static void scic_sds_apc_agent_link_down( |
661 | struct scic_sds_controller *controller, | 641 | struct isci_host *ihost, |
662 | struct scic_sds_port_configuration_agent *port_agent, | 642 | struct scic_sds_port_configuration_agent *port_agent, |
663 | struct isci_port *iport, | 643 | struct isci_port *iport, |
664 | struct isci_phy *iphy) | 644 | struct isci_phy *iphy) |
@@ -683,14 +663,12 @@ static void apc_agent_timeout(unsigned long data) | |||
683 | u32 index; | 663 | u32 index; |
684 | struct sci_timer *tmr = (struct sci_timer *)data; | 664 | struct sci_timer *tmr = (struct sci_timer *)data; |
685 | struct scic_sds_port_configuration_agent *port_agent; | 665 | struct scic_sds_port_configuration_agent *port_agent; |
686 | struct scic_sds_controller *scic; | ||
687 | struct isci_host *ihost; | 666 | struct isci_host *ihost; |
688 | unsigned long flags; | 667 | unsigned long flags; |
689 | u16 configure_phy_mask; | 668 | u16 configure_phy_mask; |
690 | 669 | ||
691 | port_agent = container_of(tmr, typeof(*port_agent), timer); | 670 | port_agent = container_of(tmr, typeof(*port_agent), timer); |
692 | scic = container_of(port_agent, typeof(*scic), port_agent); | 671 | ihost = container_of(port_agent, typeof(*ihost), port_agent); |
693 | ihost = scic_to_ihost(scic); | ||
694 | 672 | ||
695 | spin_lock_irqsave(&ihost->scic_lock, flags); | 673 | spin_lock_irqsave(&ihost->scic_lock, flags); |
696 | 674 | ||
@@ -708,7 +686,7 @@ static void apc_agent_timeout(unsigned long data) | |||
708 | if ((configure_phy_mask & (1 << index)) == 0) | 686 | if ((configure_phy_mask & (1 << index)) == 0) |
709 | continue; | 687 | continue; |
710 | 688 | ||
711 | scic_sds_apc_agent_configure_ports(scic, port_agent, | 689 | scic_sds_apc_agent_configure_ports(ihost, port_agent, |
712 | &ihost->phys[index], false); | 690 | &ihost->phys[index], false); |
713 | } | 691 | } |
714 | 692 | ||
@@ -748,17 +726,17 @@ void scic_sds_port_configuration_agent_construct( | |||
748 | } | 726 | } |
749 | 727 | ||
750 | enum sci_status scic_sds_port_configuration_agent_initialize( | 728 | enum sci_status scic_sds_port_configuration_agent_initialize( |
751 | struct scic_sds_controller *scic, | 729 | struct isci_host *ihost, |
752 | struct scic_sds_port_configuration_agent *port_agent) | 730 | struct scic_sds_port_configuration_agent *port_agent) |
753 | { | 731 | { |
754 | enum sci_status status; | 732 | enum sci_status status; |
755 | enum scic_port_configuration_mode mode; | 733 | enum scic_port_configuration_mode mode; |
756 | 734 | ||
757 | mode = scic->oem_parameters.sds1.controller.mode_type; | 735 | mode = ihost->oem_parameters.sds1.controller.mode_type; |
758 | 736 | ||
759 | if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { | 737 | if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { |
760 | status = scic_sds_mpc_agent_validate_phy_configuration( | 738 | status = scic_sds_mpc_agent_validate_phy_configuration( |
761 | scic, port_agent); | 739 | ihost, port_agent); |
762 | 740 | ||
763 | port_agent->link_up_handler = scic_sds_mpc_agent_link_up; | 741 | port_agent->link_up_handler = scic_sds_mpc_agent_link_up; |
764 | port_agent->link_down_handler = scic_sds_mpc_agent_link_down; | 742 | port_agent->link_down_handler = scic_sds_mpc_agent_link_down; |
@@ -766,7 +744,7 @@ enum sci_status scic_sds_port_configuration_agent_initialize( | |||
766 | sci_init_timer(&port_agent->timer, mpc_agent_timeout); | 744 | sci_init_timer(&port_agent->timer, mpc_agent_timeout); |
767 | } else { | 745 | } else { |
768 | status = scic_sds_apc_agent_validate_phy_configuration( | 746 | status = scic_sds_apc_agent_validate_phy_configuration( |
769 | scic, port_agent); | 747 | ihost, port_agent); |
770 | 748 | ||
771 | port_agent->link_up_handler = scic_sds_apc_agent_link_up; | 749 | port_agent->link_up_handler = scic_sds_apc_agent_link_up; |
772 | port_agent->link_down_handler = scic_sds_apc_agent_link_down; | 750 | port_agent->link_down_handler = scic_sds_apc_agent_link_down; |
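The port-configuration timers show the payoff of embedded members: because the agent lives inside struct isci_host, container_of() on the agent recovers the host in one step, replacing the old two-stage lookup through the controller. A sketch of the back-pointer walk used by mpc_agent_timeout() and apc_agent_timeout() above, with member names taken from the container_of() calls in the hunks and the body elided:

static void apc_agent_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_port_configuration_agent *port_agent =
		container_of(tmr, typeof(*port_agent), timer);
	/* the agent is embedded in the host, so one more container_of()
	 * lands on the isci_host; no controller hop in between */
	struct isci_host *ihost =
		container_of(port_agent, typeof(*ihost), port_agent);

	/* take ihost->scic_lock and reconfigure the flagged phys here */
}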
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index 95c8d91aab8d..e40cb5f6eba5 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h | |||
@@ -165,7 +165,7 @@ struct scic_sds_oem_params; | |||
165 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); | 165 | int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); |
166 | 166 | ||
167 | union scic_oem_parameters; | 167 | union scic_oem_parameters; |
168 | void scic_oem_parameters_get(struct scic_sds_controller *scic, | 168 | void scic_oem_parameters_get(struct isci_host *ihost, |
169 | union scic_oem_parameters *oem); | 169 | union scic_oem_parameters *oem); |
170 | 170 | ||
171 | struct isci_orom; | 171 | struct isci_orom; |
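probe_roms.h only adjusts the prototype: OEM parameters are now fetched with the host pointer. A usage sketch matching the isci_phy_init() call in the phy.c hunk above, where the copied-out parameters are used to assemble the phy's SAS address (variables 'ihost' and 'index' assumed from that context):

union scic_oem_parameters oem;
u64 sci_sas_addr;

scic_oem_parameters_get(ihost, &oem);	/* was scic_oem_parameters_get(&ihost->sci, &oem) */
sci_sas_addr = oem.sds1.phys[index].sas_address.high;
sci_sas_addr <<= 32;
sci_sas_addr |= oem.sds1.phys[index].sas_address.low;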
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 3b0234049a3d..9043b458c999 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "task.h" | 62 | #include "task.h" |
63 | 63 | ||
64 | /** | 64 | /** |
65 | * isci_remote_device_not_ready() - This function is called by the scic when | 65 | * isci_remote_device_not_ready() - This function is called by the ihost when |
66 | * the remote device is not ready. We mark the isci device as ready (not | 66 | * the remote device is not ready. We mark the isci device as ready (not |
67 | * "ready_for_io") and signal the waiting proccess. | 67 | * "ready_for_io") and signal the waiting proccess. |
68 | * @isci_host: This parameter specifies the isci host object. | 68 | * @isci_host: This parameter specifies the isci host object. |
@@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, | |||
92 | "%s: isci_device = %p request = %p\n", | 92 | "%s: isci_device = %p request = %p\n", |
93 | __func__, idev, ireq); | 93 | __func__, idev, ireq); |
94 | 94 | ||
95 | scic_controller_terminate_request(&ihost->sci, | 95 | scic_controller_terminate_request(ihost, |
96 | idev, | 96 | idev, |
97 | ireq); | 97 | ireq); |
98 | } | 98 | } |
@@ -104,7 +104,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, | |||
104 | } | 104 | } |
105 | 105 | ||
106 | /** | 106 | /** |
107 | * isci_remote_device_ready() - This function is called by the scic when the | 107 | * isci_remote_device_ready() - This function is called by the ihost when the |
108 | * remote device is ready. We mark the isci device as ready and signal the | 108 | * remote device is ready. We mark the isci device as ready and signal the |
109 | * waiting process. | 109 | * waiting process. |
110 | * @ihost: our valid isci_host | 110 | * @ihost: our valid isci_host |
@@ -135,8 +135,7 @@ static void rnc_destruct_done(void *_dev) | |||
135 | 135 | ||
136 | static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) | 136 | static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev) |
137 | { | 137 | { |
138 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 138 | struct isci_host *ihost = idev->owning_port->owning_controller; |
139 | struct isci_host *ihost = scic_to_ihost(scic); | ||
140 | enum sci_status status = SCI_SUCCESS; | 139 | enum sci_status status = SCI_SUCCESS; |
141 | u32 i; | 140 | u32 i; |
142 | 141 | ||
@@ -148,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem | |||
148 | ireq->target_device != idev) | 147 | ireq->target_device != idev) |
149 | continue; | 148 | continue; |
150 | 149 | ||
151 | s = scic_controller_terminate_request(scic, idev, ireq); | 150 | s = scic_controller_terminate_request(ihost, idev, ireq); |
152 | if (s != SCI_SUCCESS) | 151 | if (s != SCI_SUCCESS) |
153 | status = s; | 152 | status = s; |
154 | } | 153 | } |
@@ -276,7 +275,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
276 | { | 275 | { |
277 | struct sci_base_state_machine *sm = &idev->sm; | 276 | struct sci_base_state_machine *sm = &idev->sm; |
278 | enum scic_sds_remote_device_states state = sm->current_state_id; | 277 | enum scic_sds_remote_device_states state = sm->current_state_id; |
279 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 278 | struct isci_host *ihost = idev->owning_port->owning_controller; |
280 | enum sci_status status; | 279 | enum sci_status status; |
281 | 280 | ||
282 | switch (state) { | 281 | switch (state) { |
@@ -290,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
290 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | 289 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
291 | __func__, state); | 290 | __func__, state); |
292 | /* Return the frame back to the controller */ | 291 | /* Return the frame back to the controller */ |
293 | scic_sds_controller_release_frame(scic, frame_index); | 292 | scic_sds_controller_release_frame(ihost, frame_index); |
294 | return SCI_FAILURE_INVALID_STATE; | 293 | return SCI_FAILURE_INVALID_STATE; |
295 | case SCI_DEV_READY: | 294 | case SCI_DEV_READY: |
296 | case SCI_STP_DEV_NCQ_ERROR: | 295 | case SCI_STP_DEV_NCQ_ERROR: |
@@ -303,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
303 | void *frame_header; | 302 | void *frame_header; |
304 | ssize_t word_cnt; | 303 | ssize_t word_cnt; |
305 | 304 | ||
306 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 305 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
307 | frame_index, | 306 | frame_index, |
308 | &frame_header); | 307 | &frame_header); |
309 | if (status != SCI_SUCCESS) | 308 | if (status != SCI_SUCCESS) |
@@ -312,7 +311,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
312 | word_cnt = sizeof(hdr) / sizeof(u32); | 311 | word_cnt = sizeof(hdr) / sizeof(u32); |
313 | sci_swab32_cpy(&hdr, frame_header, word_cnt); | 312 | sci_swab32_cpy(&hdr, frame_header, word_cnt); |
314 | 313 | ||
315 | ireq = scic_request_by_tag(scic, be16_to_cpu(hdr.tag)); | 314 | ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag)); |
316 | if (ireq && ireq->target_device == idev) { | 315 | if (ireq && ireq->target_device == idev) { |
317 | /* The IO request is now in charge of releasing the frame */ | 316 | /* The IO request is now in charge of releasing the frame */ |
318 | status = scic_sds_io_request_frame_handler(ireq, frame_index); | 317 | status = scic_sds_io_request_frame_handler(ireq, frame_index); |
@@ -320,14 +319,14 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
320 | /* We could not map this tag to a valid IO | 319 | /* We could not map this tag to a valid IO |
321 | * request. Just toss the frame and continue | 320 | * request. Just toss the frame and continue |
322 | */ | 321 | */ |
323 | scic_sds_controller_release_frame(scic, frame_index); | 322 | scic_sds_controller_release_frame(ihost, frame_index); |
324 | } | 323 | } |
325 | break; | 324 | break; |
326 | } | 325 | } |
327 | case SCI_STP_DEV_NCQ: { | 326 | case SCI_STP_DEV_NCQ: { |
328 | struct dev_to_host_fis *hdr; | 327 | struct dev_to_host_fis *hdr; |
329 | 328 | ||
330 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 329 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
331 | frame_index, | 330 | frame_index, |
332 | (void **)&hdr); | 331 | (void **)&hdr); |
333 | if (status != SCI_SUCCESS) | 332 | if (status != SCI_SUCCESS) |
@@ -350,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device * | |||
350 | } else | 349 | } else |
351 | status = SCI_FAILURE; | 350 | status = SCI_FAILURE; |
352 | 351 | ||
353 | scic_sds_controller_release_frame(scic, frame_index); | 352 | scic_sds_controller_release_frame(ihost, frame_index); |
354 | break; | 353 | break; |
355 | } | 354 | } |
356 | case SCI_STP_DEV_CMD: | 355 | case SCI_STP_DEV_CMD: |
@@ -461,7 +460,7 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev | |||
461 | } | 460 | } |
462 | } | 461 | } |
463 | 462 | ||
464 | enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic, | 463 | enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost, |
465 | struct isci_remote_device *idev, | 464 | struct isci_remote_device *idev, |
466 | struct isci_request *ireq) | 465 | struct isci_request *ireq) |
467 | { | 466 | { |
@@ -597,7 +596,7 @@ static enum sci_status common_complete_io(struct isci_port *iport, | |||
597 | return status; | 596 | return status; |
598 | } | 597 | } |
599 | 598 | ||
600 | enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic, | 599 | enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost, |
601 | struct isci_remote_device *idev, | 600 | struct isci_remote_device *idev, |
602 | struct isci_request *ireq) | 601 | struct isci_request *ireq) |
603 | { | 602 | { |
@@ -678,7 +677,7 @@ static void scic_sds_remote_device_continue_request(void *dev) | |||
678 | scic_controller_continue_io(idev->working_request); | 677 | scic_controller_continue_io(idev->working_request); |
679 | } | 678 | } |
680 | 679 | ||
681 | enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic, | 680 | enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost, |
682 | struct isci_remote_device *idev, | 681 | struct isci_remote_device *idev, |
683 | struct isci_request *ireq) | 682 | struct isci_request *ireq) |
684 | { | 683 | { |
@@ -802,13 +801,13 @@ static void remote_device_resume_done(void *_dev) | |||
802 | static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) | 801 | static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) |
803 | { | 802 | { |
804 | struct isci_remote_device *idev = _dev; | 803 | struct isci_remote_device *idev = _dev; |
805 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 804 | struct isci_host *ihost = idev->owning_port->owning_controller; |
806 | 805 | ||
807 | /* For NCQ operation we do not issue an isci_remote_device_not_ready(). | 806 | /* For NCQ operation we do not issue an isci_remote_device_not_ready(). |
808 | * As a result, avoid sending the ready notification. | 807 | * As a result, avoid sending the ready notification. |
809 | */ | 808 | */ |
810 | if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) | 809 | if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) |
811 | isci_remote_device_ready(scic_to_ihost(scic), idev); | 810 | isci_remote_device_ready(ihost, idev); |
812 | } | 811 | } |
813 | 812 | ||
814 | static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) | 813 | static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm) |
@@ -836,7 +835,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id | |||
836 | { | 835 | { |
837 | struct sci_base_state_machine *sm = &idev->sm; | 836 | struct sci_base_state_machine *sm = &idev->sm; |
838 | enum scic_sds_remote_device_states state = sm->current_state_id; | 837 | enum scic_sds_remote_device_states state = sm->current_state_id; |
839 | struct scic_sds_controller *scic; | 838 | struct isci_host *ihost; |
840 | 839 | ||
841 | if (state != SCI_DEV_STOPPED) { | 840 | if (state != SCI_DEV_STOPPED) { |
842 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", | 841 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", |
@@ -844,8 +843,8 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id | |||
844 | return SCI_FAILURE_INVALID_STATE; | 843 | return SCI_FAILURE_INVALID_STATE; |
845 | } | 844 | } |
846 | 845 | ||
847 | scic = idev->owning_port->owning_controller; | 846 | ihost = idev->owning_port->owning_controller; |
848 | scic_sds_controller_free_remote_node_context(scic, idev, | 847 | scic_sds_controller_free_remote_node_context(ihost, idev, |
849 | idev->rnc.remote_node_index); | 848 | idev->rnc.remote_node_index); |
850 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 849 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
851 | sci_change_state(sm, SCI_DEV_FINAL); | 850 | sci_change_state(sm, SCI_DEV_FINAL); |
@@ -878,7 +877,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ | |||
878 | static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) | 877 | static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) |
879 | { | 878 | { |
880 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 879 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
881 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 880 | struct isci_host *ihost = idev->owning_port->owning_controller; |
882 | u32 prev_state; | 881 | u32 prev_state; |
883 | 882 | ||
884 | /* If we are entering from the stopping state let the SCI User know that | 883 | /* If we are entering from the stopping state let the SCI User know that |
@@ -886,16 +885,15 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac | |||
886 | */ | 885 | */ |
887 | prev_state = idev->sm.previous_state_id; | 886 | prev_state = idev->sm.previous_state_id; |
888 | if (prev_state == SCI_DEV_STOPPING) | 887 | if (prev_state == SCI_DEV_STOPPING) |
889 | isci_remote_device_deconstruct(scic_to_ihost(scic), idev); | 888 | isci_remote_device_deconstruct(ihost, idev); |
890 | 889 | ||
891 | scic_sds_controller_remote_device_stopped(scic, idev); | 890 | scic_sds_controller_remote_device_stopped(ihost, idev); |
892 | } | 891 | } |
893 | 892 | ||
894 | static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) | 893 | static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm) |
895 | { | 894 | { |
896 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 895 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
897 | struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); | 896 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); |
898 | struct isci_host *ihost = scic_to_ihost(scic); | ||
899 | 897 | ||
900 | isci_remote_device_not_ready(ihost, idev, | 898 | isci_remote_device_not_ready(ihost, idev, |
901 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); | 899 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); |
@@ -904,7 +902,7 @@ static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_ma | |||
904 | static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) | 902 | static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm) |
905 | { | 903 | { |
906 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 904 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
907 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 905 | struct isci_host *ihost = idev->owning_port->owning_controller; |
908 | struct domain_device *dev = idev->domain_dev; | 906 | struct domain_device *dev = idev->domain_dev; |
909 | 907 | ||
910 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { | 908 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { |
@@ -912,7 +910,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi | |||
912 | } else if (dev_is_expander(dev)) { | 910 | } else if (dev_is_expander(dev)) { |
913 | sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); | 911 | sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); |
914 | } else | 912 | } else |
915 | isci_remote_device_ready(scic_to_ihost(scic), idev); | 913 | isci_remote_device_ready(ihost, idev); |
916 | } | 914 | } |
917 | 915 | ||
918 | static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) | 916 | static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm) |
@@ -921,9 +919,9 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin | |||
921 | struct domain_device *dev = idev->domain_dev; | 919 | struct domain_device *dev = idev->domain_dev; |
922 | 920 | ||
923 | if (dev->dev_type == SAS_END_DEV) { | 921 | if (dev->dev_type == SAS_END_DEV) { |
924 | struct scic_sds_controller *scic = idev->owning_port->owning_controller; | 922 | struct isci_host *ihost = idev->owning_port->owning_controller; |
925 | 923 | ||
926 | isci_remote_device_not_ready(scic_to_ihost(scic), idev, | 924 | isci_remote_device_not_ready(ihost, idev, |
927 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); | 925 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); |
928 | } | 926 | } |
929 | } | 927 | } |
@@ -963,40 +961,40 @@ static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base | |||
963 | static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 961 | static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
964 | { | 962 | { |
965 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 963 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
966 | struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); | 964 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); |
967 | 965 | ||
968 | BUG_ON(idev->working_request == NULL); | 966 | BUG_ON(idev->working_request == NULL); |
969 | 967 | ||
970 | isci_remote_device_not_ready(scic_to_ihost(scic), idev, | 968 | isci_remote_device_not_ready(ihost, idev, |
971 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); | 969 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); |
972 | } | 970 | } |
973 | 971 | ||
974 | static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) | 972 | static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) |
975 | { | 973 | { |
976 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 974 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
977 | struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); | 975 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); |
978 | 976 | ||
979 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) | 977 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) |
980 | isci_remote_device_not_ready(scic_to_ihost(scic), idev, | 978 | isci_remote_device_not_ready(ihost, idev, |
981 | idev->not_ready_reason); | 979 | idev->not_ready_reason); |
982 | } | 980 | } |
983 | 981 | ||
984 | static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) | 982 | static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) |
985 | { | 983 | { |
986 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 984 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
987 | struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); | 985 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); |
988 | 986 | ||
989 | isci_remote_device_ready(scic_to_ihost(scic), idev); | 987 | isci_remote_device_ready(ihost, idev); |
990 | } | 988 | } |
991 | 989 | ||
992 | static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 990 | static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
993 | { | 991 | { |
994 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 992 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
995 | struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(idev); | 993 | struct isci_host *ihost = scic_sds_remote_device_get_controller(idev); |
996 | 994 | ||
997 | BUG_ON(idev->working_request == NULL); | 995 | BUG_ON(idev->working_request == NULL); |
998 | 996 | ||
999 | isci_remote_device_not_ready(scic_to_ihost(scic), idev, | 997 | isci_remote_device_not_ready(ihost, idev, |
1000 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); | 998 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); |
1001 | } | 999 | } |
1002 | 1000 | ||
@@ -1303,7 +1301,7 @@ void isci_remote_device_release(struct kref *kref) | |||
1303 | * @ihost: This parameter specifies the isci host object. | 1301 | * @ihost: This parameter specifies the isci host object. |
1304 | * @idev: This parameter specifies the remote device. | 1302 | * @idev: This parameter specifies the remote device. |
1305 | * | 1303 | * |
1306 | * The status of the scic request to stop. | 1304 | * The status of the request to stop the device. |
1307 | */ | 1305 | */ |
1308 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) | 1306 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) |
1309 | { | 1307 | { |
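Aside for readers following the conversion: with scic_sds_controller folded into isci_host, fields such as owning_port->owning_controller now yield the isci_host directly, which is why every scic_to_ihost() hop in the hunks above simply disappears. The stand-in sketch below (demo types and names only, not driver code) shows why the container_of-style conversion becomes unnecessary once the two objects are unified.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver structures. */
struct isci_host_demo {
	int id;
	struct sci_core_demo { int dummy; } sci;  /* the formerly separate core object */
};

/* What a scic_to_ihost()-style helper used to do: container_of() from the
 * embedded core back to the wrapping host. */
static struct isci_host_demo *core_to_host(struct sci_core_demo *core)
{
	return (struct isci_host_demo *)((char *)core -
					 offsetof(struct isci_host_demo, sci));
}

struct remote_device_demo {
	struct isci_host_demo *owning_controller;  /* now the host, directly */
};

int main(void)
{
	struct isci_host_demo host = { .id = 7 };
	struct remote_device_demo dev = { .owning_controller = &host };

	/* old style: hold the core pointer, convert on every use */
	printf("via container_of: id = %d\n", core_to_host(&host.sci)->id);

	/* new style: no conversion hop needed */
	printf("direct pointer:   id = %d\n", dev.owning_controller->id);
	return 0;
}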
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 45798582fc14..bc4da20a13fa 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h | |||
@@ -402,17 +402,17 @@ enum sci_status scic_sds_remote_device_event_handler( | |||
402 | u32 event_code); | 402 | u32 event_code); |
403 | 403 | ||
404 | enum sci_status scic_sds_remote_device_start_io( | 404 | enum sci_status scic_sds_remote_device_start_io( |
405 | struct scic_sds_controller *controller, | 405 | struct isci_host *ihost, |
406 | struct isci_remote_device *idev, | 406 | struct isci_remote_device *idev, |
407 | struct isci_request *ireq); | 407 | struct isci_request *ireq); |
408 | 408 | ||
409 | enum sci_status scic_sds_remote_device_start_task( | 409 | enum sci_status scic_sds_remote_device_start_task( |
410 | struct scic_sds_controller *controller, | 410 | struct isci_host *ihost, |
411 | struct isci_remote_device *idev, | 411 | struct isci_remote_device *idev, |
412 | struct isci_request *ireq); | 412 | struct isci_request *ireq); |
413 | 413 | ||
414 | enum sci_status scic_sds_remote_device_complete_io( | 414 | enum sci_status scic_sds_remote_device_complete_io( |
415 | struct scic_sds_controller *controller, | 415 | struct isci_host *ihost, |
416 | struct isci_remote_device *idev, | 416 | struct isci_remote_device *idev, |
417 | struct isci_request *ireq); | 417 | struct isci_request *ireq); |
418 | 418 | ||
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index e485744e1263..8a5203b6eb09 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c | |||
@@ -107,11 +107,11 @@ static void scic_sds_remote_node_context_construct_buffer( | |||
107 | struct domain_device *dev = idev->domain_dev; | 107 | struct domain_device *dev = idev->domain_dev; |
108 | int rni = sci_rnc->remote_node_index; | 108 | int rni = sci_rnc->remote_node_index; |
109 | union scu_remote_node_context *rnc; | 109 | union scu_remote_node_context *rnc; |
110 | struct scic_sds_controller *scic; | 110 | struct isci_host *ihost; |
111 | __le64 sas_addr; | 111 | __le64 sas_addr; |
112 | 112 | ||
113 | scic = scic_sds_remote_device_get_controller(idev); | 113 | ihost = scic_sds_remote_device_get_controller(idev); |
114 | rnc = scic_sds_controller_get_remote_node_context_buffer(scic, rni); | 114 | rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni); |
115 | 115 | ||
116 | memset(rnc, 0, sizeof(union scu_remote_node_context) | 116 | memset(rnc, 0, sizeof(union scu_remote_node_context) |
117 | * scic_sds_remote_device_node_count(idev)); | 117 | * scic_sds_remote_device_node_count(idev)); |
@@ -135,14 +135,14 @@ static void scic_sds_remote_node_context_construct_buffer( | |||
135 | 135 | ||
136 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 136 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
137 | rnc->ssp.connection_occupancy_timeout = | 137 | rnc->ssp.connection_occupancy_timeout = |
138 | scic->user_parameters.sds1.stp_max_occupancy_timeout; | 138 | ihost->user_parameters.sds1.stp_max_occupancy_timeout; |
139 | rnc->ssp.connection_inactivity_timeout = | 139 | rnc->ssp.connection_inactivity_timeout = |
140 | scic->user_parameters.sds1.stp_inactivity_timeout; | 140 | ihost->user_parameters.sds1.stp_inactivity_timeout; |
141 | } else { | 141 | } else { |
142 | rnc->ssp.connection_occupancy_timeout = | 142 | rnc->ssp.connection_occupancy_timeout = |
143 | scic->user_parameters.sds1.ssp_max_occupancy_timeout; | 143 | ihost->user_parameters.sds1.ssp_max_occupancy_timeout; |
144 | rnc->ssp.connection_inactivity_timeout = | 144 | rnc->ssp.connection_inactivity_timeout = |
145 | scic->user_parameters.sds1.ssp_inactivity_timeout; | 145 | ihost->user_parameters.sds1.ssp_inactivity_timeout; |
146 | } | 146 | } |
147 | 147 | ||
148 | rnc->ssp.initial_arbitration_wait_time = 0; | 148 | rnc->ssp.initial_arbitration_wait_time = 0; |
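One detail in the construct-buffer hunk above worth calling out: the RNC connection timeouts come from the user parameters (now reached through ihost), with the stp_* pair used for SATA/STP targets and the ssp_* pair for everything else. A minimal, self-contained sketch of that selection, using hypothetical demo types rather than the driver's structs:

#include <stdbool.h>
#include <stdio.h>

struct user_params_demo {                       /* stand-in for user_parameters.sds1 */
	unsigned stp_max_occupancy_timeout;
	unsigned stp_inactivity_timeout;
	unsigned ssp_max_occupancy_timeout;
	unsigned ssp_inactivity_timeout;
};

struct rnc_timeouts_demo {
	unsigned occupancy;
	unsigned inactivity;
};

/* Mirror of the if/else in the hunk above: STP/SATA devices get the stp_*
 * values, SSP devices get the ssp_* values. */
static struct rnc_timeouts_demo pick_timeouts(const struct user_params_demo *p,
					      bool is_stp_or_sata)
{
	struct rnc_timeouts_demo t;

	if (is_stp_or_sata) {
		t.occupancy  = p->stp_max_occupancy_timeout;
		t.inactivity = p->stp_inactivity_timeout;
	} else {
		t.occupancy  = p->ssp_max_occupancy_timeout;
		t.inactivity = p->ssp_inactivity_timeout;
	}
	return t;
}

int main(void)
{
	struct user_params_demo p = { 20, 5, 20, 5 };
	struct rnc_timeouts_demo t = pick_timeouts(&p, true);

	printf("occupancy=%u inactivity=%u\n", t.occupancy, t.inactivity);
	return 0;
}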
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 90ead662828d..36e674896bc5 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c | |||
@@ -74,19 +74,19 @@ static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ire | |||
74 | return &ireq->sg_table[idx - 2]; | 74 | return &ireq->sg_table[idx - 2]; |
75 | } | 75 | } |
76 | 76 | ||
77 | static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, | 77 | static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, |
78 | struct isci_request *ireq, u32 idx) | 78 | struct isci_request *ireq, u32 idx) |
79 | { | 79 | { |
80 | u32 offset; | 80 | u32 offset; |
81 | 81 | ||
82 | if (idx == 0) { | 82 | if (idx == 0) { |
83 | offset = (void *) &ireq->tc->sgl_pair_ab - | 83 | offset = (void *) &ireq->tc->sgl_pair_ab - |
84 | (void *) &scic->task_context_table[0]; | 84 | (void *) &ihost->task_context_table[0]; |
85 | return scic->task_context_dma + offset; | 85 | return ihost->task_context_dma + offset; |
86 | } else if (idx == 1) { | 86 | } else if (idx == 1) { |
87 | offset = (void *) &ireq->tc->sgl_pair_cd - | 87 | offset = (void *) &ireq->tc->sgl_pair_cd - |
88 | (void *) &scic->task_context_table[0]; | 88 | (void *) &ihost->task_context_table[0]; |
89 | return scic->task_context_dma + offset; | 89 | return ihost->task_context_dma + offset; |
90 | } | 90 | } |
91 | 91 | ||
92 | return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); | 92 | return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); |
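The hunk above is a good place to note what to_sgl_element_pair_dma() actually computes: for the first two SGL pairs, which live inside the task context table, the bus address is the table's DMA base plus the CPU-pointer offset of the field within that same coherent allocation. A self-contained sketch of that base-plus-offset arithmetic, with demo names and plain calloc() standing in for the coherent buffer:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sgl_pair_demo { uint64_t addr_lo, addr_hi; };

struct task_context_demo {                 /* stand-in for scu_task_context */
	uint32_t header[4];
	struct sgl_pair_demo sgl_pair_ab;
	struct sgl_pair_demo sgl_pair_cd;
};

/* Bus address of a field inside a coherently mapped table:
 * DMA base + (field's CPU offset within the allocation). */
static uint64_t field_dma_addr(struct task_context_demo *table,
			       uint64_t table_dma, void *field)
{
	return table_dma + (uint64_t)((char *)field - (char *)table);
}

int main(void)
{
	struct task_context_demo *table = calloc(4, sizeof(*table));
	uint64_t table_dma = 0x1000;       /* pretend device-visible base */

	if (!table)
		return 1;

	/* e.g. the sgl_pair_ab of the third task context in the table */
	printf("sgl_pair_ab dma = 0x%llx\n",
	       (unsigned long long)field_dma_addr(table, table_dma,
						  &table[2].sgl_pair_ab));
	free(table);
	return 0;
}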
@@ -102,8 +102,7 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) | |||
102 | 102 | ||
103 | static void scic_sds_request_build_sgl(struct isci_request *ireq) | 103 | static void scic_sds_request_build_sgl(struct isci_request *ireq) |
104 | { | 104 | { |
105 | struct isci_host *isci_host = ireq->isci_host; | 105 | struct isci_host *ihost = ireq->isci_host; |
106 | struct scic_sds_controller *scic = &isci_host->sci; | ||
107 | struct sas_task *task = isci_request_access_task(ireq); | 106 | struct sas_task *task = isci_request_access_task(ireq); |
108 | struct scatterlist *sg = NULL; | 107 | struct scatterlist *sg = NULL; |
109 | dma_addr_t dma_addr; | 108 | dma_addr_t dma_addr; |
@@ -125,7 +124,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) | |||
125 | memset(&scu_sg->B, 0, sizeof(scu_sg->B)); | 124 | memset(&scu_sg->B, 0, sizeof(scu_sg->B)); |
126 | 125 | ||
127 | if (prev_sg) { | 126 | if (prev_sg) { |
128 | dma_addr = to_sgl_element_pair_dma(scic, | 127 | dma_addr = to_sgl_element_pair_dma(ihost, |
129 | ireq, | 128 | ireq, |
130 | sg_idx); | 129 | sg_idx); |
131 | 130 | ||
@@ -141,7 +140,7 @@ static void scic_sds_request_build_sgl(struct isci_request *ireq) | |||
141 | } else { /* handle when no sg */ | 140 | } else { /* handle when no sg */ |
142 | scu_sg = to_sgl_element_pair(ireq, sg_idx); | 141 | scu_sg = to_sgl_element_pair(ireq, sg_idx); |
143 | 142 | ||
144 | dma_addr = dma_map_single(&isci_host->pdev->dev, | 143 | dma_addr = dma_map_single(&ihost->pdev->dev, |
145 | task->scatter, | 144 | task->scatter, |
146 | task->total_xfer_len, | 145 | task->total_xfer_len, |
147 | task->data_dir); | 146 | task->data_dir); |
@@ -508,7 +507,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, | |||
508 | scu_stp_raw_request_construct_task_context(ireq); | 507 | scu_stp_raw_request_construct_task_context(ireq); |
509 | return SCI_SUCCESS; | 508 | return SCI_SUCCESS; |
510 | } else { | 509 | } else { |
511 | dev_err(scic_to_dev(ireq->owning_controller), | 510 | dev_err(&ireq->owning_controller->pdev->dev, |
512 | "%s: Request 0x%p received un-handled SAT " | 511 | "%s: Request 0x%p received un-handled SAT " |
513 | "management protocol 0x%x.\n", | 512 | "management protocol 0x%x.\n", |
514 | __func__, ireq, tmf->tmf_code); | 513 | __func__, ireq, tmf->tmf_code); |
@@ -518,7 +517,7 @@ scic_io_request_construct_sata(struct isci_request *ireq, | |||
518 | } | 517 | } |
519 | 518 | ||
520 | if (!sas_protocol_ata(task->task_proto)) { | 519 | if (!sas_protocol_ata(task->task_proto)) { |
521 | dev_err(scic_to_dev(ireq->owning_controller), | 520 | dev_err(&ireq->owning_controller->pdev->dev, |
522 | "%s: Non-ATA protocol in SATA path: 0x%x\n", | 521 | "%s: Non-ATA protocol in SATA path: 0x%x\n", |
523 | __func__, | 522 | __func__, |
524 | task->task_proto); | 523 | task->task_proto); |
@@ -616,7 +615,7 @@ enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) | |||
616 | tmf->tmf_code == isci_tmf_sata_srst_low) { | 615 | tmf->tmf_code == isci_tmf_sata_srst_low) { |
617 | scu_stp_raw_request_construct_task_context(ireq); | 616 | scu_stp_raw_request_construct_task_context(ireq); |
618 | } else { | 617 | } else { |
619 | dev_err(scic_to_dev(ireq->owning_controller), | 618 | dev_err(&ireq->owning_controller->pdev->dev, |
620 | "%s: Request 0x%p received un-handled SAT " | 619 | "%s: Request 0x%p received un-handled SAT " |
621 | "Protocol 0x%x.\n", | 620 | "Protocol 0x%x.\n", |
622 | __func__, ireq, tmf->tmf_code); | 621 | __func__, ireq, tmf->tmf_code); |
@@ -639,11 +638,11 @@ enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) | |||
639 | #define SCU_TASK_CONTEXT_SRAM 0x200000 | 638 | #define SCU_TASK_CONTEXT_SRAM 0x200000 |
640 | static u32 sci_req_tx_bytes(struct isci_request *ireq) | 639 | static u32 sci_req_tx_bytes(struct isci_request *ireq) |
641 | { | 640 | { |
642 | struct scic_sds_controller *scic = ireq->owning_controller; | 641 | struct isci_host *ihost = ireq->owning_controller; |
643 | u32 ret_val = 0; | 642 | u32 ret_val = 0; |
644 | 643 | ||
645 | if (readl(&scic->smu_registers->address_modifier) == 0) { | 644 | if (readl(&ihost->smu_registers->address_modifier) == 0) { |
646 | void __iomem *scu_reg_base = scic->scu_registers; | 645 | void __iomem *scu_reg_base = ihost->scu_registers; |
647 | 646 | ||
648 | /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where | 647 | /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where |
649 | * BAR1 is the scu_registers | 648 | * BAR1 is the scu_registers |
@@ -663,11 +662,11 @@ enum sci_status scic_sds_request_start(struct isci_request *ireq) | |||
663 | { | 662 | { |
664 | enum sci_base_request_states state; | 663 | enum sci_base_request_states state; |
665 | struct scu_task_context *tc = ireq->tc; | 664 | struct scu_task_context *tc = ireq->tc; |
666 | struct scic_sds_controller *scic = ireq->owning_controller; | 665 | struct isci_host *ihost = ireq->owning_controller; |
667 | 666 | ||
668 | state = ireq->sm.current_state_id; | 667 | state = ireq->sm.current_state_id; |
669 | if (state != SCI_REQ_CONSTRUCTED) { | 668 | if (state != SCI_REQ_CONSTRUCTED) { |
670 | dev_warn(scic_to_dev(scic), | 669 | dev_warn(&ihost->pdev->dev, |
671 | "%s: SCIC IO Request requested to start while in wrong " | 670 | "%s: SCIC IO Request requested to start while in wrong " |
672 | "state %d\n", __func__, state); | 671 | "state %d\n", __func__, state); |
673 | return SCI_FAILURE_INVALID_STATE; | 672 | return SCI_FAILURE_INVALID_STATE; |
@@ -749,7 +748,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) | |||
749 | return SCI_SUCCESS; | 748 | return SCI_SUCCESS; |
750 | case SCI_REQ_COMPLETED: | 749 | case SCI_REQ_COMPLETED: |
751 | default: | 750 | default: |
752 | dev_warn(scic_to_dev(ireq->owning_controller), | 751 | dev_warn(&ireq->owning_controller->pdev->dev, |
753 | "%s: SCIC IO Request requested to abort while in wrong " | 752 | "%s: SCIC IO Request requested to abort while in wrong " |
754 | "state %d\n", | 753 | "state %d\n", |
755 | __func__, | 754 | __func__, |
@@ -763,7 +762,7 @@ scic_sds_io_request_terminate(struct isci_request *ireq) | |||
763 | enum sci_status scic_sds_request_complete(struct isci_request *ireq) | 762 | enum sci_status scic_sds_request_complete(struct isci_request *ireq) |
764 | { | 763 | { |
765 | enum sci_base_request_states state; | 764 | enum sci_base_request_states state; |
766 | struct scic_sds_controller *scic = ireq->owning_controller; | 765 | struct isci_host *ihost = ireq->owning_controller; |
767 | 766 | ||
768 | state = ireq->sm.current_state_id; | 767 | state = ireq->sm.current_state_id; |
769 | if (WARN_ONCE(state != SCI_REQ_COMPLETED, | 768 | if (WARN_ONCE(state != SCI_REQ_COMPLETED, |
@@ -771,7 +770,7 @@ enum sci_status scic_sds_request_complete(struct isci_request *ireq) | |||
771 | return SCI_FAILURE_INVALID_STATE; | 770 | return SCI_FAILURE_INVALID_STATE; |
772 | 771 | ||
773 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) | 772 | if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) |
774 | scic_sds_controller_release_frame(scic, | 773 | scic_sds_controller_release_frame(ihost, |
775 | ireq->saved_rx_frame_index); | 774 | ireq->saved_rx_frame_index); |
776 | 775 | ||
777 | /* XXX can we just stop the machine and remove the 'final' state? */ | 776 | /* XXX can we just stop the machine and remove the 'final' state? */ |
@@ -783,12 +782,12 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, | |||
783 | u32 event_code) | 782 | u32 event_code) |
784 | { | 783 | { |
785 | enum sci_base_request_states state; | 784 | enum sci_base_request_states state; |
786 | struct scic_sds_controller *scic = ireq->owning_controller; | 785 | struct isci_host *ihost = ireq->owning_controller; |
787 | 786 | ||
788 | state = ireq->sm.current_state_id; | 787 | state = ireq->sm.current_state_id; |
789 | 788 | ||
790 | if (state != SCI_REQ_STP_PIO_DATA_IN) { | 789 | if (state != SCI_REQ_STP_PIO_DATA_IN) { |
791 | dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", | 790 | dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", |
792 | __func__, event_code, state); | 791 | __func__, event_code, state); |
793 | 792 | ||
794 | return SCI_FAILURE_INVALID_STATE; | 793 | return SCI_FAILURE_INVALID_STATE; |
@@ -802,7 +801,7 @@ enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, | |||
802 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); | 801 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); |
803 | return SCI_SUCCESS; | 802 | return SCI_SUCCESS; |
804 | default: | 803 | default: |
805 | dev_err(scic_to_dev(scic), | 804 | dev_err(&ihost->pdev->dev, |
806 | "%s: pio request unexpected event %#x\n", | 805 | "%s: pio request unexpected event %#x\n", |
807 | __func__, event_code); | 806 | __func__, event_code); |
808 | 807 | ||
@@ -1024,7 +1023,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq | |||
1024 | * There is a potential for receiving multiple task responses if | 1023 | * There is a potential for receiving multiple task responses if |
1025 | * we decide to send the task IU again. | 1024 | * we decide to send the task IU again. |
1026 | */ | 1025 | */ |
1027 | dev_warn(scic_to_dev(ireq->owning_controller), | 1026 | dev_warn(&ireq->owning_controller->pdev->dev, |
1028 | "%s: TaskRequest:0x%p CompletionCode:%x - " | 1027 | "%s: TaskRequest:0x%p CompletionCode:%x - " |
1029 | "ACK/NAK timeout\n", __func__, ireq, | 1028 | "ACK/NAK timeout\n", __func__, ireq, |
1030 | completion_code); | 1029 | completion_code); |
@@ -1073,7 +1072,7 @@ smp_request_await_response_tc_event(struct isci_request *ireq, | |||
1073 | * response within 2 ms. This causes our hardware to break | 1072 | * response within 2 ms. This causes our hardware to break |
1074 | * the connection and set TC completion with one of | 1073 | * the connection and set TC completion with one of |
1075 | * these SMP_XXX_XX_ERR statuses. For this type of error, | 1074 | * these SMP_XXX_XX_ERR statuses. For this type of error, |
1076 | * we ask the scic user to retry the request. | 1075 | * we ask the ihost user to retry the request. |
1077 | */ | 1076 | */ |
1078 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, | 1077 | scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR, |
1079 | SCI_FAILURE_RETRY_REQUIRED); | 1078 | SCI_FAILURE_RETRY_REQUIRED); |
@@ -1451,18 +1450,18 @@ static void scic_sds_stp_request_udma_complete_request( | |||
1451 | static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, | 1450 | static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq, |
1452 | u32 frame_index) | 1451 | u32 frame_index) |
1453 | { | 1452 | { |
1454 | struct scic_sds_controller *scic = ireq->owning_controller; | 1453 | struct isci_host *ihost = ireq->owning_controller; |
1455 | struct dev_to_host_fis *frame_header; | 1454 | struct dev_to_host_fis *frame_header; |
1456 | enum sci_status status; | 1455 | enum sci_status status; |
1457 | u32 *frame_buffer; | 1456 | u32 *frame_buffer; |
1458 | 1457 | ||
1459 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1458 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1460 | frame_index, | 1459 | frame_index, |
1461 | (void **)&frame_header); | 1460 | (void **)&frame_header); |
1462 | 1461 | ||
1463 | if ((status == SCI_SUCCESS) && | 1462 | if ((status == SCI_SUCCESS) && |
1464 | (frame_header->fis_type == FIS_REGD2H)) { | 1463 | (frame_header->fis_type == FIS_REGD2H)) { |
1465 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1464 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1466 | frame_index, | 1465 | frame_index, |
1467 | (void **)&frame_buffer); | 1466 | (void **)&frame_buffer); |
1468 | 1467 | ||
@@ -1471,7 +1470,7 @@ static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct is | |||
1471 | frame_buffer); | 1470 | frame_buffer); |
1472 | } | 1471 | } |
1473 | 1472 | ||
1474 | scic_sds_controller_release_frame(scic, frame_index); | 1473 | scic_sds_controller_release_frame(ihost, frame_index); |
1475 | 1474 | ||
1476 | return status; | 1475 | return status; |
1477 | } | 1476 | } |
@@ -1480,7 +1479,7 @@ enum sci_status | |||
1480 | scic_sds_io_request_frame_handler(struct isci_request *ireq, | 1479 | scic_sds_io_request_frame_handler(struct isci_request *ireq, |
1481 | u32 frame_index) | 1480 | u32 frame_index) |
1482 | { | 1481 | { |
1483 | struct scic_sds_controller *scic = ireq->owning_controller; | 1482 | struct isci_host *ihost = ireq->owning_controller; |
1484 | struct isci_stp_request *stp_req = &ireq->stp.req; | 1483 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1485 | enum sci_base_request_states state; | 1484 | enum sci_base_request_states state; |
1486 | enum sci_status status; | 1485 | enum sci_status status; |
@@ -1492,7 +1491,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1492 | struct ssp_frame_hdr ssp_hdr; | 1491 | struct ssp_frame_hdr ssp_hdr; |
1493 | void *frame_header; | 1492 | void *frame_header; |
1494 | 1493 | ||
1495 | scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1494 | scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1496 | frame_index, | 1495 | frame_index, |
1497 | &frame_header); | 1496 | &frame_header); |
1498 | 1497 | ||
@@ -1503,7 +1502,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1503 | struct ssp_response_iu *resp_iu; | 1502 | struct ssp_response_iu *resp_iu; |
1504 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); | 1503 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); |
1505 | 1504 | ||
1506 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1505 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1507 | frame_index, | 1506 | frame_index, |
1508 | (void **)&resp_iu); | 1507 | (void **)&resp_iu); |
1509 | 1508 | ||
@@ -1522,7 +1521,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1522 | SCI_SUCCESS); | 1521 | SCI_SUCCESS); |
1523 | } else { | 1522 | } else { |
1524 | /* not a response frame, why did it get forwarded? */ | 1523 | /* not a response frame, why did it get forwarded? */ |
1525 | dev_err(scic_to_dev(scic), | 1524 | dev_err(&ihost->pdev->dev, |
1526 | "%s: SCIC IO Request 0x%p received unexpected " | 1525 | "%s: SCIC IO Request 0x%p received unexpected " |
1527 | "frame %d type 0x%02x\n", __func__, ireq, | 1526 | "frame %d type 0x%02x\n", __func__, ireq, |
1528 | frame_index, ssp_hdr.frame_type); | 1527 | frame_index, ssp_hdr.frame_type); |
@@ -1532,7 +1531,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1532 | * In any case we are done with this frame buffer return it to | 1531 | * In any case we are done with this frame buffer return it to |
1533 | * the controller | 1532 | * the controller |
1534 | */ | 1533 | */ |
1535 | scic_sds_controller_release_frame(scic, frame_index); | 1534 | scic_sds_controller_release_frame(ihost, frame_index); |
1536 | 1535 | ||
1537 | return SCI_SUCCESS; | 1536 | return SCI_SUCCESS; |
1538 | } | 1537 | } |
@@ -1540,14 +1539,14 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1540 | case SCI_REQ_TASK_WAIT_TC_RESP: | 1539 | case SCI_REQ_TASK_WAIT_TC_RESP: |
1541 | scic_sds_io_request_copy_response(ireq); | 1540 | scic_sds_io_request_copy_response(ireq); |
1542 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1541 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1543 | scic_sds_controller_release_frame(scic,frame_index); | 1542 | scic_sds_controller_release_frame(ihost, frame_index); |
1544 | return SCI_SUCCESS; | 1543 | return SCI_SUCCESS; |
1545 | 1544 | ||
1546 | case SCI_REQ_SMP_WAIT_RESP: { | 1545 | case SCI_REQ_SMP_WAIT_RESP: { |
1547 | struct smp_resp *rsp_hdr = &ireq->smp.rsp; | 1546 | struct smp_resp *rsp_hdr = &ireq->smp.rsp; |
1548 | void *frame_header; | 1547 | void *frame_header; |
1549 | 1548 | ||
1550 | scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1549 | scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1551 | frame_index, | 1550 | frame_index, |
1552 | &frame_header); | 1551 | &frame_header); |
1553 | 1552 | ||
@@ -1558,7 +1557,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1558 | if (rsp_hdr->frame_type == SMP_RESPONSE) { | 1557 | if (rsp_hdr->frame_type == SMP_RESPONSE) { |
1559 | void *smp_resp; | 1558 | void *smp_resp; |
1560 | 1559 | ||
1561 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1560 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1562 | frame_index, | 1561 | frame_index, |
1563 | &smp_resp); | 1562 | &smp_resp); |
1564 | 1563 | ||
@@ -1577,7 +1576,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1577 | * This was not a response frame, why did it get | 1576 | * This was not a response frame, why did it get |
1578 | * forwarded? | 1577 | * forwarded? |
1579 | */ | 1578 | */ |
1580 | dev_err(scic_to_dev(scic), | 1579 | dev_err(&ihost->pdev->dev, |
1581 | "%s: SCIC SMP Request 0x%p received unexpected " | 1580 | "%s: SCIC SMP Request 0x%p received unexpected " |
1582 | "frame %d type 0x%02x\n", | 1581 | "frame %d type 0x%02x\n", |
1583 | __func__, | 1582 | __func__, |
@@ -1592,7 +1591,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1592 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1591 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1593 | } | 1592 | } |
1594 | 1593 | ||
1595 | scic_sds_controller_release_frame(scic, frame_index); | 1594 | scic_sds_controller_release_frame(ihost, frame_index); |
1596 | 1595 | ||
1597 | return SCI_SUCCESS; | 1596 | return SCI_SUCCESS; |
1598 | } | 1597 | } |
@@ -1619,12 +1618,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1619 | struct dev_to_host_fis *frame_header; | 1618 | struct dev_to_host_fis *frame_header; |
1620 | u32 *frame_buffer; | 1619 | u32 *frame_buffer; |
1621 | 1620 | ||
1622 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1621 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1623 | frame_index, | 1622 | frame_index, |
1624 | (void **)&frame_header); | 1623 | (void **)&frame_header); |
1625 | 1624 | ||
1626 | if (status != SCI_SUCCESS) { | 1625 | if (status != SCI_SUCCESS) { |
1627 | dev_err(scic_to_dev(scic), | 1626 | dev_err(&ihost->pdev->dev, |
1628 | "%s: SCIC IO Request 0x%p could not get frame " | 1627 | "%s: SCIC IO Request 0x%p could not get frame " |
1629 | "header for frame index %d, status %x\n", | 1628 | "header for frame index %d, status %x\n", |
1630 | __func__, | 1629 | __func__, |
@@ -1637,7 +1636,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1637 | 1636 | ||
1638 | switch (frame_header->fis_type) { | 1637 | switch (frame_header->fis_type) { |
1639 | case FIS_REGD2H: | 1638 | case FIS_REGD2H: |
1640 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1639 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1641 | frame_index, | 1640 | frame_index, |
1642 | (void **)&frame_buffer); | 1641 | (void **)&frame_buffer); |
1643 | 1642 | ||
@@ -1651,7 +1650,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1651 | break; | 1650 | break; |
1652 | 1651 | ||
1653 | default: | 1652 | default: |
1654 | dev_warn(scic_to_dev(scic), | 1653 | dev_warn(&ihost->pdev->dev, |
1655 | "%s: IO Request:0x%p Frame Id:%d protocol " | 1654 | "%s: IO Request:0x%p Frame Id:%d protocol " |
1656 | "violation occurred\n", __func__, stp_req, | 1655 | "violation occurred\n", __func__, stp_req, |
1657 | frame_index); | 1656 | frame_index); |
@@ -1664,7 +1663,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1664 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1663 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1665 | 1664 | ||
1666 | /* Frame has been decoded return it to the controller */ | 1665 | /* Frame has been decoded return it to the controller */ |
1667 | scic_sds_controller_release_frame(scic, frame_index); | 1666 | scic_sds_controller_release_frame(ihost, frame_index); |
1668 | 1667 | ||
1669 | return status; | 1668 | return status; |
1670 | } | 1669 | } |
@@ -1674,12 +1673,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1674 | struct dev_to_host_fis *frame_header; | 1673 | struct dev_to_host_fis *frame_header; |
1675 | u32 *frame_buffer; | 1674 | u32 *frame_buffer; |
1676 | 1675 | ||
1677 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1676 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1678 | frame_index, | 1677 | frame_index, |
1679 | (void **)&frame_header); | 1678 | (void **)&frame_header); |
1680 | 1679 | ||
1681 | if (status != SCI_SUCCESS) { | 1680 | if (status != SCI_SUCCESS) { |
1682 | dev_err(scic_to_dev(scic), | 1681 | dev_err(&ihost->pdev->dev, |
1683 | "%s: SCIC IO Request 0x%p could not get frame " | 1682 | "%s: SCIC IO Request 0x%p could not get frame " |
1684 | "header for frame index %d, status %x\n", | 1683 | "header for frame index %d, status %x\n", |
1685 | __func__, stp_req, frame_index, status); | 1684 | __func__, stp_req, frame_index, status); |
@@ -1689,7 +1688,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1689 | switch (frame_header->fis_type) { | 1688 | switch (frame_header->fis_type) { |
1690 | case FIS_PIO_SETUP: | 1689 | case FIS_PIO_SETUP: |
1691 | /* Get from the frame buffer the PIO Setup Data */ | 1690 | /* Get from the frame buffer the PIO Setup Data */ |
1692 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1691 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1693 | frame_index, | 1692 | frame_index, |
1694 | (void **)&frame_buffer); | 1693 | (void **)&frame_buffer); |
1695 | 1694 | ||
@@ -1736,7 +1735,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1736 | * FIS when it is still busy? Do nothing since | 1735 | * FIS when it is still busy? Do nothing since |
1737 | * we are still in the right state. | 1736 | * we are still in the right state. |
1738 | */ | 1737 | */ |
1739 | dev_dbg(scic_to_dev(scic), | 1738 | dev_dbg(&ihost->pdev->dev, |
1740 | "%s: SCIC PIO Request 0x%p received " | 1739 | "%s: SCIC PIO Request 0x%p received " |
1741 | "D2H Register FIS with BSY status " | 1740 | "D2H Register FIS with BSY status " |
1742 | "0x%x\n", | 1741 | "0x%x\n", |
@@ -1746,7 +1745,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1746 | break; | 1745 | break; |
1747 | } | 1746 | } |
1748 | 1747 | ||
1749 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1748 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1750 | frame_index, | 1749 | frame_index, |
1751 | (void **)&frame_buffer); | 1750 | (void **)&frame_buffer); |
1752 | 1751 | ||
@@ -1767,7 +1766,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1767 | } | 1766 | } |
1768 | 1767 | ||
1769 | /* Frame is decoded return it to the controller */ | 1768 | /* Frame is decoded return it to the controller */ |
1770 | scic_sds_controller_release_frame(scic, frame_index); | 1769 | scic_sds_controller_release_frame(ihost, frame_index); |
1771 | 1770 | ||
1772 | return status; | 1771 | return status; |
1773 | } | 1772 | } |
@@ -1776,12 +1775,12 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1776 | struct dev_to_host_fis *frame_header; | 1775 | struct dev_to_host_fis *frame_header; |
1777 | struct sata_fis_data *frame_buffer; | 1776 | struct sata_fis_data *frame_buffer; |
1778 | 1777 | ||
1779 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1778 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1780 | frame_index, | 1779 | frame_index, |
1781 | (void **)&frame_header); | 1780 | (void **)&frame_header); |
1782 | 1781 | ||
1783 | if (status != SCI_SUCCESS) { | 1782 | if (status != SCI_SUCCESS) { |
1784 | dev_err(scic_to_dev(scic), | 1783 | dev_err(&ihost->pdev->dev, |
1785 | "%s: SCIC IO Request 0x%p could not get frame " | 1784 | "%s: SCIC IO Request 0x%p could not get frame " |
1786 | "header for frame index %d, status %x\n", | 1785 | "header for frame index %d, status %x\n", |
1787 | __func__, | 1786 | __func__, |
@@ -1792,7 +1791,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1792 | } | 1791 | } |
1793 | 1792 | ||
1794 | if (frame_header->fis_type != FIS_DATA) { | 1793 | if (frame_header->fis_type != FIS_DATA) { |
1795 | dev_err(scic_to_dev(scic), | 1794 | dev_err(&ihost->pdev->dev, |
1796 | "%s: SCIC PIO Request 0x%p received frame %d " | 1795 | "%s: SCIC PIO Request 0x%p received frame %d " |
1797 | "with fis type 0x%02x when expecting a data " | 1796 | "with fis type 0x%02x when expecting a data " |
1798 | "fis.\n", | 1797 | "fis.\n", |
@@ -1808,7 +1807,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1808 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1807 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1809 | 1808 | ||
1810 | /* Frame is decoded return it to the controller */ | 1809 | /* Frame is decoded return it to the controller */ |
1811 | scic_sds_controller_release_frame(scic, frame_index); | 1810 | scic_sds_controller_release_frame(ihost, frame_index); |
1812 | return status; | 1811 | return status; |
1813 | } | 1812 | } |
1814 | 1813 | ||
@@ -1816,7 +1815,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1816 | ireq->saved_rx_frame_index = frame_index; | 1815 | ireq->saved_rx_frame_index = frame_index; |
1817 | stp_req->pio_len = 0; | 1816 | stp_req->pio_len = 0; |
1818 | } else { | 1817 | } else { |
1819 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1818 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1820 | frame_index, | 1819 | frame_index, |
1821 | (void **)&frame_buffer); | 1820 | (void **)&frame_buffer); |
1822 | 1821 | ||
@@ -1824,7 +1823,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1824 | (u8 *)frame_buffer); | 1823 | (u8 *)frame_buffer); |
1825 | 1824 | ||
1826 | /* Frame is decoded return it to the controller */ | 1825 | /* Frame is decoded return it to the controller */ |
1827 | scic_sds_controller_release_frame(scic, frame_index); | 1826 | scic_sds_controller_release_frame(ihost, frame_index); |
1828 | } | 1827 | } |
1829 | 1828 | ||
1830 | /* Check for the end of the transfer, are there more | 1829 | /* Check for the end of the transfer, are there more |
@@ -1849,11 +1848,11 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1849 | struct dev_to_host_fis *frame_header; | 1848 | struct dev_to_host_fis *frame_header; |
1850 | u32 *frame_buffer; | 1849 | u32 *frame_buffer; |
1851 | 1850 | ||
1852 | status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control, | 1851 | status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control, |
1853 | frame_index, | 1852 | frame_index, |
1854 | (void **)&frame_header); | 1853 | (void **)&frame_header); |
1855 | if (status != SCI_SUCCESS) { | 1854 | if (status != SCI_SUCCESS) { |
1856 | dev_err(scic_to_dev(scic), | 1855 | dev_err(&ihost->pdev->dev, |
1857 | "%s: SCIC IO Request 0x%p could not get frame " | 1856 | "%s: SCIC IO Request 0x%p could not get frame " |
1858 | "header for frame index %d, status %x\n", | 1857 | "header for frame index %d, status %x\n", |
1859 | __func__, | 1858 | __func__, |
@@ -1865,7 +1864,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1865 | 1864 | ||
1866 | switch (frame_header->fis_type) { | 1865 | switch (frame_header->fis_type) { |
1867 | case FIS_REGD2H: | 1866 | case FIS_REGD2H: |
1868 | scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control, | 1867 | scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1869 | frame_index, | 1868 | frame_index, |
1870 | (void **)&frame_buffer); | 1869 | (void **)&frame_buffer); |
1871 | 1870 | ||
@@ -1880,7 +1879,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1880 | break; | 1879 | break; |
1881 | 1880 | ||
1882 | default: | 1881 | default: |
1883 | dev_warn(scic_to_dev(scic), | 1882 | dev_warn(&ihost->pdev->dev, |
1884 | "%s: IO Request:0x%p Frame Id:%d protocol " | 1883 | "%s: IO Request:0x%p Frame Id:%d protocol " |
1885 | "violation occurred\n", | 1884 | "violation occurred\n", |
1886 | __func__, | 1885 | __func__, |
@@ -1896,7 +1895,7 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1896 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1895 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1897 | 1896 | ||
1898 | /* Frame has been decoded return it to the controller */ | 1897 | /* Frame has been decoded return it to the controller */ |
1899 | scic_sds_controller_release_frame(scic, frame_index); | 1898 | scic_sds_controller_release_frame(ihost, frame_index); |
1900 | 1899 | ||
1901 | return status; | 1900 | return status; |
1902 | } | 1901 | } |
@@ -1905,18 +1904,18 @@ scic_sds_io_request_frame_handler(struct isci_request *ireq, | |||
1905 | * TODO: Is it even possible to get an unsolicited frame in the | 1904 | * TODO: Is it even possible to get an unsolicited frame in the |
1906 | * aborting state? | 1905 | * aborting state? |
1907 | */ | 1906 | */ |
1908 | scic_sds_controller_release_frame(scic, frame_index); | 1907 | scic_sds_controller_release_frame(ihost, frame_index); |
1909 | return SCI_SUCCESS; | 1908 | return SCI_SUCCESS; |
1910 | 1909 | ||
1911 | default: | 1910 | default: |
1912 | dev_warn(scic_to_dev(scic), | 1911 | dev_warn(&ihost->pdev->dev, |
1913 | "%s: SCIC IO Request given unexpected frame %x while " | 1912 | "%s: SCIC IO Request given unexpected frame %x while " |
1914 | "in state %d\n", | 1913 | "in state %d\n", |
1915 | __func__, | 1914 | __func__, |
1916 | frame_index, | 1915 | frame_index, |
1917 | state); | 1916 | state); |
1918 | 1917 | ||
1919 | scic_sds_controller_release_frame(scic, frame_index); | 1918 | scic_sds_controller_release_frame(ihost, frame_index); |
1920 | return SCI_FAILURE_INVALID_STATE; | 1919 | return SCI_FAILURE_INVALID_STATE; |
1921 | } | 1920 | } |
1922 | } | 1921 | } |
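A pattern worth highlighting before the completion hunks that follow: every arm of scic_sds_io_request_frame_handler() that inspects an unsolicited frame hands the buffer back with scic_sds_controller_release_frame(ihost, ...), except the PIO data-in path that stores saved_rx_frame_index and defers the release. The self-contained sketch below (all demo_* names are hypothetical helpers, not driver API) illustrates that decode-then-always-release discipline:

#include <stdio.h>

enum demo_status { DEMO_SUCCESS, DEMO_FAILURE };

/* Pretend header lookup: odd frame indexes carry a D2H-register-style type. */
static enum demo_status demo_get_header(unsigned frame_index, int *hdr_type)
{
	*hdr_type = (frame_index & 1) ? 0x34 : 0x00;
	return DEMO_SUCCESS;
}

static void demo_release_frame(unsigned frame_index)
{
	printf("frame %u released back to the controller\n", frame_index);
}

static enum demo_status demo_frame_handler(unsigned frame_index)
{
	int hdr_type;
	enum demo_status status = demo_get_header(frame_index, &hdr_type);

	if (status == DEMO_SUCCESS && hdr_type == 0x34)
		printf("frame %u decoded\n", frame_index);

	/* decoded or not, the buffer goes back so the hardware can reuse it */
	demo_release_frame(frame_index);
	return status;
}

int main(void)
{
	demo_frame_handler(1);
	demo_frame_handler(2);
	return 0;
}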
@@ -2042,7 +2041,7 @@ scic_sds_io_request_tc_completion(struct isci_request *ireq, | |||
2042 | u32 completion_code) | 2041 | u32 completion_code) |
2043 | { | 2042 | { |
2044 | enum sci_base_request_states state; | 2043 | enum sci_base_request_states state; |
2045 | struct scic_sds_controller *scic = ireq->owning_controller; | 2044 | struct isci_host *ihost = ireq->owning_controller; |
2046 | 2045 | ||
2047 | state = ireq->sm.current_state_id; | 2046 | state = ireq->sm.current_state_id; |
2048 | 2047 | ||
@@ -2089,7 +2088,7 @@ scic_sds_io_request_tc_completion(struct isci_request *ireq, | |||
2089 | completion_code); | 2088 | completion_code); |
2090 | 2089 | ||
2091 | default: | 2090 | default: |
2092 | dev_warn(scic_to_dev(scic), | 2091 | dev_warn(&ihost->pdev->dev, |
2093 | "%s: SCIC IO Request given task completion " | 2092 | "%s: SCIC IO Request given task completion " |
2094 | "notification %x while in wrong state %d\n", | 2093 | "notification %x while in wrong state %d\n", |
2095 | __func__, | 2094 | __func__, |
@@ -2480,7 +2479,7 @@ static void isci_task_save_for_upper_layer_completion( | |||
2480 | } | 2479 | } |
2481 | } | 2480 | } |
2482 | 2481 | ||
2483 | static void isci_request_io_request_complete(struct isci_host *isci_host, | 2482 | static void isci_request_io_request_complete(struct isci_host *ihost, |
2484 | struct isci_request *request, | 2483 | struct isci_request *request, |
2485 | enum sci_io_status completion_status) | 2484 | enum sci_io_status completion_status) |
2486 | { | 2485 | { |
@@ -2495,7 +2494,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2495 | enum isci_completion_selection complete_to_host | 2494 | enum isci_completion_selection complete_to_host |
2496 | = isci_perform_normal_io_completion; | 2495 | = isci_perform_normal_io_completion; |
2497 | 2496 | ||
2498 | dev_dbg(&isci_host->pdev->dev, | 2497 | dev_dbg(&ihost->pdev->dev, |
2499 | "%s: request = %p, task = %p,\n" | 2498 | "%s: request = %p, task = %p,\n" |
2500 | "task->data_dir = %d completion_status = 0x%x\n", | 2499 | "task->data_dir = %d completion_status = 0x%x\n", |
2501 | __func__, | 2500 | __func__, |
@@ -2616,7 +2615,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2616 | switch (completion_status) { | 2615 | switch (completion_status) { |
2617 | 2616 | ||
2618 | case SCI_IO_FAILURE_RESPONSE_VALID: | 2617 | case SCI_IO_FAILURE_RESPONSE_VALID: |
2619 | dev_dbg(&isci_host->pdev->dev, | 2618 | dev_dbg(&ihost->pdev->dev, |
2620 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", | 2619 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", |
2621 | __func__, | 2620 | __func__, |
2622 | request, | 2621 | request, |
@@ -2631,17 +2630,17 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2631 | /* crack the iu response buffer. */ | 2630 | /* crack the iu response buffer. */ |
2632 | resp_iu = &request->ssp.rsp; | 2631 | resp_iu = &request->ssp.rsp; |
2633 | isci_request_process_response_iu(task, resp_iu, | 2632 | isci_request_process_response_iu(task, resp_iu, |
2634 | &isci_host->pdev->dev); | 2633 | &ihost->pdev->dev); |
2635 | 2634 | ||
2636 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { | 2635 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { |
2637 | 2636 | ||
2638 | dev_err(&isci_host->pdev->dev, | 2637 | dev_err(&ihost->pdev->dev, |
2639 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " | 2638 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " |
2640 | "SAS_PROTOCOL_SMP protocol\n", | 2639 | "SAS_PROTOCOL_SMP protocol\n", |
2641 | __func__); | 2640 | __func__); |
2642 | 2641 | ||
2643 | } else | 2642 | } else |
2644 | dev_err(&isci_host->pdev->dev, | 2643 | dev_err(&ihost->pdev->dev, |
2645 | "%s: unknown protocol\n", __func__); | 2644 | "%s: unknown protocol\n", __func__); |
2646 | 2645 | ||
2647 | /* use the task status set in the task struct by the | 2646 | /* use the task status set in the task struct by the |
@@ -2662,7 +2661,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2662 | if (task->task_proto == SAS_PROTOCOL_SMP) { | 2661 | if (task->task_proto == SAS_PROTOCOL_SMP) { |
2663 | void *rsp = &request->smp.rsp; | 2662 | void *rsp = &request->smp.rsp; |
2664 | 2663 | ||
2665 | dev_dbg(&isci_host->pdev->dev, | 2664 | dev_dbg(&ihost->pdev->dev, |
2666 | "%s: SMP protocol completion\n", | 2665 | "%s: SMP protocol completion\n", |
2667 | __func__); | 2666 | __func__); |
2668 | 2667 | ||
@@ -2687,20 +2686,20 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2687 | if (task->task_status.residual != 0) | 2686 | if (task->task_status.residual != 0) |
2688 | status = SAS_DATA_UNDERRUN; | 2687 | status = SAS_DATA_UNDERRUN; |
2689 | 2688 | ||
2690 | dev_dbg(&isci_host->pdev->dev, | 2689 | dev_dbg(&ihost->pdev->dev, |
2691 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", | 2690 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", |
2692 | __func__, | 2691 | __func__, |
2693 | status); | 2692 | status); |
2694 | 2693 | ||
2695 | } else | 2694 | } else |
2696 | dev_dbg(&isci_host->pdev->dev, | 2695 | dev_dbg(&ihost->pdev->dev, |
2697 | "%s: SCI_IO_SUCCESS\n", | 2696 | "%s: SCI_IO_SUCCESS\n", |
2698 | __func__); | 2697 | __func__); |
2699 | 2698 | ||
2700 | break; | 2699 | break; |
2701 | 2700 | ||
2702 | case SCI_IO_FAILURE_TERMINATED: | 2701 | case SCI_IO_FAILURE_TERMINATED: |
2703 | dev_dbg(&isci_host->pdev->dev, | 2702 | dev_dbg(&ihost->pdev->dev, |
2704 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", | 2703 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", |
2705 | __func__, | 2704 | __func__, |
2706 | request, | 2705 | request, |
@@ -2768,7 +2767,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2768 | 2767 | ||
2769 | default: | 2768 | default: |
2770 | /* Catch any otherwise unhandled error codes here. */ | 2769 | /* Catch any otherwise unhandled error codes here. */ |
2771 | dev_warn(&isci_host->pdev->dev, | 2770 | dev_warn(&ihost->pdev->dev, |
2772 | "%s: invalid completion code: 0x%x - " | 2771 | "%s: invalid completion code: 0x%x - " |
2773 | "isci_request = %p\n", | 2772 | "isci_request = %p\n", |
2774 | __func__, completion_status, request); | 2773 | __func__, completion_status, request); |
@@ -2802,11 +2801,11 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2802 | break; | 2801 | break; |
2803 | if (task->num_scatter == 0) | 2802 | if (task->num_scatter == 0) |
2804 | /* 0 indicates a single dma address */ | 2803 | /* 0 indicates a single dma address */ |
2805 | dma_unmap_single(&isci_host->pdev->dev, | 2804 | dma_unmap_single(&ihost->pdev->dev, |
2806 | request->zero_scatter_daddr, | 2805 | request->zero_scatter_daddr, |
2807 | task->total_xfer_len, task->data_dir); | 2806 | task->total_xfer_len, task->data_dir); |
2808 | else /* unmap the sgl dma addresses */ | 2807 | else /* unmap the sgl dma addresses */ |
2809 | dma_unmap_sg(&isci_host->pdev->dev, task->scatter, | 2808 | dma_unmap_sg(&ihost->pdev->dev, task->scatter, |
2810 | request->num_sg_entries, task->data_dir); | 2809 | request->num_sg_entries, task->data_dir); |
2811 | break; | 2810 | break; |
2812 | case SAS_PROTOCOL_SMP: { | 2811 | case SAS_PROTOCOL_SMP: { |
@@ -2814,7 +2813,7 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2814 | struct smp_req *smp_req; | 2813 | struct smp_req *smp_req; |
2815 | void *kaddr; | 2814 | void *kaddr; |
2816 | 2815 | ||
2817 | dma_unmap_sg(&isci_host->pdev->dev, sg, 1, DMA_TO_DEVICE); | 2816 | dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); |
2818 | 2817 | ||
2819 | /* need to swab it back in case the command buffer is re-used */ | 2818 | /* need to swab it back in case the command buffer is re-used */ |
2820 | kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); | 2819 | kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); |
@@ -2828,14 +2827,12 @@ static void isci_request_io_request_complete(struct isci_host *isci_host, | |||
2828 | } | 2827 | } |
2829 | 2828 | ||
2830 | /* Put the completed request on the correct list */ | 2829 | /* Put the completed request on the correct list */ |
2831 | isci_task_save_for_upper_layer_completion(isci_host, request, response, | 2830 | isci_task_save_for_upper_layer_completion(ihost, request, response, |
2832 | status, complete_to_host | 2831 | status, complete_to_host |
2833 | ); | 2832 | ); |
2834 | 2833 | ||
2835 | /* complete the io request to the core. */ | 2834 | /* complete the io request to the core. */ |
2836 | scic_controller_complete_io(&isci_host->sci, | 2835 | scic_controller_complete_io(ihost, request->target_device, request); |
2837 | request->target_device, | ||
2838 | request); | ||
2839 | isci_put_device(idev); | 2836 | isci_put_device(idev); |
2840 | 2837 | ||
2841 | /* set terminated handle so it cannot be completed or | 2838 | /* set terminated handle so it cannot be completed or |
@@ -2885,8 +2882,7 @@ static void scic_sds_request_started_state_enter(struct sci_base_state_machine * | |||
2885 | static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) | 2882 | static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm) |
2886 | { | 2883 | { |
2887 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); | 2884 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2888 | struct scic_sds_controller *scic = ireq->owning_controller; | 2885 | struct isci_host *ihost = ireq->owning_controller; |
2889 | struct isci_host *ihost = scic_to_ihost(scic); | ||
2890 | 2886 | ||
2891 | /* Tell the SCI_USER that the IO request is complete */ | 2887 | /* Tell the SCI_USER that the IO request is complete */ |
2892 | if (!test_bit(IREQ_TMF, &ireq->flags)) | 2888 | if (!test_bit(IREQ_TMF, &ireq->flags)) |
@@ -2985,7 +2981,7 @@ static const struct sci_base_state scic_sds_request_state_table[] = { | |||
2985 | }; | 2981 | }; |
2986 | 2982 | ||
2987 | static void | 2983 | static void |
2988 | scic_sds_general_request_construct(struct scic_sds_controller *scic, | 2984 | scic_sds_general_request_construct(struct isci_host *ihost, |
2989 | struct isci_remote_device *idev, | 2985 | struct isci_remote_device *idev, |
2990 | struct isci_request *ireq) | 2986 | struct isci_request *ireq) |
2991 | { | 2987 | { |
@@ -3001,7 +2997,7 @@ scic_sds_general_request_construct(struct scic_sds_controller *scic, | |||
3001 | } | 2997 | } |
3002 | 2998 | ||
3003 | static enum sci_status | 2999 | static enum sci_status |
3004 | scic_io_request_construct(struct scic_sds_controller *scic, | 3000 | scic_io_request_construct(struct isci_host *ihost, |
3005 | struct isci_remote_device *idev, | 3001 | struct isci_remote_device *idev, |
3006 | struct isci_request *ireq) | 3002 | struct isci_request *ireq) |
3007 | { | 3003 | { |
@@ -3009,7 +3005,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, | |||
3009 | enum sci_status status = SCI_SUCCESS; | 3005 | enum sci_status status = SCI_SUCCESS; |
3010 | 3006 | ||
3011 | /* Build the common part of the request */ | 3007 | /* Build the common part of the request */ |
3012 | scic_sds_general_request_construct(scic, idev, ireq); | 3008 | scic_sds_general_request_construct(ihost, idev, ireq); |
3013 | 3009 | ||
3014 | if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) | 3010 | if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) |
3015 | return SCI_FAILURE_INVALID_REMOTE_DEVICE; | 3011 | return SCI_FAILURE_INVALID_REMOTE_DEVICE; |
@@ -3028,7 +3024,7 @@ scic_io_request_construct(struct scic_sds_controller *scic, | |||
3028 | return status; | 3024 | return status; |
3029 | } | 3025 | } |
3030 | 3026 | ||
3031 | enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, | 3027 | enum sci_status scic_task_request_construct(struct isci_host *ihost, |
3032 | struct isci_remote_device *idev, | 3028 | struct isci_remote_device *idev, |
3033 | u16 io_tag, struct isci_request *ireq) | 3029 | u16 io_tag, struct isci_request *ireq) |
3034 | { | 3030 | { |
@@ -3036,7 +3032,7 @@ enum sci_status scic_task_request_construct(struct scic_sds_controller *scic, | |||
3036 | enum sci_status status = SCI_SUCCESS; | 3032 | enum sci_status status = SCI_SUCCESS; |
3037 | 3033 | ||
3038 | /* Build the common part of the request */ | 3034 | /* Build the common part of the request */ |
3039 | scic_sds_general_request_construct(scic, idev, ireq); | 3035 | scic_sds_general_request_construct(ihost, idev, ireq); |
3040 | 3036 | ||
3041 | if (dev->dev_type == SAS_END_DEV || | 3037 | if (dev->dev_type == SAS_END_DEV || |
3042 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { | 3038 | dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { |
@@ -3156,7 +3152,7 @@ scic_io_request_construct_smp(struct device *dev, | |||
3156 | task_context->initiator_request = 1; | 3152 | task_context->initiator_request = 1; |
3157 | task_context->connection_rate = idev->connection_rate; | 3153 | task_context->connection_rate = idev->connection_rate; |
3158 | task_context->protocol_engine_index = | 3154 | task_context->protocol_engine_index = |
3159 | scic_sds_controller_get_protocol_engine_group(scic); | 3155 | scic_sds_controller_get_protocol_engine_group(ihost); |
3160 | task_context->logical_port_index = scic_sds_port_get_index(iport); | 3156 | task_context->logical_port_index = scic_sds_port_get_index(iport); |
3161 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; | 3157 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; |
3162 | task_context->abort = 0; | 3158 | task_context->abort = 0; |
@@ -3199,7 +3195,7 @@ scic_io_request_construct_smp(struct device *dev, | |||
3199 | task_context->task_phase = 0; | 3195 | task_context->task_phase = 0; |
3200 | 3196 | ||
3201 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 3197 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
3202 | (scic_sds_controller_get_protocol_engine_group(scic) << | 3198 | (scic_sds_controller_get_protocol_engine_group(ihost) << |
3203 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 3199 | SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
3204 | (scic_sds_port_get_index(iport) << | 3200 | (scic_sds_port_get_index(iport) << |
3205 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 3201 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
@@ -3245,7 +3241,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) | |||
3245 | 3241 | ||
3246 | /** | 3242 | /** |
3247 | * isci_io_request_build() - This function builds the io request object. | 3243 | * isci_io_request_build() - This function builds the io request object. |
3248 | * @isci_host: This parameter specifies the ISCI host object | 3244 | * @ihost: This parameter specifies the ISCI host object |
3249 | * @request: This parameter points to the isci_request object allocated in the | 3245 | * @request: This parameter points to the isci_request object allocated in the |
3250 | * request construct function. | 3246 | * request construct function. |
3251 | * @sci_device: This parameter is the handle for the sci core's remote device | 3247 | * @sci_device: This parameter is the handle for the sci core's remote device |
@@ -3253,14 +3249,14 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq) | |||
3253 | * | 3249 | * |
3254 | * SCI_SUCCESS on successful completion, or specific failure code. | 3250 | * SCI_SUCCESS on successful completion, or specific failure code. |
3255 | */ | 3251 | */ |
3256 | static enum sci_status isci_io_request_build(struct isci_host *isci_host, | 3252 | static enum sci_status isci_io_request_build(struct isci_host *ihost, |
3257 | struct isci_request *request, | 3253 | struct isci_request *request, |
3258 | struct isci_remote_device *idev) | 3254 | struct isci_remote_device *idev) |
3259 | { | 3255 | { |
3260 | enum sci_status status = SCI_SUCCESS; | 3256 | enum sci_status status = SCI_SUCCESS; |
3261 | struct sas_task *task = isci_request_access_task(request); | 3257 | struct sas_task *task = isci_request_access_task(request); |
3262 | 3258 | ||
3263 | dev_dbg(&isci_host->pdev->dev, | 3259 | dev_dbg(&ihost->pdev->dev, |
3264 | "%s: idev = 0x%p; request = %p, " | 3260 | "%s: idev = 0x%p; request = %p, " |
3265 | "num_scatter = %d\n", | 3261 | "num_scatter = %d\n", |
3266 | __func__, | 3262 | __func__, |
@@ -3277,7 +3273,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, | |||
3277 | !(SAS_PROTOCOL_SMP & task->task_proto)) { | 3273 | !(SAS_PROTOCOL_SMP & task->task_proto)) { |
3278 | 3274 | ||
3279 | request->num_sg_entries = dma_map_sg( | 3275 | request->num_sg_entries = dma_map_sg( |
3280 | &isci_host->pdev->dev, | 3276 | &ihost->pdev->dev, |
3281 | task->scatter, | 3277 | task->scatter, |
3282 | task->num_scatter, | 3278 | task->num_scatter, |
3283 | task->data_dir | 3279 | task->data_dir |
@@ -3287,10 +3283,10 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, | |||
3287 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 3283 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; |
3288 | } | 3284 | } |
3289 | 3285 | ||
3290 | status = scic_io_request_construct(&isci_host->sci, idev, request); | 3286 | status = scic_io_request_construct(ihost, idev, request); |
3291 | 3287 | ||
3292 | if (status != SCI_SUCCESS) { | 3288 | if (status != SCI_SUCCESS) { |
3293 | dev_warn(&isci_host->pdev->dev, | 3289 | dev_warn(&ihost->pdev->dev, |
3294 | "%s: failed request construct\n", | 3290 | "%s: failed request construct\n", |
3295 | __func__); | 3291 | __func__); |
3296 | return SCI_FAILURE; | 3292 | return SCI_FAILURE; |
@@ -3309,7 +3305,7 @@ static enum sci_status isci_io_request_build(struct isci_host *isci_host, | |||
3309 | status = isci_request_stp_request_construct(request); | 3305 | status = isci_request_stp_request_construct(request); |
3310 | break; | 3306 | break; |
3311 | default: | 3307 | default: |
3312 | dev_warn(&isci_host->pdev->dev, | 3308 | dev_warn(&ihost->pdev->dev, |
3313 | "%s: unknown protocol\n", __func__); | 3309 | "%s: unknown protocol\n", __func__); |
3314 | return SCI_FAILURE; | 3310 | return SCI_FAILURE; |
3315 | } | 3311 | } |
@@ -3392,7 +3388,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3392 | * request was built that way (ie. | 3388 | * request was built that way (ie. |
3393 | * ireq->is_task_management_request is false). | 3389 | * ireq->is_task_management_request is false). |
3394 | */ | 3390 | */ |
3395 | status = scic_controller_start_task(&ihost->sci, | 3391 | status = scic_controller_start_task(ihost, |
3396 | idev, | 3392 | idev, |
3397 | ireq); | 3393 | ireq); |
3398 | } else { | 3394 | } else { |
@@ -3400,7 +3396,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
3400 | } | 3396 | } |
3401 | } else { | 3397 | } else { |
3402 | /* send the request, let the core assign the IO TAG. */ | 3398 | /* send the request, let the core assign the IO TAG. */ |
3403 | status = scic_controller_start_io(&ihost->sci, idev, | 3399 | status = scic_controller_start_io(ihost, idev, |
3404 | ireq); | 3400 | ireq); |
3405 | } | 3401 | } |
3406 | 3402 | ||
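
The request.c hunks above all make the same move: entry points such as scic_controller_start_io(), scic_controller_start_task() and scic_controller_complete_io() now take the isci_host directly, so callers drop the &ihost->sci (or &isci_host->sci) indirection into the embedded core-controller object. Below is a minimal, self-contained sketch of that flattening pattern; the struct names and fields are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdio.h>

struct isci_host {
	int id;			/* stand-in for the real host state */
	unsigned int io_count;	/* state that formerly lived in scic_sds_controller */
};

/*
 * Before the change the core entry point took the embedded core object:
 *     scic_controller_start_io(&ihost->sci, idev, ireq);
 * With the core state folded into isci_host it takes the host itself:
 *     scic_controller_start_io(ihost, idev, ireq);
 */
static int scic_controller_start_io(struct isci_host *ihost)
{
	ihost->io_count++;
	return 0;		/* stands in for SCI_SUCCESS */
}

int main(void)
{
	struct isci_host host = { .id = 0, .io_count = 0 };

	scic_controller_start_io(&host);	/* no &host.sci indirection left */
	printf("host %d started %u I/Os\n", host.id, host.io_count);
	return 0;
}
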
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index ca64ea207ac8..0cafcead7a01 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h | |||
@@ -145,7 +145,7 @@ struct isci_request { | |||
145 | */ | 145 | */ |
146 | struct completion *io_request_completion; | 146 | struct completion *io_request_completion; |
147 | struct sci_base_state_machine sm; | 147 | struct sci_base_state_machine sm; |
148 | struct scic_sds_controller *owning_controller; | 148 | struct isci_host *owning_controller; |
149 | struct isci_remote_device *target_device; | 149 | struct isci_remote_device *target_device; |
150 | u16 io_tag; | 150 | u16 io_tag; |
151 | enum sci_request_protocol protocol; | 151 | enum sci_request_protocol protocol; |
@@ -500,7 +500,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide | |||
500 | void isci_terminate_pending_requests(struct isci_host *ihost, | 500 | void isci_terminate_pending_requests(struct isci_host *ihost, |
501 | struct isci_remote_device *idev); | 501 | struct isci_remote_device *idev); |
502 | enum sci_status | 502 | enum sci_status |
503 | scic_task_request_construct(struct scic_sds_controller *scic, | 503 | scic_task_request_construct(struct isci_host *ihost, |
504 | struct isci_remote_device *idev, | 504 | struct isci_remote_device *idev, |
505 | u16 io_tag, | 505 | u16 io_tag, |
506 | struct isci_request *ireq); | 506 | struct isci_request *ireq); |
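
The request.h hunk changes isci_request->owning_controller from struct scic_sds_controller * to struct isci_host *, which is what lets the state-machine callback earlier in this patch drop its scic_to_ihost() conversion. The sketch below shows that shape with a userspace container_of(); the types are illustrative stand-ins, only the access pattern mirrors the patch.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sci_base_state_machine { int state; };
struct isci_host { const char *name; };

struct isci_request {
	struct sci_base_state_machine sm;
	struct isci_host *owning_controller;	/* now the host, not the core object */
};

static void request_completed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, struct isci_request, sm);
	struct isci_host *ihost = ireq->owning_controller;	/* no scic_to_ihost() step */

	printf("request completed on %s\n", ihost->name);
}

int main(void)
{
	struct isci_host host = { .name = "isci0" };
	struct isci_request req = { .owning_controller = &host };

	request_completed_state_enter(&req.sm);
	return 0;
}
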
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 89b01eef44b1..3a1fc55a7557 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -257,7 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, | |||
257 | return NULL; | 257 | return NULL; |
258 | 258 | ||
259 | /* let the core do its construct. */ | 259 | /* let the core do its construct. */ |
260 | status = scic_task_request_construct(&ihost->sci, idev, tag, | 260 | status = scic_task_request_construct(ihost, idev, tag, |
261 | ireq); | 261 | ireq); |
262 | 262 | ||
263 | if (status != SCI_SUCCESS) { | 263 | if (status != SCI_SUCCESS) { |
@@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
332 | spin_lock_irqsave(&ihost->scic_lock, flags); | 332 | spin_lock_irqsave(&ihost->scic_lock, flags); |
333 | 333 | ||
334 | /* start the TMF io. */ | 334 | /* start the TMF io. */ |
335 | status = scic_controller_start_task(&ihost->sci, idev, ireq); | 335 | status = scic_controller_start_task(ihost, idev, ireq); |
336 | 336 | ||
337 | if (status != SCI_TASK_SUCCESS) { | 337 | if (status != SCI_TASK_SUCCESS) { |
338 | dev_warn(&ihost->pdev->dev, | 338 | dev_warn(&ihost->pdev->dev, |
@@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, | |||
364 | if (tmf->cb_state_func != NULL) | 364 | if (tmf->cb_state_func != NULL) |
365 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); | 365 | tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data); |
366 | 366 | ||
367 | scic_controller_terminate_request(&ihost->sci, | 367 | scic_controller_terminate_request(ihost, |
368 | idev, | 368 | idev, |
369 | ireq); | 369 | ireq); |
370 | 370 | ||
@@ -514,15 +514,14 @@ static void isci_request_cleanup_completed_loiterer( | |||
514 | * request, and wait for it to complete. This function must only be called | 514 | * request, and wait for it to complete. This function must only be called |
515 | * from a thread that can wait. Note that the request is terminated and | 515 | * from a thread that can wait. Note that the request is terminated and |
516 | * completed (back to the host, if started there). | 516 | * completed (back to the host, if started there). |
517 | * @isci_host: This SCU. | 517 | * @ihost: This SCU. |
518 | * @idev: The target. | 518 | * @idev: The target. |
519 | * @isci_request: The I/O request to be terminated. | 519 | * @isci_request: The I/O request to be terminated. |
520 | * | 520 | * |
521 | */ | 521 | */ |
522 | static void isci_terminate_request_core( | 522 | static void isci_terminate_request_core(struct isci_host *ihost, |
523 | struct isci_host *isci_host, | 523 | struct isci_remote_device *idev, |
524 | struct isci_remote_device *idev, | 524 | struct isci_request *isci_request) |
525 | struct isci_request *isci_request) | ||
526 | { | 525 | { |
527 | enum sci_status status = SCI_SUCCESS; | 526 | enum sci_status status = SCI_SUCCESS; |
528 | bool was_terminated = false; | 527 | bool was_terminated = false; |
@@ -533,11 +532,11 @@ static void isci_terminate_request_core( | |||
533 | struct completion *io_request_completion; | 532 | struct completion *io_request_completion; |
534 | struct sas_task *task; | 533 | struct sas_task *task; |
535 | 534 | ||
536 | dev_dbg(&isci_host->pdev->dev, | 535 | dev_dbg(&ihost->pdev->dev, |
537 | "%s: device = %p; request = %p\n", | 536 | "%s: device = %p; request = %p\n", |
538 | __func__, idev, isci_request); | 537 | __func__, idev, isci_request); |
539 | 538 | ||
540 | spin_lock_irqsave(&isci_host->scic_lock, flags); | 539 | spin_lock_irqsave(&ihost->scic_lock, flags); |
541 | 540 | ||
542 | io_request_completion = isci_request->io_request_completion; | 541 | io_request_completion = isci_request->io_request_completion; |
543 | 542 | ||
@@ -557,12 +556,11 @@ static void isci_terminate_request_core( | |||
557 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { | 556 | if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { |
558 | was_terminated = true; | 557 | was_terminated = true; |
559 | needs_cleanup_handling = true; | 558 | needs_cleanup_handling = true; |
560 | status = scic_controller_terminate_request( | 559 | status = scic_controller_terminate_request(ihost, |
561 | &isci_host->sci, | 560 | idev, |
562 | idev, | 561 | isci_request); |
563 | isci_request); | ||
564 | } | 562 | } |
565 | spin_unlock_irqrestore(&isci_host->scic_lock, flags); | 563 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
566 | 564 | ||
567 | /* | 565 | /* |
568 | * The only time the request to terminate will | 566 | * The only time the request to terminate will |
@@ -570,7 +568,7 @@ static void isci_terminate_request_core( | |||
570 | * being aborted. | 568 | * being aborted. |
571 | */ | 569 | */ |
572 | if (status != SCI_SUCCESS) { | 570 | if (status != SCI_SUCCESS) { |
573 | dev_err(&isci_host->pdev->dev, | 571 | dev_err(&ihost->pdev->dev, |
574 | "%s: scic_controller_terminate_request" | 572 | "%s: scic_controller_terminate_request" |
575 | " returned = 0x%x\n", | 573 | " returned = 0x%x\n", |
576 | __func__, status); | 574 | __func__, status); |
@@ -579,7 +577,7 @@ static void isci_terminate_request_core( | |||
579 | 577 | ||
580 | } else { | 578 | } else { |
581 | if (was_terminated) { | 579 | if (was_terminated) { |
582 | dev_dbg(&isci_host->pdev->dev, | 580 | dev_dbg(&ihost->pdev->dev, |
583 | "%s: before completion wait (%p/%p)\n", | 581 | "%s: before completion wait (%p/%p)\n", |
584 | __func__, isci_request, io_request_completion); | 582 | __func__, isci_request, io_request_completion); |
585 | 583 | ||
@@ -593,7 +591,7 @@ static void isci_terminate_request_core( | |||
593 | if (!termination_completed) { | 591 | if (!termination_completed) { |
594 | 592 | ||
595 | /* The request to terminate has timed out. */ | 593 | /* The request to terminate has timed out. */ |
596 | spin_lock_irqsave(&isci_host->scic_lock, | 594 | spin_lock_irqsave(&ihost->scic_lock, |
597 | flags); | 595 | flags); |
598 | 596 | ||
599 | /* Check for state changes. */ | 597 | /* Check for state changes. */ |
@@ -623,12 +621,12 @@ static void isci_terminate_request_core( | |||
623 | } else | 621 | } else |
624 | termination_completed = 1; | 622 | termination_completed = 1; |
625 | 623 | ||
626 | spin_unlock_irqrestore(&isci_host->scic_lock, | 624 | spin_unlock_irqrestore(&ihost->scic_lock, |
627 | flags); | 625 | flags); |
628 | 626 | ||
629 | if (!termination_completed) { | 627 | if (!termination_completed) { |
630 | 628 | ||
631 | dev_err(&isci_host->pdev->dev, | 629 | dev_err(&ihost->pdev->dev, |
632 | "%s: *** Timeout waiting for " | 630 | "%s: *** Timeout waiting for " |
633 | "termination(%p/%p)\n", | 631 | "termination(%p/%p)\n", |
634 | __func__, io_request_completion, | 632 | __func__, io_request_completion, |
@@ -642,7 +640,7 @@ static void isci_terminate_request_core( | |||
642 | } | 640 | } |
643 | } | 641 | } |
644 | if (termination_completed) | 642 | if (termination_completed) |
645 | dev_dbg(&isci_host->pdev->dev, | 643 | dev_dbg(&ihost->pdev->dev, |
646 | "%s: after completion wait (%p/%p)\n", | 644 | "%s: after completion wait (%p/%p)\n", |
647 | __func__, isci_request, io_request_completion); | 645 | __func__, isci_request, io_request_completion); |
648 | } | 646 | } |
@@ -678,7 +676,7 @@ static void isci_terminate_request_core( | |||
678 | } | 676 | } |
679 | if (needs_cleanup_handling) | 677 | if (needs_cleanup_handling) |
680 | isci_request_cleanup_completed_loiterer( | 678 | isci_request_cleanup_completed_loiterer( |
681 | isci_host, idev, isci_request, task); | 679 | ihost, idev, isci_request, task); |
682 | } | 680 | } |
683 | } | 681 | } |
684 | 682 | ||
@@ -1253,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost, | |||
1253 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ | 1251 | /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ |
1254 | tmf_complete = tmf->complete; | 1252 | tmf_complete = tmf->complete; |
1255 | 1253 | ||
1256 | scic_controller_complete_io(&ihost->sci, ireq->target_device, ireq); | 1254 | scic_controller_complete_io(ihost, ireq->target_device, ireq); |
1257 | /* set the 'terminated' flag handle to make sure it cannot be terminated | 1255 | /* set the 'terminated' flag handle to make sure it cannot be terminated |
1258 | * or completed again. | 1256 | * or completed again. |
1259 | */ | 1257 | */ |
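
The task.c hunks keep the original flow of isci_terminate_request_core(): the terminate request is issued to the core while holding ihost->scic_lock, the lock is released, and only then does the thread sleep on the request's completion with a timeout (the wait call itself sits outside the hunks shown). A minimal userspace sketch of that "issue under the lock, wait outside it" ordering follows, with pthread primitives standing in for the spinlock and struct completion; build with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t scic_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for ihost->scic_lock */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static bool terminated;

static void *irq_path(void *arg)
{
	(void)arg;
	usleep(1000);				/* hardware finishes the terminate */
	pthread_mutex_lock(&done_lock);
	terminated = true;			/* complete(io_request_completion) */
	pthread_cond_signal(&done_cond);
	pthread_mutex_unlock(&done_lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, irq_path, NULL);

	pthread_mutex_lock(&scic_lock);
	/* scic_controller_terminate_request(ihost, idev, ireq) is issued here */
	pthread_mutex_unlock(&scic_lock);	/* drop the lock before sleeping */

	pthread_mutex_lock(&done_lock);		/* the driver waits with a timeout instead */
	while (!terminated)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	puts("termination completed");
	pthread_join(irq, NULL);
	return 0;
}
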
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 680582d8cde5..a0e6f89fc6a1 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c | |||
@@ -57,9 +57,9 @@ | |||
57 | #include "unsolicited_frame_control.h" | 57 | #include "unsolicited_frame_control.h" |
58 | #include "registers.h" | 58 | #include "registers.h" |
59 | 59 | ||
60 | int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *scic) | 60 | int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost) |
61 | { | 61 | { |
62 | struct scic_sds_unsolicited_frame_control *uf_control = &scic->uf_control; | 62 | struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control; |
63 | struct scic_sds_unsolicited_frame *uf; | 63 | struct scic_sds_unsolicited_frame *uf; |
64 | u32 buf_len, header_len, i; | 64 | u32 buf_len, header_len, i; |
65 | dma_addr_t dma; | 65 | dma_addr_t dma; |
@@ -79,7 +79,7 @@ int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *sci | |||
79 | * memory descriptor entry. The headers and address table will be | 79 | * memory descriptor entry. The headers and address table will be |
80 | * placed after the buffers. | 80 | * placed after the buffers. |
81 | */ | 81 | */ |
82 | virt = dmam_alloc_coherent(scic_to_dev(scic), size, &dma, GFP_KERNEL); | 82 | virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); |
83 | if (!virt) | 83 | if (!virt) |
84 | return -ENOMEM; | 84 | return -ENOMEM; |
85 | 85 | ||
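
The unsolicited_frame_control.c hunk passes &ihost->pdev->dev straight to dmam_alloc_coherent() instead of going through scic_to_dev(scic); the surrounding code still carves one coherent allocation into frame buffers first, then headers and an address table, as the comment above the call notes. The sketch below only illustrates that single-allocation layout; malloc() stands in for the DMA allocation and all counts and sizes are made up.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define UF_COUNT	4
#define UF_BUF_LEN	1024u	/* per-frame data buffer, illustrative */
#define UF_HDR_LEN	64u	/* per-frame header, illustrative */

int main(void)
{
	size_t buf_len = UF_COUNT * UF_BUF_LEN;
	size_t header_len = UF_COUNT * UF_HDR_LEN;
	size_t table_len = UF_COUNT * sizeof(uint64_t);
	size_t size = buf_len + header_len + table_len;

	unsigned char *virt = malloc(size);	/* dmam_alloc_coherent(&ihost->pdev->dev, ...) */
	if (!virt)
		return -1;			/* -ENOMEM in the driver */

	unsigned char *buffers = virt;			/* frame data comes first */
	unsigned char *headers = virt + buf_len;	/* headers follow the buffers */
	uint64_t *address_table = (uint64_t *)(virt + buf_len + header_len);

	printf("buffers@%p headers@%p table@%p\n",
	       (void *)buffers, (void *)headers, (void *)address_table);
	free(virt);
	return 0;
}
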
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 2954904f025a..c0285a3db562 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h | |||
@@ -214,9 +214,9 @@ struct scic_sds_unsolicited_frame_control { | |||
214 | 214 | ||
215 | }; | 215 | }; |
216 | 216 | ||
217 | struct scic_sds_controller; | 217 | struct isci_host; |
218 | 218 | ||
219 | int scic_sds_unsolicited_frame_control_construct(struct scic_sds_controller *scic); | 219 | int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost); |
220 | 220 | ||
221 | enum sci_status scic_sds_unsolicited_frame_control_get_header( | 221 | enum sci_status scic_sds_unsolicited_frame_control_get_header( |
222 | struct scic_sds_unsolicited_frame_control *uf_control, | 222 | struct scic_sds_unsolicited_frame_control *uf_control, |