aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/isci/host.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2012-02-15 16:58:42 -0500
committerDan Williams <dan.j.williams@intel.com>2012-05-17 15:27:12 -0400
commitabec912d71c44bbd642ce12ad98aab76f5a53163 (patch)
treed8967b23a5a4ea8302b43d4db5e0cd09d21d34d3 /drivers/scsi/isci/host.c
parentae904d15cf344bcb426f63982016f6bacc45825b (diff)
isci: refactor initialization for S3/S4
Based on an original implementation by Ed Nadolski and Artur Wojcik In preparation for S3/S4 support refactor initialization so that driver-load and resume-from-suspend can share the common init path of isci_host_init(). Organize the initialization into objects that are self-contained to the driver (initialized by isci_host_init) versus those that have some upward registration (initialized at allocation time asd_sas_phy, asd_sas_port, dma allocations). The largest change is moving the the validation of the oem and module parameters from isci_host_init() to isci_host_alloc(). The S3/S4 approach being taken is that libsas will be tasked with remembering the state of the domain and the lldd is free to be forgetful. In the case of isci we'll just re-init using a subset of the normal driver load path. [clean up some unused / mis-indented function definitions in host.h] Signed-off-by: Ed Nadolski <edmund.nadolski@intel.com> Signed-off-by: Artur Wojcik <artur.wojcik@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/isci/host.c')
-rw-r--r--drivers/scsi/isci/host.c313
1 file changed, 61 insertions, 252 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index bbec1982d07f..0fe372f93289 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1074,7 +1074,7 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
1074 * @data: This parameter specifies the ISCI host object 1074 * @data: This parameter specifies the ISCI host object
1075 * 1075 *
1076 */ 1076 */
1077static void isci_host_completion_routine(unsigned long data) 1077void isci_host_completion_routine(unsigned long data)
1078{ 1078{
1079 struct isci_host *ihost = (struct isci_host *)data; 1079 struct isci_host *ihost = (struct isci_host *)data;
1080 struct list_head completed_request_list; 1080 struct list_head completed_request_list;
@@ -1317,29 +1317,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
1317 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; 1317 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1318} 1318}
1319 1319
1320static void isci_user_parameters_get(struct sci_user_parameters *u)
1321{
1322 int i;
1323
1324 for (i = 0; i < SCI_MAX_PHYS; i++) {
1325 struct sci_phy_user_params *u_phy = &u->phys[i];
1326
1327 u_phy->max_speed_generation = phy_gen;
1328
1329 /* we are not exporting these for now */
1330 u_phy->align_insertion_frequency = 0x7f;
1331 u_phy->in_connection_align_insertion_frequency = 0xff;
1332 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1333 }
1334
1335 u->stp_inactivity_timeout = stp_inactive_to;
1336 u->ssp_inactivity_timeout = ssp_inactive_to;
1337 u->stp_max_occupancy_timeout = stp_max_occ_to;
1338 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1339 u->no_outbound_task_timeout = no_outbound_task_to;
1340 u->max_concurr_spinup = max_concurr_spinup;
1341}
1342
1343static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) 1320static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1344{ 1321{
1345 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1322 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1648,55 +1625,6 @@ static const struct sci_base_state sci_controller_state_table[] = {
1648 [SCIC_FAILED] = {} 1625 [SCIC_FAILED] = {}
1649}; 1626};
1650 1627
1651static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1652{
1653 /* these defaults are overridden by the platform / firmware */
1654 u16 index;
1655
1656 /* Default to APC mode. */
1657 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1658
1659 /* Default to APC mode. */
1660 ihost->oem_parameters.controller.max_concurr_spin_up = 1;
1661
1662 /* Default to no SSC operation. */
1663 ihost->oem_parameters.controller.do_enable_ssc = false;
1664
1665 /* Default to short cables on all phys. */
1666 ihost->oem_parameters.controller.cable_selection_mask = 0;
1667
1668 /* Initialize all of the port parameter information to narrow ports. */
1669 for (index = 0; index < SCI_MAX_PORTS; index++) {
1670 ihost->oem_parameters.ports[index].phy_mask = 0;
1671 }
1672
1673 /* Initialize all of the phy parameter information. */
1674 for (index = 0; index < SCI_MAX_PHYS; index++) {
1675 /* Default to 3G (i.e. Gen 2). */
1676 ihost->user_parameters.phys[index].max_speed_generation =
1677 SCIC_SDS_PARM_GEN2_SPEED;
1678
1679 /* the frequencies cannot be 0 */
1680 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1681 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1682 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1683
1684 /*
1685 * Previous Vitesse based expanders had a arbitration issue that
1686 * is worked around by having the upper 32-bits of SAS address
1687 * with a value greater then the Vitesse company identifier.
1688 * Hence, usage of 0x5FCFFFFF. */
1689 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1690 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1691 }
1692
1693 ihost->user_parameters.stp_inactivity_timeout = 5;
1694 ihost->user_parameters.ssp_inactivity_timeout = 5;
1695 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1696 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1697 ihost->user_parameters.no_outbound_task_timeout = 2;
1698}
1699
1700static void controller_timeout(unsigned long data) 1628static void controller_timeout(unsigned long data)
1701{ 1629{
1702 struct sci_timer *tmr = (struct sci_timer *)data; 1630 struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1753,9 +1681,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1753 1681
1754 sci_init_timer(&ihost->timer, controller_timeout); 1682 sci_init_timer(&ihost->timer, controller_timeout);
1755 1683
1756 /* Initialize the User and OEM parameters to default values. */
1757 sci_controller_set_default_config_parameters(ihost);
1758
1759 return sci_controller_reset(ihost); 1684 return sci_controller_reset(ihost);
1760} 1685}
1761 1686
@@ -1835,27 +1760,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1835 return 0; 1760 return 0;
1836} 1761}
1837 1762
1838static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1839{
1840 u32 state = ihost->sm.current_state_id;
1841 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1842
1843 if (state == SCIC_RESET ||
1844 state == SCIC_INITIALIZING ||
1845 state == SCIC_INITIALIZED) {
1846 u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
1847 ISCI_ROM_VER_1_0;
1848
1849 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1850 oem_version))
1851 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1852
1853 return SCI_SUCCESS;
1854 }
1855
1856 return SCI_FAILURE_INVALID_STATE;
1857}
1858
1859static u8 max_spin_up(struct isci_host *ihost) 1763static u8 max_spin_up(struct isci_host *ihost)
1860{ 1764{
1861 if (ihost->user_parameters.max_concurr_spinup) 1765 if (ihost->user_parameters.max_concurr_spinup)
@@ -2372,96 +2276,77 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2372 return result; 2276 return result;
2373} 2277}
2374 2278
2375static enum sci_status sci_user_parameters_set(struct isci_host *ihost, 2279static int sci_controller_dma_alloc(struct isci_host *ihost)
2376 struct sci_user_parameters *sci_parms)
2377{
2378 u32 state = ihost->sm.current_state_id;
2379
2380 if (state == SCIC_RESET ||
2381 state == SCIC_INITIALIZING ||
2382 state == SCIC_INITIALIZED) {
2383 u16 index;
2384
2385 /*
2386 * Validate the user parameters. If they are not legal, then
2387 * return a failure.
2388 */
2389 for (index = 0; index < SCI_MAX_PHYS; index++) {
2390 struct sci_phy_user_params *user_phy;
2391
2392 user_phy = &sci_parms->phys[index];
2393
2394 if (!((user_phy->max_speed_generation <=
2395 SCIC_SDS_PARM_MAX_SPEED) &&
2396 (user_phy->max_speed_generation >
2397 SCIC_SDS_PARM_NO_SPEED)))
2398 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2399
2400 if (user_phy->in_connection_align_insertion_frequency <
2401 3)
2402 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2403
2404 if ((user_phy->in_connection_align_insertion_frequency <
2405 3) ||
2406 (user_phy->align_insertion_frequency == 0) ||
2407 (user_phy->
2408 notify_enable_spin_up_insertion_frequency ==
2409 0))
2410 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2411 }
2412
2413 if ((sci_parms->stp_inactivity_timeout == 0) ||
2414 (sci_parms->ssp_inactivity_timeout == 0) ||
2415 (sci_parms->stp_max_occupancy_timeout == 0) ||
2416 (sci_parms->ssp_max_occupancy_timeout == 0) ||
2417 (sci_parms->no_outbound_task_timeout == 0))
2418 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2419
2420 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
2421
2422 return SCI_SUCCESS;
2423 }
2424
2425 return SCI_FAILURE_INVALID_STATE;
2426}
2427
2428static int sci_controller_mem_init(struct isci_host *ihost)
2429{ 2280{
2430 struct device *dev = &ihost->pdev->dev; 2281 struct device *dev = &ihost->pdev->dev;
2431 dma_addr_t dma;
2432 size_t size; 2282 size_t size;
2433 int err; 2283 int i;
2284
2285 /* detect re-initialization */
2286 if (ihost->completion_queue)
2287 return 0;
2434 2288
2435 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); 2289 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2436 ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2290 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2291 GFP_KERNEL);
2437 if (!ihost->completion_queue) 2292 if (!ihost->completion_queue)
2438 return -ENOMEM; 2293 return -ENOMEM;
2439 2294
2440 writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
2441 writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
2442
2443 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); 2295 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2444 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, 2296 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2445 GFP_KERNEL); 2297 GFP_KERNEL);
2298
2446 if (!ihost->remote_node_context_table) 2299 if (!ihost->remote_node_context_table)
2447 return -ENOMEM; 2300 return -ENOMEM;
2448 2301
2449 writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
2450 writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
2451
2452 size = ihost->task_context_entries * sizeof(struct scu_task_context), 2302 size = ihost->task_context_entries * sizeof(struct scu_task_context),
2453 ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 2303 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2304 GFP_KERNEL);
2454 if (!ihost->task_context_table) 2305 if (!ihost->task_context_table)
2455 return -ENOMEM; 2306 return -ENOMEM;
2456 2307
2457 ihost->task_context_dma = dma; 2308 size = SCI_UFI_TOTAL_SIZE;
2458 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); 2309 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2459 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); 2310 if (!ihost->ufi_buf)
2311 return -ENOMEM;
2312
2313 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2314 struct isci_request *ireq;
2315 dma_addr_t dma;
2316
2317 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2318 if (!ireq)
2319 return -ENOMEM;
2320
2321 ireq->tc = &ihost->task_context_table[i];
2322 ireq->owning_controller = ihost;
2323 spin_lock_init(&ireq->state_lock);
2324 ireq->request_daddr = dma;
2325 ireq->isci_host = ihost;
2326 ihost->reqs[i] = ireq;
2327 }
2328
2329 return 0;
2330}
2331
2332static int sci_controller_mem_init(struct isci_host *ihost)
2333{
2334 int err = sci_controller_dma_alloc(ihost);
2460 2335
2461 err = sci_unsolicited_frame_control_construct(ihost);
2462 if (err) 2336 if (err)
2463 return err; 2337 return err;
2464 2338
2339 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2340 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2341
2342 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2343 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2344
2345 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2346 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2347
2348 sci_unsolicited_frame_control_construct(ihost);
2349
2465 /* 2350 /*
2466 * Inform the silicon as to the location of the UF headers and 2351 * Inform the silicon as to the location of the UF headers and
2467 * address table. 2352 * address table.
@@ -2479,19 +2364,20 @@ static int sci_controller_mem_init(struct isci_host *ihost)
2479 return 0; 2364 return 0;
2480} 2365}
2481 2366
2367/**
2368 * isci_host_init - (re-)initialize hardware and internal (private) state
2369 * @ihost: host to init
2370 *
2371 * Any public facing objects (like asd_sas_port, and asd_sas_phys), or
2372 * one-time initialization objects like locks and waitqueues, are
2373 * not touched (they are initialized in isci_host_alloc)
2374 */
2482int isci_host_init(struct isci_host *ihost) 2375int isci_host_init(struct isci_host *ihost)
2483{ 2376{
2484 int err = 0, i; 2377 int i, err;
2485 enum sci_status status; 2378 enum sci_status status;
2486 struct sci_user_parameters sci_user_params;
2487 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
2488
2489 spin_lock_init(&ihost->scic_lock);
2490 init_waitqueue_head(&ihost->eventq);
2491
2492 status = sci_controller_construct(ihost, scu_base(ihost),
2493 smu_base(ihost));
2494 2379
2380 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2495 if (status != SCI_SUCCESS) { 2381 if (status != SCI_SUCCESS) {
2496 dev_err(&ihost->pdev->dev, 2382 dev_err(&ihost->pdev->dev,
2497 "%s: sci_controller_construct failed - status = %x\n", 2383 "%s: sci_controller_construct failed - status = %x\n",
@@ -2500,48 +2386,6 @@ int isci_host_init(struct isci_host *ihost)
2500 return -ENODEV; 2386 return -ENODEV;
2501 } 2387 }
2502 2388
2503 ihost->sas_ha.dev = &ihost->pdev->dev;
2504 ihost->sas_ha.lldd_ha = ihost;
2505
2506 /*
2507 * grab initial values stored in the controller object for OEM and USER
2508 * parameters
2509 */
2510 isci_user_parameters_get(&sci_user_params);
2511 status = sci_user_parameters_set(ihost, &sci_user_params);
2512 if (status != SCI_SUCCESS) {
2513 dev_warn(&ihost->pdev->dev,
2514 "%s: sci_user_parameters_set failed\n",
2515 __func__);
2516 return -ENODEV;
2517 }
2518
2519 /* grab any OEM parameters specified in orom */
2520 if (pci_info->orom) {
2521 status = isci_parse_oem_parameters(&ihost->oem_parameters,
2522 pci_info->orom,
2523 ihost->id);
2524 if (status != SCI_SUCCESS) {
2525 dev_warn(&ihost->pdev->dev,
2526 "parsing firmware oem parameters failed\n");
2527 return -EINVAL;
2528 }
2529 }
2530
2531 status = sci_oem_parameters_set(ihost);
2532 if (status != SCI_SUCCESS) {
2533 dev_warn(&ihost->pdev->dev,
2534 "%s: sci_oem_parameters_set failed\n",
2535 __func__);
2536 return -ENODEV;
2537 }
2538
2539 tasklet_init(&ihost->completion_tasklet,
2540 isci_host_completion_routine, (unsigned long)ihost);
2541
2542 INIT_LIST_HEAD(&ihost->requests_to_complete);
2543 INIT_LIST_HEAD(&ihost->requests_to_errorback);
2544
2545 spin_lock_irq(&ihost->scic_lock); 2389 spin_lock_irq(&ihost->scic_lock);
2546 status = sci_controller_initialize(ihost); 2390 status = sci_controller_initialize(ihost);
2547 spin_unlock_irq(&ihost->scic_lock); 2391 spin_unlock_irq(&ihost->scic_lock);
@@ -2557,47 +2401,12 @@ int isci_host_init(struct isci_host *ihost)
2557 if (err) 2401 if (err)
2558 return err; 2402 return err;
2559 2403
2560 for (i = 0; i < SCI_MAX_PORTS; i++) {
2561 struct isci_port *iport = &ihost->ports[i];
2562
2563 INIT_LIST_HEAD(&iport->remote_dev_list);
2564 iport->isci_host = ihost;
2565 }
2566
2567 for (i = 0; i < SCI_MAX_PHYS; i++)
2568 isci_phy_init(&ihost->phys[i], ihost, i);
2569
2570 /* enable sgpio */ 2404 /* enable sgpio */
2571 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); 2405 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2572 for (i = 0; i < isci_gpio_count(ihost); i++) 2406 for (i = 0; i < isci_gpio_count(ihost); i++)
2573 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); 2407 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2574 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); 2408 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2575 2409
2576 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2577 struct isci_remote_device *idev = &ihost->devices[i];
2578
2579 INIT_LIST_HEAD(&idev->reqs_in_process);
2580 INIT_LIST_HEAD(&idev->node);
2581 }
2582
2583 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2584 struct isci_request *ireq;
2585 dma_addr_t dma;
2586
2587 ireq = dmam_alloc_coherent(&ihost->pdev->dev,
2588 sizeof(struct isci_request), &dma,
2589 GFP_KERNEL);
2590 if (!ireq)
2591 return -ENOMEM;
2592
2593 ireq->tc = &ihost->task_context_table[i];
2594 ireq->owning_controller = ihost;
2595 spin_lock_init(&ireq->state_lock);
2596 ireq->request_daddr = dma;
2597 ireq->isci_host = ihost;
2598 ihost->reqs[i] = ireq;
2599 }
2600
2601 return 0; 2410 return 0;
2602} 2411}
2603 2412