Diffstat (limited to 'drivers'):
 drivers/scsi/ipr.c | 201
 drivers/scsi/ipr.h |  18
 2 files changed, 210 insertions, 9 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8817ea06adbf..7149aada3f3c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -91,6 +91,7 @@ static unsigned int ipr_max_speed = 1;
 static int ipr_testmode = 0;
 static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
+static unsigned int ipr_enable_cache = 1;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
@@ -150,6 +151,8 @@ module_param_named(fastfail, ipr_fastfail, int, 0);
 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
+module_param_named(enable_cache, ipr_enable_cache, int, 0);
+MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -1937,6 +1940,103 @@ static struct bin_attribute ipr_trace_attr = {
 };
 #endif
 
+static const struct {
+        enum ipr_cache_state state;
+        char *name;
+} cache_state [] = {
+        { CACHE_NONE, "none" },
+        { CACHE_DISABLED, "disabled" },
+        { CACHE_ENABLED, "enabled" }
+};
+
+/**
+ * ipr_show_write_caching - Show the write caching attribute
+ * @class_dev:  class device struct
+ * @buf:        buffer
+ *
+ * Return value:
+ *      number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
+{
+        struct Scsi_Host *shost = class_to_shost(class_dev);
+        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+        unsigned long lock_flags = 0;
+        int i, len = 0;
+
+        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+        for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
+                if (cache_state[i].state == ioa_cfg->cache_state) {
+                        len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+        return len;
+}
+
+
+/**
+ * ipr_store_write_caching - Enable/disable adapter write cache
+ * @class_dev:  class_device struct
+ * @buf:        buffer
+ * @count:      buffer size
+ *
+ * This function will enable/disable adapter write cache.
+ *
+ * Return value:
+ *      count on success / other on failure
+ **/
+static ssize_t ipr_store_write_caching(struct class_device *class_dev,
+                                        const char *buf, size_t count)
+{
+        struct Scsi_Host *shost = class_to_shost(class_dev);
+        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+        unsigned long lock_flags = 0;
+        enum ipr_cache_state new_state = CACHE_INVALID;
+        int i;
+
+        if (!capable(CAP_SYS_ADMIN))
+                return -EACCES;
+        if (ioa_cfg->cache_state == CACHE_NONE)
+                return -EINVAL;
+
+        for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
+                if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
+                        new_state = cache_state[i].state;
+                        break;
+                }
+        }
+
+        if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
+                return -EINVAL;
+
+        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+        if (ioa_cfg->cache_state == new_state) {
+                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                return count;
+        }
+
+        ioa_cfg->cache_state = new_state;
+        dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
+                 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
+        if (!ioa_cfg->in_reset_reload)
+                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+        return count;
+}
+
+static struct class_device_attribute ipr_ioa_cache_attr = {
+        .attr = {
+                .name =         "write_cache",
+                .mode =         S_IRUGO | S_IWUSR,
+        },
+        .show = ipr_show_write_caching,
+        .store = ipr_store_write_caching
+};
+
 /**
  * ipr_show_fw_version - Show the firmware version
  * @class_dev:  class device struct
@@ -2406,6 +2506,7 @@ static struct class_device_attribute *ipr_ioa_attrs[] = {
         &ipr_diagnostics_attr,
         &ipr_ioa_reset_attr,
         &ipr_update_fw_attr,
+        &ipr_ioa_cache_attr,
         NULL,
 };
 
@@ -4148,6 +4249,36 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_setup_write_cache - Disable write cache if needed
+ * @ipr_cmd:    ipr command struct
+ *
+ * This function sets up adapters write cache to desired setting
+ *
+ * Return value:
+ *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
+{
+        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+        ipr_cmd->job_step = ipr_set_supported_devs;
+        ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+                                    struct ipr_resource_entry, queue);
+
+        if (ioa_cfg->cache_state != CACHE_DISABLED)
+                return IPR_RC_JOB_CONTINUE;
+
+        ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+        ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+        ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+        ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
+
+        ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+        return IPR_RC_JOB_RETURN;
+}
+
+/**
  * ipr_get_mode_page - Locate specified mode page
  * @mode_pages: mode page buffer
  * @page_code:  page code to find
@@ -4358,10 +4489,7 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
                               length);
 
-        ipr_cmd->job_step = ipr_set_supported_devs;
-        ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
-                                    struct ipr_resource_entry, queue);
-
+        ipr_cmd->job_step = ipr_setup_write_cache;
         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 
         LEAVE;
@@ -4581,6 +4709,27 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
 }
 
 /**
+ * ipr_inquiry_page_supported - Is the given inquiry page supported
+ * @page0:      inquiry page 0 buffer
+ * @page:       page code.
+ *
+ * This function determines if the specified inquiry page is supported.
+ *
+ * Return value:
+ *      1 if page is supported / 0 if not
+ **/
+static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
+{
+        int i;
+
+        for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
+                if (page0->page[i] == page)
+                        return 1;
+
+        return 0;
+}
+
+/**
  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
  * @ipr_cmd:    ipr command struct
  *
@@ -4593,6 +4742,36 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
 {
         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+        struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
+
+        ENTER;
+
+        if (!ipr_inquiry_page_supported(page0, 1))
+                ioa_cfg->cache_state = CACHE_NONE;
+
+        ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+
+        ipr_ioafp_inquiry(ipr_cmd, 1, 3,
+                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
+                          sizeof(struct ipr_inquiry_page3));
+
+        LEAVE;
+        return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
+ * @ipr_cmd:    ipr command struct
+ *
+ * This function sends a Page 0 inquiry to the adapter
+ * to retrieve supported inquiry pages.
+ *
+ * Return value:
+ *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
         char type[5];
 
         ENTER;
@@ -4602,11 +4781,11 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
         type[4] = '\0';
         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
 
-        ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
 
-        ipr_ioafp_inquiry(ipr_cmd, 1, 3,
-                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
-                          sizeof(struct ipr_inquiry_page3));
+        ipr_ioafp_inquiry(ipr_cmd, 1, 0,
+                          ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
+                          sizeof(struct ipr_inquiry_page0));
 
         LEAVE;
         return IPR_RC_JOB_RETURN;
@@ -4626,7 +4805,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
         ENTER;
-        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
+        ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
 
         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
@@ -5629,6 +5808,10 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
         init_waitqueue_head(&ioa_cfg->reset_wait_q);
         ioa_cfg->sdt_state = INACTIVE;
+        if (ipr_enable_cache)
+                ioa_cfg->cache_state = CACHE_ENABLED;
+        else
+                ioa_cfg->cache_state = CACHE_DISABLED;
 
         ipr_initialize_bus_attr(ioa_cfg);
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b8c1603f99eb..6d9aef001fe7 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -541,6 +541,15 @@ struct ipr_inquiry_page3 {
         u8 patch_number[4];
 }__attribute__((packed));
 
+#define IPR_INQUIRY_PAGE0_ENTRIES 20
+struct ipr_inquiry_page0 {
+        u8 peri_qual_dev_type;
+        u8 page_code;
+        u8 reserved1;
+        u8 len;
+        u8 page[IPR_INQUIRY_PAGE0_ENTRIES];
+}__attribute__((packed));
+
 struct ipr_hostrcb_device_data_entry {
         struct ipr_vpd vpd;
         struct ipr_res_addr dev_res_addr;
@@ -731,6 +740,7 @@ struct ipr_resource_table {
 
 struct ipr_misc_cbs {
         struct ipr_ioa_vpd ioa_vpd;
+        struct ipr_inquiry_page0 page0_data;
         struct ipr_inquiry_page3 page3_data;
         struct ipr_mode_pages mode_pages;
         struct ipr_supported_device supp_dev;
@@ -813,6 +823,13 @@ enum ipr_sdt_state {
         DUMP_OBTAINED
 };
 
+enum ipr_cache_state {
+        CACHE_NONE,
+        CACHE_DISABLED,
+        CACHE_ENABLED,
+        CACHE_INVALID
+};
+
 /* Per-controller data */
 struct ipr_ioa_cfg {
         char eye_catcher[8];
@@ -829,6 +846,7 @@ struct ipr_ioa_cfg {
         u8 allow_cmds:1;
         u8 allow_ml_add_del:1;
 
+        enum ipr_cache_state cache_state;
         u16 type;               /* CCIN of the card */
 
         u8 log_level;
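
For reference, the change makes the cache policy controllable in two ways: the ipr.enable_cache module parameter (default 1) selects the initial state at driver load, and the new per-host "write_cache" sysfs attribute accepts the strings "enabled" and "disabled" at run time, reading back "none" on adapters whose inquiry page 0 does not report a controllable cache. The minimal userspace sketch below is not part of the patch; it assumes the attribute appears at the usual Scsi_Host class-device path, e.g. /sys/class/scsi_host/host0/write_cache, where host0 is purely illustrative.

/*
 * Sketch: query and disable the ipr adapter write cache through the
 * "write_cache" sysfs attribute added by this patch.  The path below is an
 * assumption based on the standard scsi_host class-device location; adjust
 * hostN to match the adapter.  Writing requires CAP_SYS_ADMIN, mirroring the
 * capable() check in ipr_store_write_caching(), and writing "disabled"
 * triggers a normal IOA shutdown/reset, so the store blocks until the
 * reset/reload completes.
 */
#include <stdio.h>

int main(void)
{
        const char *attr = "/sys/class/scsi_host/host0/write_cache";
        char state[16] = "";
        FILE *f;

        /* Read the current state: "none", "disabled" or "enabled". */
        f = fopen(attr, "r");
        if (!f || !fgets(state, sizeof(state), f)) {
                fprintf(stderr, "cannot read %s\n", attr);
                return 1;
        }
        fclose(f);
        printf("current write cache state: %s", state);

        /* Request that the adapter write cache be disabled. */
        f = fopen(attr, "w");
        if (!f || fputs("disabled", f) == EOF) {
                fprintf(stderr, "cannot write %s\n", attr);
                return 1;
        }
        fclose(f);
        return 0;
}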