author     Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>   2011-04-26 18:23:29 -0400
committer  James Bottomley <James.Bottomley@suse.de>                2011-05-01 13:09:20 -0400
commit     4d4dd7065572225bf6d97e5eb9915d94f9d53548 (patch)
tree       5c73ad19f4f39a76251db0fae2ea3e04e9391a71 /drivers/scsi/ipr.c
parent     0b15fb1fdfd403726542cb6111bc916b7a9f7fad (diff)
[SCSI] ipr: increase the dump size for 64 bit adapters
Currently the size of the dump generated by the driver is limited
to 4MB, which is insufficient to gather much useful data from the
new 64 bit adapters.

This patch increases the dump limit for 64 bit adapters to 32MB,
and allows it to be raised further in the future, while keeping the
current limit for the legacy 32 bit adapters.
Signed-off-by: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
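
For reference, the new limits come from per-format constants referenced below (IPR_FMT2_* for legacy 32 bit adapters, IPR_FMT3_* for 64 bit adapters); their definitions live in the companion drivers/scsi/ipr.h change, which is not part of this diff. A minimal sketch of what those definitions look like, assuming the 4MB/32MB figures from the commit message and PAGE_SIZE-based page accounting (the exact expressions in ipr.h may differ):

	/* Sketch only -- the real values are defined in drivers/scsi/ipr.h. */
	#define IPR_FMT2_MAX_IOA_DUMP_SIZE	(4 * 1024 * 1024)	/* legacy 32 bit adapters */
	#define IPR_FMT3_MAX_IOA_DUMP_SIZE	(32 * 1024 * 1024)	/* 64 bit adapters */

	/* One page pointer per dump page, plus one spare for a partial page. */
	#define IPR_FMT2_MAX_NUM_DUMP_PAGES	((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
	#define IPR_FMT3_MAX_NUM_DUMP_PAGES	((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)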
Diffstat (limited to 'drivers/scsi/ipr.c')
 -rw-r--r--   drivers/scsi/ipr.c | 80
 1 file changed, 65 insertions(+), 15 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fa2513cc76cc..3667f89abdea 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -60,6 +60,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
 				unsigned long pci_address, u32 length)
 {
 	int bytes_copied = 0;
-	int cur_len, rc, rem_len, rem_page_len;
+	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
 	__be32 *page;
 	unsigned long lock_flags = 0;
 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
 
+	if (ioa_cfg->sis64)
+		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+	else
+		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+
 	while (bytes_copied < length &&
-	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
+	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
 		if (ioa_dump->page_offset >= PAGE_SIZE ||
 		    ioa_dump->page_offset == 0) {
 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
@@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 	unsigned long lock_flags = 0;
 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
-	u32 num_entries, start_off, end_off;
-	u32 bytes_to_copy, bytes_copied, rc;
+	u32 num_entries, max_num_entries, start_off, end_off;
+	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
 	struct ipr_sdt *sdt;
 	int valid = 1;
 	int i;
@@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 	   on entries in this table */
 	sdt = &ioa_dump->sdt;
 
+	if (ioa_cfg->sis64) {
+		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
+		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
+	} else {
+		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
+		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
+	}
+
+	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
+			(max_num_entries * sizeof(struct ipr_sdt_entry));
 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
-					sizeof(struct ipr_sdt) / sizeof(__be32));
+					bytes_to_copy / sizeof(__be32));
 
 	/* Smart Dump table is ready to use and the first entry is valid */
 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
@@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 
 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
 
-	if (num_entries > IPR_NUM_SDT_ENTRIES)
-		num_entries = IPR_NUM_SDT_ENTRIES;
+	if (num_entries > max_num_entries)
+		num_entries = max_num_entries;
+
+	/* Update dump length to the actual data to be copied */
+	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
+	if (ioa_cfg->sis64)
+		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
+	else
+		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
 	for (i = 0; i < num_entries; i++) {
-		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
+		if (ioa_dump->hdr.len > max_dump_size) {
 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
 			break;
 		}
@@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 			valid = 0;
 		}
 		if (valid) {
-			if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
+			if (bytes_to_copy > max_dump_size) {
 				sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
 				continue;
 			}
@@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref)
 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
 
+	vfree(dump->ioa_dump.ioa_data);
 	kfree(dump);
 	LEAVE;
 }
@@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 	struct ipr_dump *dump;
 	unsigned long lock_flags = 0;
 	char *src;
-	int len;
+	int len, sdt_end;
 	size_t rc = count;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 
 	off -= sizeof(dump->driver_dump);
 
-	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
-		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
-			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
+	if (ioa_cfg->sis64)
+		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
+			   sizeof(struct ipr_sdt_entry));
+	else
+		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
+			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
+
+	if (count && off < sdt_end) {
+		if (off + count > sdt_end)
+			len = sdt_end - off;
 		else
 			len = count;
 		src = (u8 *)&dump->ioa_dump + off;
@@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 		count -= len;
 	}
 
-	off -= offsetof(struct ipr_ioa_dump, ioa_data);
+	off -= sdt_end;
 
 	while (count) {
 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
@@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_dump *dump;
+	__be32 **ioa_data;
 	unsigned long lock_flags = 0;
 
 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
@@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 		return -ENOMEM;
 	}
 
+	if (ioa_cfg->sis64)
+		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+	else
+		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+
+	if (!ioa_data) {
+		ipr_err("Dump memory allocation failed\n");
+		kfree(dump);
+		return -ENOMEM;
+	}
+
+	dump->ioa_dump.ioa_data = ioa_data;
+
 	kref_init(&dump->kref);
 	dump->ioa_cfg = ioa_cfg;
 
@@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 
 	if (INACTIVE != ioa_cfg->sdt_state) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		vfree(dump->ioa_dump.ioa_data);
 		kfree(dump);
 		return 0;
 	}
@@ -7566,7 +7613,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->job_step = ipr_reset_enable_ioa;
 
 	if (GET_DUMP == ioa_cfg->sdt_state) {
-		ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
+		if (ioa_cfg->sis64)
+			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
+		else
+			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
 		ipr_cmd->job_step = ipr_reset_wait_for_dump;
 		schedule_work(&ioa_cfg->work_q);
 		return IPR_RC_JOB_RETURN;
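
A note on why ioa_data is now allocated with vmalloc() in ipr_alloc_dump() and released with vfree() in ipr_release_dump() and in the early-exit path above: at the 32MB limit the page-pointer array itself becomes sizeable, so keeping it inline in a kzalloc'd struct ipr_dump is no longer attractive. Rough sizing only, assuming PAGE_SIZE == 4096 and 8-byte pointers on a 64 bit host (the exact constants are in ipr.h):

	/*
	 * 32 MB max dump / 4 KB per page           ~= 8192 dump pages
	 * 8192 page pointers * sizeof(__be32 *)    ~= 64 KB of pointer storage
	 *
	 * vmalloc() avoids depending on a high-order physically contiguous
	 * allocation for that array at dump time; the dump pages themselves
	 * are still taken one at a time with __get_free_page(GFP_ATOMIC).
	 */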