Diffstat (limited to 'drivers')
134 files changed, 8285 insertions, 2559 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7edaccce6640..a51df9681319 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -71,9 +71,6 @@ enum ec_command {
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
 
-#define ACPI_EC_STORM_THRESHOLD 8	/* number of false interrupts
-					   per one transaction */
-
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
 	EC_FLAGS_GPE_STORM,		/* GPE storm detected */
@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
 
+/*
+ * If the number of false interrupts per one transaction exceeds
+ * this threshold, will think there is a GPE storm happened and
+ * will disable the GPE for normal transaction.
+ */
+static unsigned int ec_storm_threshold __read_mostly = 8;
+module_param(ec_storm_threshold, uint, 0644);
+MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
+
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
 /* External interfaces use first EC only, so remember */
 typedef int (*acpi_ec_query_func) (void *data);
@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 		msleep(1);
 		/* It is safe to enable the GPE outside of the transaction. */
 		acpi_enable_gpe(NULL, ec->gpe);
-	} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+	} else if (t->irq_count > ec_storm_threshold) {
 		pr_info(PREFIX "GPE storm detected, "
 			"transactions will use polling mode\n");
 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
@@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
+ * the GPE storm threshold back to 20
+ */
+static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+{
+	pr_debug("Setting the EC GPE storm threshold to 20\n");
+	ec_storm_threshold = 20;
+	return 0;
+}
+
 static struct dmi_system_id __initdata ec_dmi_table[] = {
 	{
 	ec_skip_dsdt_scan, "Compal JFL92", {
@@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
 	{
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
+	{
+	ec_enlarge_storm_threshold, "CLEVO hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
 	{},
 };
 
-
 int __init acpi_ec_ecdt_probe(void)
 {
 	acpi_status status;
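Note on the ec.c change: the fixed ACPI_EC_STORM_THRESHOLD constant becomes the runtime-tunable ec_storm_threshold module parameter, so DMI quirks such as the Clevo M720T/M730T entry can raise it to 20 instead of prematurely falling back to polling mode. A minimal userspace sketch of the same policy (the names and the main() driver are illustrative stand-ins, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in for the ec_storm_threshold module parameter */
	static unsigned int storm_threshold = 8;

	/* mirrors "t->irq_count > ec_storm_threshold" in acpi_ec_transaction() */
	static bool gpe_storm_detected(unsigned int irq_count)
	{
		return irq_count > storm_threshold;
	}

	int main(void)
	{
		printf("12 spurious IRQs, default: storm=%d\n", gpe_storm_detected(12));
		storm_threshold = 20;	/* what ec_enlarge_storm_threshold() does */
		printf("12 spurious IRQs, Clevo quirk: storm=%d\n", gpe_storm_detected(12));
		return 0;
	}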
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3655ab923812..e8086c725305 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1132,7 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 int acpi_processor_hotplug(struct acpi_processor *pr)
 {
 	int ret = 0;
-	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
+	struct cpuidle_device *dev;
 
 	if (disabled_by_idle_boot_param())
 		return 0;
@@ -1147,6 +1147,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
 	if (!pr->flags.power_setup_done)
 		return -ENODEV;
 
+	dev = per_cpu(acpi_cpuidle_device, pr->id);
 	cpuidle_pause_and_lock();
 	cpuidle_disable_device(dev);
 	acpi_processor_get_power_info(pr);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index edda74a43406..804204d41999 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -708,6 +708,40 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
 	return -EINVAL;
 }
 
+static int thermal_get_trend(struct thermal_zone_device *thermal,
+				int trip, enum thermal_trend *trend)
+{
+	struct acpi_thermal *tz = thermal->devdata;
+	enum thermal_trip_type type;
+	int i;
+
+	if (thermal_get_trip_type(thermal, trip, &type))
+		return -EINVAL;
+
+	if (type == THERMAL_TRIP_ACTIVE) {
+		/* aggressive active cooling */
+		*trend = THERMAL_TREND_RAISING;
+		return 0;
+	}
+
+	/*
+	 * tz->temperature has already been updated by generic thermal layer,
+	 * before this callback being invoked
+	 */
+	i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature))
+		+ (tz->trips.passive.tc2
+		* (tz->temperature - tz->trips.passive.temperature));
+
+	if (i > 0)
+		*trend = THERMAL_TREND_RAISING;
+	else if (i < 0)
+		*trend = THERMAL_TREND_DROPPING;
+	else
+		*trend = THERMAL_TREND_STABLE;
+	return 0;
+}
+
+
 static int thermal_notify(struct thermal_zone_device *thermal, int trip,
 			   enum thermal_trip_type trip_type)
 {
@@ -731,11 +765,9 @@ static int thermal_notify(struct thermal_zone_device *thermal, int trip,
 	return 0;
 }
 
-typedef int (*cb)(struct thermal_zone_device *, int,
-		  struct thermal_cooling_device *);
 static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
 					struct thermal_cooling_device *cdev,
-					cb action)
+					bool bind)
 {
 	struct acpi_device *device = cdev->devdata;
 	struct acpi_thermal *tz = thermal->devdata;
@@ -759,11 +791,19 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
 		     i++) {
 			handle = tz->trips.passive.devices.handles[i];
 			status = acpi_bus_get_device(handle, &dev);
-			if (ACPI_SUCCESS(status) && (dev == device)) {
-				result = action(thermal, trip, cdev);
-				if (result)
-					goto failed;
-			}
+			if (ACPI_FAILURE(status) || dev != device)
+				continue;
+			if (bind)
+				result =
+					thermal_zone_bind_cooling_device
+					(thermal, trip, cdev,
+					THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
+			else
+				result =
+					thermal_zone_unbind_cooling_device
+					(thermal, trip, cdev);
+			if (result)
+				goto failed;
 		}
 	}
 
@@ -776,11 +816,17 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
 			     j++) {
 				handle = tz->trips.active[i].devices.handles[j];
 				status = acpi_bus_get_device(handle, &dev);
-				if (ACPI_SUCCESS(status) && (dev == device)) {
-					result = action(thermal, trip, cdev);
-					if (result)
-						goto failed;
-				}
+				if (ACPI_FAILURE(status) || dev != device)
+					continue;
+				if (bind)
+					result = thermal_zone_bind_cooling_device
+						(thermal, trip, cdev,
+						THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
+				else
+					result = thermal_zone_unbind_cooling_device
+						(thermal, trip, cdev);
+				if (result)
+					goto failed;
 			}
 	}
 
@@ -788,7 +834,14 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
 		handle = tz->devices.handles[i];
 		status = acpi_bus_get_device(handle, &dev);
 		if (ACPI_SUCCESS(status) && (dev == device)) {
-			result = action(thermal, -1, cdev);
+			if (bind)
+				result = thermal_zone_bind_cooling_device
+						(thermal, -1, cdev,
+						THERMAL_NO_LIMIT,
+						THERMAL_NO_LIMIT);
+			else
+				result = thermal_zone_unbind_cooling_device
+						(thermal, -1, cdev);
 			if (result)
 				goto failed;
 		}
@@ -802,16 +855,14 @@ static int
 acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
 					struct thermal_cooling_device *cdev)
 {
-	return acpi_thermal_cooling_device_cb(thermal, cdev,
-				thermal_zone_bind_cooling_device);
+	return acpi_thermal_cooling_device_cb(thermal, cdev, true);
 }
 
 static int
 acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
 					struct thermal_cooling_device *cdev)
 {
-	return acpi_thermal_cooling_device_cb(thermal, cdev,
-				thermal_zone_unbind_cooling_device);
+	return acpi_thermal_cooling_device_cb(thermal, cdev, false);
 }
 
 static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
@@ -823,6 +874,7 @@ static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
 	.get_trip_type = thermal_get_trip_type,
 	.get_trip_temp = thermal_get_trip_temp,
 	.get_crit_temp = thermal_get_crit_temp,
+	.get_trend = thermal_get_trend,
 	.notify = thermal_notify,
 };
 
@@ -849,15 +901,12 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
 		tz->thermal_zone =
 			thermal_zone_device_register("acpitz", trips, 0, tz,
 						     &acpi_thermal_zone_ops,
-						     tz->trips.passive.tc1,
-						     tz->trips.passive.tc2,
 						     tz->trips.passive.tsp*100,
 						     tz->polling_frequency*100);
 	else
 		tz->thermal_zone =
 			thermal_zone_device_register("acpitz", trips, 0, tz,
-						     &acpi_thermal_zone_ops,
-						     0, 0, 0,
+						     &acpi_thermal_zone_ops, 0,
 						     tz->polling_frequency*100);
 	if (IS_ERR(tz->thermal_zone))
 		return -ENODEV;
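Note on the thermal.c change: the tc1/tc2 passive-cooling coefficients no longer go to thermal_zone_device_register(); instead the driver supplies its own get_trend callback, which computes i = tc1*(T - T_last) + tc2*(T - T_passive) and maps the sign of i to a trend. A runnable sketch of that arithmetic with invented sample coefficients (nothing below is from a real platform):

	#include <stdio.h>

	enum trend { DROPPING, STABLE, RAISING };

	static enum trend passive_trend(int tc1, int tc2,
					int temp, int last_temp, int passive_temp)
	{
		/* same expression as in thermal_get_trend() above */
		int i = tc1 * (temp - last_temp) + tc2 * (temp - passive_temp);

		return i > 0 ? RAISING : i < 0 ? DROPPING : STABLE;
	}

	int main(void)
	{
		/* e.g. tc1=2, tc2=1; 95C now, 94C before, passive trip at 90C:
		 * i = 2*(95-94) + 1*(95-90) = 7 > 0, so the trend is RAISING (2) */
		printf("%d\n", passive_trend(2, 1, 95, 94, 90));
		return 0;
	}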
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index f26afdb1a702..93211df52aab 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1182,17 +1182,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
 		  size_t size, loff_t *off)
 {
 	struct tpm_chip *chip = file->private_data;
-	size_t in_size = size, out_size;
+	size_t in_size = size;
+	ssize_t out_size;
 
 	/* cannot perform a write until the read has cleared
-	   either via tpm_read or a user_read_timer timeout */
-	while (atomic_read(&chip->data_pending) != 0)
-		msleep(TPM_TIMEOUT);
-
-	mutex_lock(&chip->buffer_mutex);
+	   either via tpm_read or a user_read_timer timeout.
+	   This also prevents splitted buffered writes from blocking here.
+	*/
+	if (atomic_read(&chip->data_pending) != 0)
+		return -EBUSY;
 
 	if (in_size > TPM_BUFSIZE)
-		in_size = TPM_BUFSIZE;
+		return -E2BIG;
+
+	mutex_lock(&chip->buffer_mutex);
 
 	if (copy_from_user
 	    (chip->data_buffer, (void __user *) buf, in_size)) {
@@ -1202,6 +1205,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
 
 	/* atomic tpm command send and result receive */
 	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
+	if (out_size < 0) {
+		mutex_unlock(&chip->buffer_mutex);
+		return out_size;
+	}
 
 	atomic_set(&chip->data_pending, out_size);
 	mutex_unlock(&chip->buffer_mutex);
@@ -1259,6 +1266,7 @@ void tpm_remove_hardware(struct device *dev)
 
 	misc_deregister(&chip->vendor.miscdev);
 	sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
+	tpm_remove_ppi(&dev->kobj);
 	tpm_bios_log_teardown(chip->bios_dir);
 
 	/* write it this way to be explicit (chip->dev == dev) */
@@ -1476,7 +1484,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
 		goto put_device;
 	}
 
-	if (sys_add_ppi(&dev->kobj)) {
+	if (tpm_add_ppi(&dev->kobj)) {
 		misc_deregister(&chip->vendor.miscdev);
 		goto put_device;
 	}
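Note on the tpm.c change: tpm_write() used to sleep in a loop while a previous command's result was pending and silently truncated oversized commands; it now fails fast with -EBUSY and -E2BIG and propagates tpm_transmit() errors. A hedged sketch of how a /dev/tpm0 client might adapt (the device path, buffer size, and placeholder command blob are assumptions, not from the patch):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Retry a TPM command when the driver reports a pending result:
	 * reading the device clears the pending state, after which the
	 * write can be retried. */
	static ssize_t send_tpm_cmd(int fd, const void *cmd, size_t len)
	{
		unsigned char resp[4096];	/* >= TPM_BUFSIZE, assumed */
		ssize_t n;

		while ((n = write(fd, cmd, len)) < 0 && errno == EBUSY) {
			/* drain the previous command's response first */
			if (read(fd, resp, sizeof(resp)) < 0)
				return -1;
		}
		if (n < 0 && errno == E2BIG)
			fprintf(stderr, "command larger than the TPM buffer\n");
		return n;
	}

	int main(void)
	{
		/* a real TPM command blob would go here; this is a placeholder */
		static const unsigned char cmd[10] = { 0x00, 0xc1 };
		int fd = open("/dev/tpm0", O_RDWR);

		if (fd < 0)
			return 1;
		send_tpm_cmd(fd, cmd, sizeof(cmd));
		close(fd);
		return 0;
	}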
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 02c266aa2bf7..8ef7649a50aa 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -329,10 +329,15 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
 			     wait_queue_head_t *);
 
 #ifdef CONFIG_ACPI
-extern ssize_t sys_add_ppi(struct kobject *parent);
+extern int tpm_add_ppi(struct kobject *);
+extern void tpm_remove_ppi(struct kobject *);
 #else
-static inline ssize_t sys_add_ppi(struct kobject *parent)
+static inline int tpm_add_ppi(struct kobject *parent)
 {
 	return 0;
 }
+
+static inline void tpm_remove_ppi(struct kobject *parent)
+{
+}
 #endif
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index f27b58cfae98..720ebcf29fdf 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -444,18 +444,20 @@ static struct attribute *ppi_attrs[] = {
 	&dev_attr_vs_operations.attr, NULL,
 };
 static struct attribute_group ppi_attr_grp = {
+	.name = "ppi",
 	.attrs = ppi_attrs
 };
 
-ssize_t sys_add_ppi(struct kobject *parent)
+int tpm_add_ppi(struct kobject *parent)
 {
-	struct kobject *ppi;
-	ppi = kobject_create_and_add("ppi", parent);
-	if (sysfs_create_group(ppi, &ppi_attr_grp))
-		return -EFAULT;
-	else
-		return 0;
+	return sysfs_create_group(parent, &ppi_attr_grp);
+}
+EXPORT_SYMBOL_GPL(tpm_add_ppi);
+
+void tpm_remove_ppi(struct kobject *parent)
+{
+	sysfs_remove_group(parent, &ppi_attr_grp);
 }
-EXPORT_SYMBOL_GPL(sys_add_ppi);
+EXPORT_SYMBOL_GPL(tpm_remove_ppi);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e28f6ea46f1a..7f15b8514a18 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -368,7 +368,7 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device);
  */
 void cpuidle_disable_device(struct cpuidle_device *dev)
 {
-	if (!dev->enabled)
+	if (!dev || !dev->enabled)
 		return;
 	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
 		return;
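Note: this guard pairs with the processor_idle.c hunk above. With CPU hotplug, the per-CPU cpuidle device may not have been registered yet, so acpi_processor_hotplug() now fetches it only after its early-return checks, and cpuidle_disable_device() tolerates a NULL argument. A tiny illustration of the defensive pattern (userspace stand-ins, not the kernel types):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct idle_dev { bool enabled; };

	/* mirrors the new "if (!dev || !dev->enabled) return;" guard */
	static void disable_device(struct idle_dev *dev)
	{
		if (!dev || !dev->enabled)
			return;
		dev->enabled = false;
	}

	int main(void)
	{
		disable_device(NULL);	/* safe now: no dereference of NULL */
		printf("ok\n");
		return 0;
	}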
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index c74e73b2069a..c4633de64465 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -334,16 +334,6 @@ config SENSORS_DA9052_ADC
 	  This driver can also be built as module. If so, the module
 	  will be called da9052-hwmon.
 
-config SENSORS_EXYNOS4_TMU
-	tristate "Temperature sensor on Samsung EXYNOS4"
-	depends on ARCH_EXYNOS4
-	help
-	  If you say yes here you get support for TMU (Thermal Management
-	  Unit) on SAMSUNG EXYNOS4 series of SoC.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called exynos4-tmu.
-
 config SENSORS_I5K_AMB
 	tristate "FB-DIMM AMB temperature sensor on Intel 5000 series chipsets"
 	depends on PCI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a62ce17ddbfc..8d5fcb5e8e9f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_SENSORS_DS1621)	+= ds1621.o
 obj-$(CONFIG_SENSORS_EMC1403)	+= emc1403.o
 obj-$(CONFIG_SENSORS_EMC2103)	+= emc2103.o
 obj-$(CONFIG_SENSORS_EMC6W201)	+= emc6w201.o
-obj-$(CONFIG_SENSORS_EXYNOS4_TMU)	+= exynos4_tmu.o
 obj-$(CONFIG_SENSORS_F71805F)	+= f71805f.o
 obj-$(CONFIG_SENSORS_F71882FG)	+= f71882fg.o
 obj-$(CONFIG_SENSORS_F75375S)	+= f75375s.o
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
deleted file mode 100644
index e912059140cd..000000000000
--- a/drivers/hwmon/exynos4_tmu.c
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * exynos4_tmu.c - Samsung EXYNOS4 TMU (Thermal Management Unit)
- *
- * Copyright (C) 2011 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-
-#include <linux/platform_data/exynos4_tmu.h>
-
-#define EXYNOS4_TMU_REG_TRIMINFO	0x0
-#define EXYNOS4_TMU_REG_CONTROL		0x20
-#define EXYNOS4_TMU_REG_STATUS		0x28
-#define EXYNOS4_TMU_REG_CURRENT_TEMP	0x40
-#define EXYNOS4_TMU_REG_THRESHOLD_TEMP	0x44
-#define EXYNOS4_TMU_REG_TRIG_LEVEL0	0x50
-#define EXYNOS4_TMU_REG_TRIG_LEVEL1	0x54
-#define EXYNOS4_TMU_REG_TRIG_LEVEL2	0x58
-#define EXYNOS4_TMU_REG_TRIG_LEVEL3	0x5C
-#define EXYNOS4_TMU_REG_PAST_TEMP0	0x60
-#define EXYNOS4_TMU_REG_PAST_TEMP1	0x64
-#define EXYNOS4_TMU_REG_PAST_TEMP2	0x68
-#define EXYNOS4_TMU_REG_PAST_TEMP3	0x6C
-#define EXYNOS4_TMU_REG_INTEN		0x70
-#define EXYNOS4_TMU_REG_INTSTAT		0x74
-#define EXYNOS4_TMU_REG_INTCLEAR	0x78
-
-#define EXYNOS4_TMU_GAIN_SHIFT		8
-#define EXYNOS4_TMU_REF_VOLTAGE_SHIFT	24
-
-#define EXYNOS4_TMU_TRIM_TEMP_MASK	0xff
-#define EXYNOS4_TMU_CORE_ON	3
-#define EXYNOS4_TMU_CORE_OFF	2
-#define EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET	50
-#define EXYNOS4_TMU_TRIG_LEVEL0_MASK	0x1
-#define EXYNOS4_TMU_TRIG_LEVEL1_MASK	0x10
-#define EXYNOS4_TMU_TRIG_LEVEL2_MASK	0x100
-#define EXYNOS4_TMU_TRIG_LEVEL3_MASK	0x1000
-#define EXYNOS4_TMU_INTCLEAR_VAL	0x1111
-
-struct exynos4_tmu_data {
-	struct exynos4_tmu_platform_data *pdata;
-	struct device *hwmon_dev;
-	struct resource *mem;
-	void __iomem *base;
-	int irq;
-	struct work_struct irq_work;
-	struct mutex lock;
-	struct clk *clk;
-	u8 temp_error1, temp_error2;
-};
-
-/*
- * TMU treats temperature as a mapped temperature code.
- * The temperature is converted differently depending on the calibration type.
- */
-static int temp_to_code(struct exynos4_tmu_data *data, u8 temp)
-{
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	int temp_code;
-
-	/* temp should range between 25 and 125 */
-	if (temp < 25 || temp > 125) {
-		temp_code = -EINVAL;
-		goto out;
-	}
-
-	switch (pdata->cal_type) {
-	case TYPE_TWO_POINT_TRIMMING:
-		temp_code = (temp - 25) *
-		    (data->temp_error2 - data->temp_error1) /
-		    (85 - 25) + data->temp_error1;
-		break;
-	case TYPE_ONE_POINT_TRIMMING:
-		temp_code = temp + data->temp_error1 - 25;
-		break;
-	default:
-		temp_code = temp + EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
-		break;
-	}
-out:
-	return temp_code;
-}
-
-/*
- * Calculate a temperature value from a temperature code.
- * The unit of the temperature is degree Celsius.
- */
-static int code_to_temp(struct exynos4_tmu_data *data, u8 temp_code)
-{
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	int temp;
-
-	/* temp_code should range between 75 and 175 */
-	if (temp_code < 75 || temp_code > 175) {
-		temp = -ENODATA;
-		goto out;
-	}
-
-	switch (pdata->cal_type) {
-	case TYPE_TWO_POINT_TRIMMING:
-		temp = (temp_code - data->temp_error1) * (85 - 25) /
-		    (data->temp_error2 - data->temp_error1) + 25;
-		break;
-	case TYPE_ONE_POINT_TRIMMING:
-		temp = temp_code - data->temp_error1 + 25;
-		break;
-	default:
-		temp = temp_code - EXYNOS4_TMU_DEF_CODE_TO_TEMP_OFFSET;
-		break;
-	}
-out:
-	return temp;
-}
-
-static int exynos4_tmu_initialize(struct platform_device *pdev)
-{
-	struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	unsigned int status, trim_info;
-	int ret = 0, threshold_code;
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-
-	status = readb(data->base + EXYNOS4_TMU_REG_STATUS);
-	if (!status) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/* Save trimming info in order to perform calibration */
-	trim_info = readl(data->base + EXYNOS4_TMU_REG_TRIMINFO);
-	data->temp_error1 = trim_info & EXYNOS4_TMU_TRIM_TEMP_MASK;
-	data->temp_error2 = ((trim_info >> 8) & EXYNOS4_TMU_TRIM_TEMP_MASK);
-
-	/* Write temperature code for threshold */
-	threshold_code = temp_to_code(data, pdata->threshold);
-	if (threshold_code < 0) {
-		ret = threshold_code;
-		goto out;
-	}
-	writeb(threshold_code,
-		data->base + EXYNOS4_TMU_REG_THRESHOLD_TEMP);
-
-	writeb(pdata->trigger_levels[0],
-		data->base + EXYNOS4_TMU_REG_TRIG_LEVEL0);
-	writeb(pdata->trigger_levels[1],
-		data->base + EXYNOS4_TMU_REG_TRIG_LEVEL1);
-	writeb(pdata->trigger_levels[2],
-		data->base + EXYNOS4_TMU_REG_TRIG_LEVEL2);
-	writeb(pdata->trigger_levels[3],
-		data->base + EXYNOS4_TMU_REG_TRIG_LEVEL3);
-
-	writel(EXYNOS4_TMU_INTCLEAR_VAL,
-		data->base + EXYNOS4_TMU_REG_INTCLEAR);
-out:
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-
-	return ret;
-}
-
-static void exynos4_tmu_control(struct platform_device *pdev, bool on)
-{
-	struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	unsigned int con, interrupt_en;
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-
-	con = pdata->reference_voltage << EXYNOS4_TMU_REF_VOLTAGE_SHIFT |
-		pdata->gain << EXYNOS4_TMU_GAIN_SHIFT;
-	if (on) {
-		con |= EXYNOS4_TMU_CORE_ON;
-		interrupt_en = pdata->trigger_level3_en << 12 |
-			pdata->trigger_level2_en << 8 |
-			pdata->trigger_level1_en << 4 |
-			pdata->trigger_level0_en;
-	} else {
-		con |= EXYNOS4_TMU_CORE_OFF;
-		interrupt_en = 0; /* Disable all interrupts */
-	}
-	writel(interrupt_en, data->base + EXYNOS4_TMU_REG_INTEN);
-	writel(con, data->base + EXYNOS4_TMU_REG_CONTROL);
-
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-}
-
-static int exynos4_tmu_read(struct exynos4_tmu_data *data)
-{
-	u8 temp_code;
-	int temp;
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-
-	temp_code = readb(data->base + EXYNOS4_TMU_REG_CURRENT_TEMP);
-	temp = code_to_temp(data, temp_code);
-
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-
-	return temp;
-}
-
-static void exynos4_tmu_work(struct work_struct *work)
-{
-	struct exynos4_tmu_data *data = container_of(work,
-			struct exynos4_tmu_data, irq_work);
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-
-	writel(EXYNOS4_TMU_INTCLEAR_VAL, data->base + EXYNOS4_TMU_REG_INTCLEAR);
-
-	kobject_uevent(&data->hwmon_dev->kobj, KOBJ_CHANGE);
-
-	enable_irq(data->irq);
-
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-}
-
-static irqreturn_t exynos4_tmu_irq(int irq, void *id)
-{
-	struct exynos4_tmu_data *data = id;
-
-	disable_irq_nosync(irq);
-	schedule_work(&data->irq_work);
-
-	return IRQ_HANDLED;
-}
-
-static ssize_t exynos4_tmu_show_name(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "exynos4-tmu\n");
-}
-
-static ssize_t exynos4_tmu_show_temp(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct exynos4_tmu_data *data = dev_get_drvdata(dev);
-	int ret;
-
-	ret = exynos4_tmu_read(data);
-	if (ret < 0)
-		return ret;
-
-	/* convert from degree Celsius to millidegree Celsius */
-	return sprintf(buf, "%d\n", ret * 1000);
-}
-
-static ssize_t exynos4_tmu_show_alarm(struct device *dev,
-		struct device_attribute *devattr, char *buf)
-{
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct exynos4_tmu_data *data = dev_get_drvdata(dev);
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	int temp;
-	unsigned int trigger_level;
-
-	temp = exynos4_tmu_read(data);
-	if (temp < 0)
-		return temp;
-
-	trigger_level = pdata->threshold + pdata->trigger_levels[attr->index];
-
-	return sprintf(buf, "%d\n", !!(temp > trigger_level));
-}
-
-static ssize_t exynos4_tmu_show_level(struct device *dev,
-		struct device_attribute *devattr, char *buf)
-{
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct exynos4_tmu_data *data = dev_get_drvdata(dev);
-	struct exynos4_tmu_platform_data *pdata = data->pdata;
-	unsigned int temp = pdata->threshold +
-			pdata->trigger_levels[attr->index];
-
-	return sprintf(buf, "%u\n", temp * 1000);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, exynos4_tmu_show_name, NULL);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, exynos4_tmu_show_temp, NULL, 0);
-
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
-		exynos4_tmu_show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
-		exynos4_tmu_show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO,
-		exynos4_tmu_show_alarm, NULL, 3);
-
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, exynos4_tmu_show_level, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, exynos4_tmu_show_level, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO,
-		exynos4_tmu_show_level, NULL, 3);
-
-static struct attribute *exynos4_tmu_attributes[] = {
-	&dev_attr_name.attr,
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
-	&sensor_dev_attr_temp1_max.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group exynos4_tmu_attr_group = {
-	.attrs = exynos4_tmu_attributes,
-};
-
-static int __devinit exynos4_tmu_probe(struct platform_device *pdev)
-{
-	struct exynos4_tmu_data *data;
-	struct exynos4_tmu_platform_data *pdata = pdev->dev.platform_data;
-	int ret;
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "No platform init data supplied.\n");
-		return -ENODEV;
-	}
-
-	data = kzalloc(sizeof(struct exynos4_tmu_data), GFP_KERNEL);
-	if (!data) {
-		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
-		return -ENOMEM;
-	}
-
-	data->irq = platform_get_irq(pdev, 0);
-	if (data->irq < 0) {
-		ret = data->irq;
-		dev_err(&pdev->dev, "Failed to get platform irq\n");
-		goto err_free;
-	}
-
-	INIT_WORK(&data->irq_work, exynos4_tmu_work);
-
-	data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!data->mem) {
-		ret = -ENOENT;
-		dev_err(&pdev->dev, "Failed to get platform resource\n");
-		goto err_free;
-	}
-
-	data->mem = request_mem_region(data->mem->start,
-			resource_size(data->mem), pdev->name);
-	if (!data->mem) {
-		ret = -ENODEV;
-		dev_err(&pdev->dev, "Failed to request memory region\n");
-		goto err_free;
-	}
-
-	data->base = ioremap(data->mem->start, resource_size(data->mem));
-	if (!data->base) {
-		ret = -ENODEV;
-		dev_err(&pdev->dev, "Failed to ioremap memory\n");
-		goto err_mem_region;
-	}
-
-	ret = request_irq(data->irq, exynos4_tmu_irq,
-		IRQF_TRIGGER_RISING,
-		"exynos4-tmu", data);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
-		goto err_io_remap;
-	}
-
-	data->clk = clk_get(NULL, "tmu_apbif");
-	if (IS_ERR(data->clk)) {
-		ret = PTR_ERR(data->clk);
-		dev_err(&pdev->dev, "Failed to get clock\n");
-		goto err_irq;
-	}
-
-	data->pdata = pdata;
-	platform_set_drvdata(pdev, data);
-	mutex_init(&data->lock);
-
-	ret = exynos4_tmu_initialize(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to initialize TMU\n");
-		goto err_clk;
-	}
-
-	ret = sysfs_create_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to create sysfs group\n");
-		goto err_clk;
-	}
-
-	data->hwmon_dev = hwmon_device_register(&pdev->dev);
-	if (IS_ERR(data->hwmon_dev)) {
-		ret = PTR_ERR(data->hwmon_dev);
-		dev_err(&pdev->dev, "Failed to register hwmon device\n");
-		goto err_create_group;
-	}
-
-	exynos4_tmu_control(pdev, true);
-
-	return 0;
-
-err_create_group:
-	sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
-err_clk:
-	platform_set_drvdata(pdev, NULL);
-	clk_put(data->clk);
-err_irq:
-	free_irq(data->irq, data);
-err_io_remap:
-	iounmap(data->base);
-err_mem_region:
-	release_mem_region(data->mem->start, resource_size(data->mem));
-err_free:
-	kfree(data);
-
-	return ret;
-}
-
-static int __devexit exynos4_tmu_remove(struct platform_device *pdev)
-{
-	struct exynos4_tmu_data *data = platform_get_drvdata(pdev);
-
-	exynos4_tmu_control(pdev, false);
-
-	hwmon_device_unregister(data->hwmon_dev);
-	sysfs_remove_group(&pdev->dev.kobj, &exynos4_tmu_attr_group);
-
-	clk_put(data->clk);
-
-	free_irq(data->irq, data);
-
-	iounmap(data->base);
-	release_mem_region(data->mem->start, resource_size(data->mem));
-
-	platform_set_drvdata(pdev, NULL);
-
-	kfree(data);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos4_tmu_suspend(struct device *dev)
-{
-	exynos4_tmu_control(to_platform_device(dev), false);
-
-	return 0;
-}
-
-static int exynos4_tmu_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-
-	exynos4_tmu_initialize(pdev);
-	exynos4_tmu_control(pdev, true);
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(exynos4_tmu_pm,
-			 exynos4_tmu_suspend, exynos4_tmu_resume);
-#define EXYNOS4_TMU_PM	&exynos4_tmu_pm
-#else
-#define EXYNOS4_TMU_PM	NULL
-#endif
-
-static struct platform_driver exynos4_tmu_driver = {
-	.driver = {
-		.name = "exynos4-tmu",
-		.owner = THIS_MODULE,
-		.pm = EXYNOS4_TMU_PM,
-	},
-	.probe = exynos4_tmu_probe,
-	.remove = __devexit_p(exynos4_tmu_remove),
-};
-
-module_platform_driver(exynos4_tmu_driver);
-
-MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:exynos4-tmu");
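Note on the deleted driver: its calibration scheme maps temperatures to register codes using per-chip trim fuses, and the two-point variant interpolates linearly between the fused readings at 25C and 85C. A runnable extract of that arithmetic, with invented fuse values for illustration:

	#include <stdio.h>

	/* same linear interpolation the removed temp_to_code() used for
	 * TYPE_TWO_POINT_TRIMMING; returns -1 outside the valid range */
	static int temp_to_code_two_point(int temp, int err1_at_25c, int err2_at_85c)
	{
		if (temp < 25 || temp > 125)
			return -1;
		return (temp - 25) * (err2_at_85c - err1_at_25c) / (85 - 25)
			+ err1_at_25c;
	}

	int main(void)
	{
		/* e.g. trim fuses reading back 40 (25C) and 100 (85C):
		 * code(55C) = (55-25)*(100-40)/60 + 40 = 70 */
		printf("code(55C) = %d\n", temp_to_code_two_point(55, 40, 100));
		return 0;
	}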
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 118d0300f1fb..6ae2ac47c9c8 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -23,11 +23,11 @@
 #include <linux/input/mt.h>
 #include <linux/major.h>
 #include <linux/device.h>
+#include <linux/cdev.h>
 #include "input-compat.h"
 
 struct evdev {
 	int open;
-	int minor;
 	struct input_handle handle;
 	wait_queue_head_t wait;
 	struct evdev_client __rcu *grab;
@@ -35,6 +35,7 @@ struct evdev {
 	spinlock_t client_lock; /* protects client_list */
 	struct mutex mutex;
 	struct device dev;
+	struct cdev cdev;
 	bool exist;
 };
 
@@ -51,9 +52,6 @@ struct evdev_client {
 	struct input_event buffer[];
 };
 
-static struct evdev *evdev_table[EVDEV_MINORS];
-static DEFINE_MUTEX(evdev_table_mutex);
-
 static void __pass_event(struct evdev_client *client,
 			 const struct input_event *event)
 {
@@ -310,35 +308,16 @@ static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
 
 static int evdev_open(struct inode *inode, struct file *file)
 {
-	struct evdev *evdev;
+	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
+	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
 	struct evdev_client *client;
-	int i = iminor(inode) - EVDEV_MINOR_BASE;
-	unsigned int bufsize;
 	int error;
 
-	if (i >= EVDEV_MINORS)
-		return -ENODEV;
-
-	error = mutex_lock_interruptible(&evdev_table_mutex);
-	if (error)
-		return error;
-	evdev = evdev_table[i];
-	if (evdev)
-		get_device(&evdev->dev);
-	mutex_unlock(&evdev_table_mutex);
-
-	if (!evdev)
-		return -ENODEV;
-
-	bufsize = evdev_compute_buffer_size(evdev->handle.dev);
-
 	client = kzalloc(sizeof(struct evdev_client) +
 				bufsize * sizeof(struct input_event),
 			 GFP_KERNEL);
-	if (!client) {
-		error = -ENOMEM;
-		goto err_put_evdev;
-	}
+	if (!client)
+		return -ENOMEM;
 
 	client->bufsize = bufsize;
 	spin_lock_init(&client->buffer_lock);
@@ -352,13 +331,12 @@ static int evdev_open(struct inode *inode, struct file *file)
 	file->private_data = client;
 	nonseekable_open(inode, file);
 
+	get_device(&evdev->dev);
 	return 0;
 
 err_free_client:
 	evdev_detach_client(evdev, client);
 	kfree(client);
-err_put_evdev:
-	put_device(&evdev->dev);
 	return error;
 }
 
@@ -942,26 +920,6 @@ static const struct file_operations evdev_fops = {
 	.llseek		= no_llseek,
 };
 
-static int evdev_install_chrdev(struct evdev *evdev)
-{
-	/*
-	 * No need to do any locking here as calls to connect and
-	 * disconnect are serialized by the input core
-	 */
-	evdev_table[evdev->minor] = evdev;
-	return 0;
-}
-
-static void evdev_remove_chrdev(struct evdev *evdev)
-{
-	/*
-	 * Lock evdev table to prevent race with evdev_open()
-	 */
-	mutex_lock(&evdev_table_mutex);
-	evdev_table[evdev->minor] = NULL;
-	mutex_unlock(&evdev_table_mutex);
-}
-
 /*
  * Mark device non-existent. This disables writes, ioctls and
  * prevents new users from opening the device. Already posted
@@ -980,7 +938,8 @@ static void evdev_cleanup(struct evdev *evdev)
 
 	evdev_mark_dead(evdev);
 	evdev_hangup(evdev);
-	evdev_remove_chrdev(evdev);
+
+	cdev_del(&evdev->cdev);
 
 	/* evdev is marked dead so no one else accesses evdev->open */
 	if (evdev->open) {
@@ -991,43 +950,47 @@ static void evdev_cleanup(struct evdev *evdev)
 
 /*
  * Create new evdev device. Note that input core serializes calls
- * to connect and disconnect so we don't need to lock evdev_table here.
+ * to connect and disconnect.
 */
 static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
 			 const struct input_device_id *id)
 {
 	struct evdev *evdev;
 	int minor;
+	int dev_no;
 	int error;
 
-	for (minor = 0; minor < EVDEV_MINORS; minor++)
-		if (!evdev_table[minor])
-			break;
-
-	if (minor == EVDEV_MINORS) {
-		pr_err("no more free evdev devices\n");
-		return -ENFILE;
+	minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
+	if (minor < 0) {
+		error = minor;
+		pr_err("failed to reserve new minor: %d\n", error);
+		return error;
 	}
 
 	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
-	if (!evdev)
-		return -ENOMEM;
+	if (!evdev) {
+		error = -ENOMEM;
+		goto err_free_minor;
+	}
 
 	INIT_LIST_HEAD(&evdev->client_list);
 	spin_lock_init(&evdev->client_lock);
 	mutex_init(&evdev->mutex);
 	init_waitqueue_head(&evdev->wait);
-
-	dev_set_name(&evdev->dev, "event%d", minor);
 	evdev->exist = true;
-	evdev->minor = minor;
+
+	dev_no = minor;
+	/* Normalize device number if it falls into legacy range */
+	if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
+		dev_no -= EVDEV_MINOR_BASE;
+	dev_set_name(&evdev->dev, "event%d", dev_no);
 
 	evdev->handle.dev = input_get_device(dev);
 	evdev->handle.name = dev_name(&evdev->dev);
 	evdev->handle.handler = handler;
 	evdev->handle.private = evdev;
 
-	evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor);
+	evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
 	evdev->dev.class = &input_class;
 	evdev->dev.parent = &dev->dev;
 	evdev->dev.release = evdev_free;
@@ -1037,7 +1000,8 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
 	if (error)
 		goto err_free_evdev;
 
-	error = evdev_install_chrdev(evdev);
+	cdev_init(&evdev->cdev, &evdev_fops);
+	error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
 	if (error)
 		goto err_unregister_handle;
 
@@ -1053,6 +1017,8 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
 	input_unregister_handle(&evdev->handle);
 err_free_evdev:
 	put_device(&evdev->dev);
+err_free_minor:
+	input_free_minor(minor);
 	return error;
 }
 
@@ -1062,6 +1028,7 @@ static void evdev_disconnect(struct input_handle *handle)
 
 	device_del(&evdev->dev);
 	evdev_cleanup(evdev);
+	input_free_minor(MINOR(evdev->dev.devt));
 	input_unregister_handle(handle);
 	put_device(&evdev->dev);
 }
@@ -1078,7 +1045,7 @@ static struct input_handler evdev_handler = {
 	.events		= evdev_events,
 	.connect	= evdev_connect,
 	.disconnect	= evdev_disconnect,
-	.fops		= &evdev_fops,
+	.legacy_minors	= true,
 	.minor		= EVDEV_MINOR_BASE,
 	.name		= "evdev",
 	.id_table	= evdev_ids,
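Note on the evdev.c change: with the static evdev_table[] gone, evdev_open() recovers its device from the inode via the embedded struct cdev instead of indexing a global array by minor, which is what allows input_get_new_minor() to hand out minors beyond the legacy range. A small userspace illustration of the container_of step (the structs here are mocks, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	/* same pointer arithmetic as the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct cdev { int dummy; };

	struct evdev {
		int open_count;
		struct cdev cdev;	/* embedded, like the new struct evdev */
	};

	int main(void)
	{
		struct evdev ev = { .open_count = 3 };
		struct cdev *c = &ev.cdev;	/* what inode->i_cdev would hand us */

		/* recover the enclosing structure without any lookup table */
		struct evdev *back = container_of(c, struct evdev, cdev);
		printf("open_count = %d\n", back->open_count);
		return 0;
	}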
diff --git a/drivers/input/input.c b/drivers/input/input.c index ace3f7c4226d..53a0ddee7872 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | #include <linux/idr.h> | ||
| 17 | #include <linux/input/mt.h> | 18 | #include <linux/input/mt.h> |
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| @@ -32,7 +33,9 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); | |||
| 32 | MODULE_DESCRIPTION("Input core"); | 33 | MODULE_DESCRIPTION("Input core"); |
| 33 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
| 34 | 35 | ||
| 35 | #define INPUT_DEVICES 256 | 36 | #define INPUT_MAX_CHAR_DEVICES 1024 |
| 37 | #define INPUT_FIRST_DYNAMIC_DEV 256 | ||
| 38 | static DEFINE_IDA(input_ida); | ||
| 36 | 39 | ||
| 37 | static LIST_HEAD(input_dev_list); | 40 | static LIST_HEAD(input_dev_list); |
| 38 | static LIST_HEAD(input_handler_list); | 41 | static LIST_HEAD(input_handler_list); |
| @@ -45,8 +48,6 @@ static LIST_HEAD(input_handler_list); | |||
| 45 | */ | 48 | */ |
| 46 | static DEFINE_MUTEX(input_mutex); | 49 | static DEFINE_MUTEX(input_mutex); |
| 47 | 50 | ||
| 48 | static struct input_handler *input_table[8]; | ||
| 49 | |||
| 50 | static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; | 51 | static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; |
| 51 | 52 | ||
| 52 | static inline int is_event_supported(unsigned int code, | 53 | static inline int is_event_supported(unsigned int code, |
| @@ -1218,7 +1219,7 @@ static int input_handlers_seq_show(struct seq_file *seq, void *v) | |||
| 1218 | seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); | 1219 | seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); |
| 1219 | if (handler->filter) | 1220 | if (handler->filter) |
| 1220 | seq_puts(seq, " (filter)"); | 1221 | seq_puts(seq, " (filter)"); |
| 1221 | if (handler->fops) | 1222 | if (handler->legacy_minors) |
| 1222 | seq_printf(seq, " Minor=%d", handler->minor); | 1223 | seq_printf(seq, " Minor=%d", handler->minor); |
| 1223 | seq_putc(seq, '\n'); | 1224 | seq_putc(seq, '\n'); |
| 1224 | 1225 | ||
| @@ -2016,22 +2017,14 @@ EXPORT_SYMBOL(input_unregister_device); | |||
| 2016 | int input_register_handler(struct input_handler *handler) | 2017 | int input_register_handler(struct input_handler *handler) |
| 2017 | { | 2018 | { |
| 2018 | struct input_dev *dev; | 2019 | struct input_dev *dev; |
| 2019 | int retval; | 2020 | int error; |
| 2020 | 2021 | ||
| 2021 | retval = mutex_lock_interruptible(&input_mutex); | 2022 | error = mutex_lock_interruptible(&input_mutex); |
| 2022 | if (retval) | 2023 | if (error) |
| 2023 | return retval; | 2024 | return error; |
| 2024 | 2025 | ||
| 2025 | INIT_LIST_HEAD(&handler->h_list); | 2026 | INIT_LIST_HEAD(&handler->h_list); |
| 2026 | 2027 | ||
| 2027 | if (handler->fops != NULL) { | ||
| 2028 | if (input_table[handler->minor >> 5]) { | ||
| 2029 | retval = -EBUSY; | ||
| 2030 | goto out; | ||
| 2031 | } | ||
| 2032 | input_table[handler->minor >> 5] = handler; | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | list_add_tail(&handler->node, &input_handler_list); | 2028 | list_add_tail(&handler->node, &input_handler_list); |
| 2036 | 2029 | ||
| 2037 | list_for_each_entry(dev, &input_dev_list, node) | 2030 | list_for_each_entry(dev, &input_dev_list, node) |
| @@ -2039,9 +2032,8 @@ int input_register_handler(struct input_handler *handler) | |||
| 2039 | 2032 | ||
| 2040 | input_wakeup_procfs_readers(); | 2033 | input_wakeup_procfs_readers(); |
| 2041 | 2034 | ||
| 2042 | out: | ||
| 2043 | mutex_unlock(&input_mutex); | 2035 | mutex_unlock(&input_mutex); |
| 2044 | return retval; | 2036 | return 0; |
| 2045 | } | 2037 | } |
| 2046 | EXPORT_SYMBOL(input_register_handler); | 2038 | EXPORT_SYMBOL(input_register_handler); |
| 2047 | 2039 | ||
| @@ -2064,9 +2056,6 @@ void input_unregister_handler(struct input_handler *handler) | |||
| 2064 | 2056 | ||
| 2065 | list_del_init(&handler->node); | 2057 | list_del_init(&handler->node); |
| 2066 | 2058 | ||
| 2067 | if (handler->fops != NULL) | ||
| 2068 | input_table[handler->minor >> 5] = NULL; | ||
| 2069 | |||
| 2070 | input_wakeup_procfs_readers(); | 2059 | input_wakeup_procfs_readers(); |
| 2071 | 2060 | ||
| 2072 | mutex_unlock(&input_mutex); | 2061 | mutex_unlock(&input_mutex); |
| @@ -2183,51 +2172,52 @@ void input_unregister_handle(struct input_handle *handle) | |||
| 2183 | } | 2172 | } |
| 2184 | EXPORT_SYMBOL(input_unregister_handle); | 2173 | EXPORT_SYMBOL(input_unregister_handle); |
| 2185 | 2174 | ||
| 2186 | static int input_open_file(struct inode *inode, struct file *file) | 2175 | /** |
| 2176 | * input_get_new_minor - allocates a new input minor number | ||
| 2177 | * @legacy_base: beginning of the legacy range to be searched | ||
| 2178 | * @legacy_num: size of the legacy range | ||
| 2179 | * @allow_dynamic: whether we can also take an ID from the dynamic range | ||
| 2180 | * | ||
| 2181 | * This function allocates a new device minor from the input major namespace. | ||
| 2182 | * The caller can request a legacy minor by specifying @legacy_base and | ||
| 2183 | * @legacy_num, and whether an ID may be taken from the dynamic range when | ||
| 2184 | * no free IDs are left in the legacy range. | ||
| 2185 | */ | ||
| 2186 | int input_get_new_minor(int legacy_base, unsigned int legacy_num, | ||
| 2187 | bool allow_dynamic) | ||
| 2187 | { | 2188 | { |
| 2188 | struct input_handler *handler; | ||
| 2189 | const struct file_operations *old_fops, *new_fops = NULL; | ||
| 2190 | int err; | ||
| 2191 | |||
| 2192 | err = mutex_lock_interruptible(&input_mutex); | ||
| 2193 | if (err) | ||
| 2194 | return err; | ||
| 2195 | |||
| 2196 | /* No load-on-demand here? */ | ||
| 2197 | handler = input_table[iminor(inode) >> 5]; | ||
| 2198 | if (handler) | ||
| 2199 | new_fops = fops_get(handler->fops); | ||
| 2200 | |||
| 2201 | mutex_unlock(&input_mutex); | ||
| 2202 | |||
| 2203 | /* | 2189 | /* |
| 2204 | * That's _really_ odd. Usually NULL ->open means "nothing special", | 2190 | * This function should be called from input handler's ->connect() |
| 2205 | * not "no device". Oh, well... | 2191 | * methods, which are serialized with input_mutex, so no additional |
| 2192 | * locking is needed here. | ||
| 2206 | */ | 2193 | */ |
| 2207 | if (!new_fops || !new_fops->open) { | 2194 | if (legacy_base >= 0) { |
| 2208 | fops_put(new_fops); | 2195 | int minor = ida_simple_get(&input_ida, |
| 2209 | err = -ENODEV; | 2196 | legacy_base, |
| 2210 | goto out; | 2197 | legacy_base + legacy_num, |
| 2198 | GFP_KERNEL); | ||
| 2199 | if (minor >= 0 || !allow_dynamic) | ||
| 2200 | return minor; | ||
| 2211 | } | 2201 | } |
| 2212 | 2202 | ||
| 2213 | old_fops = file->f_op; | 2203 | return ida_simple_get(&input_ida, |
| 2214 | file->f_op = new_fops; | 2204 | INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES, |
| 2215 | 2205 | GFP_KERNEL); | |
| 2216 | err = new_fops->open(inode, file); | ||
| 2217 | if (err) { | ||
| 2218 | fops_put(file->f_op); | ||
| 2219 | file->f_op = fops_get(old_fops); | ||
| 2220 | } | ||
| 2221 | fops_put(old_fops); | ||
| 2222 | out: | ||
| 2223 | return err; | ||
| 2224 | } | 2206 | } |
| 2207 | EXPORT_SYMBOL(input_get_new_minor); | ||
| 2225 | 2208 | ||
| 2226 | static const struct file_operations input_fops = { | 2209 | /** |
| 2227 | .owner = THIS_MODULE, | 2210 | * input_free_minor - release previously allocated minor |
| 2228 | .open = input_open_file, | 2211 | * @minor: minor to be released |
| 2229 | .llseek = noop_llseek, | 2212 | * |
| 2230 | }; | 2213 | * This function releases previously allocated input minor so that it can be |
| 2214 | * reused later. | ||
| 2215 | */ | ||
| 2216 | void input_free_minor(unsigned int minor) | ||
| 2217 | { | ||
| 2218 | ida_simple_remove(&input_ida, minor); | ||
| 2219 | } | ||
| 2220 | EXPORT_SYMBOL(input_free_minor); | ||
| 2231 | 2221 | ||
| 2232 | static int __init input_init(void) | 2222 | static int __init input_init(void) |
| 2233 | { | 2223 | { |
| @@ -2243,7 +2233,8 @@ static int __init input_init(void) | |||
| 2243 | if (err) | 2233 | if (err) |
| 2244 | goto fail1; | 2234 | goto fail1; |
| 2245 | 2235 | ||
| 2246 | err = register_chrdev(INPUT_MAJOR, "input", &input_fops); | 2236 | err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0), |
| 2237 | INPUT_MAX_CHAR_DEVICES, "input"); | ||
| 2247 | if (err) { | 2238 | if (err) { |
| 2248 | pr_err("unable to register char major %d", INPUT_MAJOR); | 2239 | pr_err("unable to register char major %d", INPUT_MAJOR); |
| 2249 | goto fail2; | 2240 | goto fail2; |
| @@ -2259,7 +2250,8 @@ static int __init input_init(void) | |||
| 2259 | static void __exit input_exit(void) | 2250 | static void __exit input_exit(void) |
| 2260 | { | 2251 | { |
| 2261 | input_proc_exit(); | 2252 | input_proc_exit(); |
| 2262 | unregister_chrdev(INPUT_MAJOR, "input"); | 2253 | unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0), |
| 2254 | INPUT_MAX_CHAR_DEVICES); | ||
| 2263 | class_unregister(&input_class); | 2255 | class_unregister(&input_class); |
| 2264 | } | 2256 | } |
| 2265 | 2257 | ||
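Taken together, the input.c changes replace the old 8-slot input_table (each slot covering 32 minors via "minor >> 5") and the shared input_fops with an IDA-backed minor namespace: handlers now reserve their own minors, with dynamic overflow in [256, 1024), and register their own character devices. A minimal sketch of the resulting connect()-side pattern, assuming a hypothetical handler that embeds a struct cdev in its per-device state (mydev, mydev_fops, MYDEV_MINOR_BASE and MYDEV_MINORS are illustrative names, not part of this change):

	/* Reserve a minor: legacy window first, dynamic range as fallback. */
	minor = input_get_new_minor(MYDEV_MINOR_BASE, MYDEV_MINORS, true);
	if (minor < 0)
		return minor;

	mydev->dev.devt = MKDEV(INPUT_MAJOR, minor);

	/* Bind this device's fops to its own cdev at the reserved minor. */
	cdev_init(&mydev->cdev, &mydev_fops);
	error = cdev_add(&mydev->cdev, mydev->dev.devt, 1);
	if (error) {
		input_free_minor(minor);	/* unwind on failure */
		return error;
	}

This is exactly the shape of the evdev, joydev and mousedev conversions below; open() then finds the device through inode->i_cdev instead of a minor-indexed table.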
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 78f323ea1e4b..b62b5891f399 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/poll.h> | 27 | #include <linux/poll.h> |
| 28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
| 29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
| 30 | #include <linux/cdev.h> | ||
| 30 | 31 | ||
| 31 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); | 32 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); |
| 32 | MODULE_DESCRIPTION("Joystick device interfaces"); | 33 | MODULE_DESCRIPTION("Joystick device interfaces"); |
| @@ -39,13 +40,13 @@ MODULE_LICENSE("GPL"); | |||
| 39 | 40 | ||
| 40 | struct joydev { | 41 | struct joydev { |
| 41 | int open; | 42 | int open; |
| 42 | int minor; | ||
| 43 | struct input_handle handle; | 43 | struct input_handle handle; |
| 44 | wait_queue_head_t wait; | 44 | wait_queue_head_t wait; |
| 45 | struct list_head client_list; | 45 | struct list_head client_list; |
| 46 | spinlock_t client_lock; /* protects client_list */ | 46 | spinlock_t client_lock; /* protects client_list */ |
| 47 | struct mutex mutex; | 47 | struct mutex mutex; |
| 48 | struct device dev; | 48 | struct device dev; |
| 49 | struct cdev cdev; | ||
| 49 | bool exist; | 50 | bool exist; |
| 50 | 51 | ||
| 51 | struct js_corr corr[ABS_CNT]; | 52 | struct js_corr corr[ABS_CNT]; |
| @@ -70,9 +71,6 @@ struct joydev_client { | |||
| 70 | struct list_head node; | 71 | struct list_head node; |
| 71 | }; | 72 | }; |
| 72 | 73 | ||
| 73 | static struct joydev *joydev_table[JOYDEV_MINORS]; | ||
| 74 | static DEFINE_MUTEX(joydev_table_mutex); | ||
| 75 | |||
| 76 | static int joydev_correct(int value, struct js_corr *corr) | 74 | static int joydev_correct(int value, struct js_corr *corr) |
| 77 | { | 75 | { |
| 78 | switch (corr->type) { | 76 | switch (corr->type) { |
| @@ -252,30 +250,14 @@ static int joydev_release(struct inode *inode, struct file *file) | |||
| 252 | 250 | ||
| 253 | static int joydev_open(struct inode *inode, struct file *file) | 251 | static int joydev_open(struct inode *inode, struct file *file) |
| 254 | { | 252 | { |
| 253 | struct joydev *joydev = | ||
| 254 | container_of(inode->i_cdev, struct joydev, cdev); | ||
| 255 | struct joydev_client *client; | 255 | struct joydev_client *client; |
| 256 | struct joydev *joydev; | ||
| 257 | int i = iminor(inode) - JOYDEV_MINOR_BASE; | ||
| 258 | int error; | 256 | int error; |
| 259 | 257 | ||
| 260 | if (i >= JOYDEV_MINORS) | ||
| 261 | return -ENODEV; | ||
| 262 | |||
| 263 | error = mutex_lock_interruptible(&joydev_table_mutex); | ||
| 264 | if (error) | ||
| 265 | return error; | ||
| 266 | joydev = joydev_table[i]; | ||
| 267 | if (joydev) | ||
| 268 | get_device(&joydev->dev); | ||
| 269 | mutex_unlock(&joydev_table_mutex); | ||
| 270 | |||
| 271 | if (!joydev) | ||
| 272 | return -ENODEV; | ||
| 273 | |||
| 274 | client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL); | 258 | client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL); |
| 275 | if (!client) { | 259 | if (!client) |
| 276 | error = -ENOMEM; | 260 | return -ENOMEM; |
| 277 | goto err_put_joydev; | ||
| 278 | } | ||
| 279 | 261 | ||
| 280 | spin_lock_init(&client->buffer_lock); | 262 | spin_lock_init(&client->buffer_lock); |
| 281 | client->joydev = joydev; | 263 | client->joydev = joydev; |
| @@ -288,13 +270,12 @@ static int joydev_open(struct inode *inode, struct file *file) | |||
| 288 | file->private_data = client; | 270 | file->private_data = client; |
| 289 | nonseekable_open(inode, file); | 271 | nonseekable_open(inode, file); |
| 290 | 272 | ||
| 273 | get_device(&joydev->dev); | ||
| 291 | return 0; | 274 | return 0; |
| 292 | 275 | ||
| 293 | err_free_client: | 276 | err_free_client: |
| 294 | joydev_detach_client(joydev, client); | 277 | joydev_detach_client(joydev, client); |
| 295 | kfree(client); | 278 | kfree(client); |
| 296 | err_put_joydev: | ||
| 297 | put_device(&joydev->dev); | ||
| 298 | return error; | 279 | return error; |
| 299 | } | 280 | } |
| 300 | 281 | ||
| @@ -742,19 +723,6 @@ static const struct file_operations joydev_fops = { | |||
| 742 | .llseek = no_llseek, | 723 | .llseek = no_llseek, |
| 743 | }; | 724 | }; |
| 744 | 725 | ||
| 745 | static int joydev_install_chrdev(struct joydev *joydev) | ||
| 746 | { | ||
| 747 | joydev_table[joydev->minor] = joydev; | ||
| 748 | return 0; | ||
| 749 | } | ||
| 750 | |||
| 751 | static void joydev_remove_chrdev(struct joydev *joydev) | ||
| 752 | { | ||
| 753 | mutex_lock(&joydev_table_mutex); | ||
| 754 | joydev_table[joydev->minor] = NULL; | ||
| 755 | mutex_unlock(&joydev_table_mutex); | ||
| 756 | } | ||
| 757 | |||
| 758 | /* | 726 | /* |
| 759 | * Mark device non-existent. This disables writes, ioctls and | 727 | * Mark device non-existent. This disables writes, ioctls and |
| 760 | * prevents new users from opening the device. Already posted | 728 | * prevents new users from opening the device. Already posted |
| @@ -773,7 +741,8 @@ static void joydev_cleanup(struct joydev *joydev) | |||
| 773 | 741 | ||
| 774 | joydev_mark_dead(joydev); | 742 | joydev_mark_dead(joydev); |
| 775 | joydev_hangup(joydev); | 743 | joydev_hangup(joydev); |
| 776 | joydev_remove_chrdev(joydev); | 744 | |
| 745 | cdev_del(&joydev->cdev); | ||
| 777 | 746 | ||
| 778 | /* joydev is marked dead so no one else accesses joydev->open */ | 747 | /* joydev is marked dead so no one else accesses joydev->open */ |
| 779 | if (joydev->open) | 748 | if (joydev->open) |
| @@ -798,30 +767,33 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, | |||
| 798 | const struct input_device_id *id) | 767 | const struct input_device_id *id) |
| 799 | { | 768 | { |
| 800 | struct joydev *joydev; | 769 | struct joydev *joydev; |
| 801 | int i, j, t, minor; | 770 | int i, j, t, minor, dev_no; |
| 802 | int error; | 771 | int error; |
| 803 | 772 | ||
| 804 | for (minor = 0; minor < JOYDEV_MINORS; minor++) | 773 | minor = input_get_new_minor(JOYDEV_MINOR_BASE, JOYDEV_MINORS, true); |
| 805 | if (!joydev_table[minor]) | 774 | if (minor < 0) { |
| 806 | break; | 775 | error = minor; |
| 807 | 776 | pr_err("failed to reserve new minor: %d\n", error); | |
| 808 | if (minor == JOYDEV_MINORS) { | 777 | return error; |
| 809 | pr_err("no more free joydev devices\n"); | ||
| 810 | return -ENFILE; | ||
| 811 | } | 778 | } |
| 812 | 779 | ||
| 813 | joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL); | 780 | joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL); |
| 814 | if (!joydev) | 781 | if (!joydev) { |
| 815 | return -ENOMEM; | 782 | error = -ENOMEM; |
| 783 | goto err_free_minor; | ||
| 784 | } | ||
| 816 | 785 | ||
| 817 | INIT_LIST_HEAD(&joydev->client_list); | 786 | INIT_LIST_HEAD(&joydev->client_list); |
| 818 | spin_lock_init(&joydev->client_lock); | 787 | spin_lock_init(&joydev->client_lock); |
| 819 | mutex_init(&joydev->mutex); | 788 | mutex_init(&joydev->mutex); |
| 820 | init_waitqueue_head(&joydev->wait); | 789 | init_waitqueue_head(&joydev->wait); |
| 821 | |||
| 822 | dev_set_name(&joydev->dev, "js%d", minor); | ||
| 823 | joydev->exist = true; | 790 | joydev->exist = true; |
| 824 | joydev->minor = minor; | 791 | |
| 792 | dev_no = minor; | ||
| 793 | /* Normalize device number if it falls into legacy range */ | ||
| 794 | if (dev_no < JOYDEV_MINOR_BASE + JOYDEV_MINORS) | ||
| 795 | dev_no -= JOYDEV_MINOR_BASE; | ||
| 796 | dev_set_name(&joydev->dev, "js%d", dev_no); | ||
| 825 | 797 | ||
| 826 | joydev->handle.dev = input_get_device(dev); | 798 | joydev->handle.dev = input_get_device(dev); |
| 827 | joydev->handle.name = dev_name(&joydev->dev); | 799 | joydev->handle.name = dev_name(&joydev->dev); |
| @@ -875,7 +847,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, | |||
| 875 | } | 847 | } |
| 876 | } | 848 | } |
| 877 | 849 | ||
| 878 | joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor); | 850 | joydev->dev.devt = MKDEV(INPUT_MAJOR, minor); |
| 879 | joydev->dev.class = &input_class; | 851 | joydev->dev.class = &input_class; |
| 880 | joydev->dev.parent = &dev->dev; | 852 | joydev->dev.parent = &dev->dev; |
| 881 | joydev->dev.release = joydev_free; | 853 | joydev->dev.release = joydev_free; |
| @@ -885,7 +857,8 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, | |||
| 885 | if (error) | 857 | if (error) |
| 886 | goto err_free_joydev; | 858 | goto err_free_joydev; |
| 887 | 859 | ||
| 888 | error = joydev_install_chrdev(joydev); | 860 | cdev_init(&joydev->cdev, &joydev_fops); |
| 861 | error = cdev_add(&joydev->cdev, joydev->dev.devt, 1); | ||
| 889 | if (error) | 862 | if (error) |
| 890 | goto err_unregister_handle; | 863 | goto err_unregister_handle; |
| 891 | 864 | ||
| @@ -901,6 +874,8 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, | |||
| 901 | input_unregister_handle(&joydev->handle); | 874 | input_unregister_handle(&joydev->handle); |
| 902 | err_free_joydev: | 875 | err_free_joydev: |
| 903 | put_device(&joydev->dev); | 876 | put_device(&joydev->dev); |
| 877 | err_free_minor: | ||
| 878 | input_free_minor(minor); | ||
| 904 | return error; | 879 | return error; |
| 905 | } | 880 | } |
| 906 | 881 | ||
| @@ -910,6 +885,7 @@ static void joydev_disconnect(struct input_handle *handle) | |||
| 910 | 885 | ||
| 911 | device_del(&joydev->dev); | 886 | device_del(&joydev->dev); |
| 912 | joydev_cleanup(joydev); | 887 | joydev_cleanup(joydev); |
| 888 | input_free_minor(MINOR(joydev->dev.devt)); | ||
| 913 | input_unregister_handle(handle); | 889 | input_unregister_handle(handle); |
| 914 | put_device(&joydev->dev); | 890 | put_device(&joydev->dev); |
| 915 | } | 891 | } |
| @@ -961,7 +937,7 @@ static struct input_handler joydev_handler = { | |||
| 961 | .match = joydev_match, | 937 | .match = joydev_match, |
| 962 | .connect = joydev_connect, | 938 | .connect = joydev_connect, |
| 963 | .disconnect = joydev_disconnect, | 939 | .disconnect = joydev_disconnect, |
| 964 | .fops = &joydev_fops, | 940 | .legacy_minors = true, |
| 965 | .minor = JOYDEV_MINOR_BASE, | 941 | .minor = JOYDEV_MINOR_BASE, |
| 966 | .name = "joydev", | 942 | .name = "joydev", |
| 967 | .id_table = joydev_ids, | 943 | .id_table = joydev_ids, |
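With a per-device cdev registered at its reserved minor, joydev_open() above no longer needs a lookup table: the VFS stores the opened cdev in inode->i_cdev, and container_of() recovers the structure embedding it. A stripped-down sketch of that lookup, with hypothetical names and omitting the reference counting and client bookkeeping joydev performs:

	struct mydev {
		struct cdev cdev;
		/* ... per-device state ... */
	};

	static int mydev_open(struct inode *inode, struct file *file)
	{
		/* inode->i_cdev was set by the VFS from our cdev_add() */
		struct mydev *mydev =
			container_of(inode->i_cdev, struct mydev, cdev);

		file->private_data = mydev;
		return nonseekable_open(inode, file);
	}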
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c index 277e26dc910e..9d7a111486f7 100644 --- a/drivers/input/keyboard/samsung-keypad.c +++ b/drivers/input/keyboard/samsung-keypad.c | |||
| @@ -431,6 +431,12 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev) | |||
| 431 | goto err_unmap_base; | 431 | goto err_unmap_base; |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | error = clk_prepare(keypad->clk); | ||
| 435 | if (error) { | ||
| 436 | dev_err(&pdev->dev, "keypad clock prepare failed\n"); | ||
| 437 | goto err_put_clk; | ||
| 438 | } | ||
| 439 | |||
| 434 | keypad->input_dev = input_dev; | 440 | keypad->input_dev = input_dev; |
| 435 | keypad->pdev = pdev; | 441 | keypad->pdev = pdev; |
| 436 | keypad->row_shift = row_shift; | 442 | keypad->row_shift = row_shift; |
| @@ -461,7 +467,7 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev) | |||
| 461 | keypad->keycodes, input_dev); | 467 | keypad->keycodes, input_dev); |
| 462 | if (error) { | 468 | if (error) { |
| 463 | dev_err(&pdev->dev, "failed to build keymap\n"); | 469 | dev_err(&pdev->dev, "failed to build keymap\n"); |
| 464 | goto err_put_clk; | 470 | goto err_unprepare_clk; |
| 465 | } | 471 | } |
| 466 | 472 | ||
| 467 | input_set_capability(input_dev, EV_MSC, MSC_SCAN); | 473 | input_set_capability(input_dev, EV_MSC, MSC_SCAN); |
| @@ -503,6 +509,8 @@ err_free_irq: | |||
| 503 | pm_runtime_disable(&pdev->dev); | 509 | pm_runtime_disable(&pdev->dev); |
| 504 | device_init_wakeup(&pdev->dev, 0); | 510 | device_init_wakeup(&pdev->dev, 0); |
| 505 | platform_set_drvdata(pdev, NULL); | 511 | platform_set_drvdata(pdev, NULL); |
| 512 | err_unprepare_clk: | ||
| 513 | clk_unprepare(keypad->clk); | ||
| 506 | err_put_clk: | 514 | err_put_clk: |
| 507 | clk_put(keypad->clk); | 515 | clk_put(keypad->clk); |
| 508 | samsung_keypad_dt_gpio_free(keypad); | 516 | samsung_keypad_dt_gpio_free(keypad); |
| @@ -531,6 +539,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev) | |||
| 531 | */ | 539 | */ |
| 532 | free_irq(keypad->irq, keypad); | 540 | free_irq(keypad->irq, keypad); |
| 533 | 541 | ||
| 542 | clk_unprepare(keypad->clk); | ||
| 534 | clk_put(keypad->clk); | 543 | clk_put(keypad->clk); |
| 535 | samsung_keypad_dt_gpio_free(keypad); | 544 | samsung_keypad_dt_gpio_free(keypad); |
| 536 | 545 | ||
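The samsung-keypad fix follows the common clock framework's two-phase model: clk_prepare() does the possibly-sleeping setup once, clk_enable()/clk_disable() do the atomic gating, and teardown unwinds in reverse order. A sketch of the canonical pairing (the "keypad" clock id is illustrative):

	struct clk *clk;
	int error;

	clk = clk_get(&pdev->dev, "keypad");	/* or devm_clk_get() */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = clk_prepare(clk);	/* may sleep: do once, e.g. at probe */
	if (error) {
		clk_put(clk);
		return error;
	}

	error = clk_enable(clk);	/* atomic-safe: per use or per resume */
	if (error) {
		clk_unprepare(clk);
		clk_put(clk);
		return error;
	}

	/* ... hardware in use ... */

	clk_disable(clk);
	clk_unprepare(clk);		/* teardown unwinds in reverse order */
	clk_put(clk);

clk_prepare_enable() and clk_disable_unprepare() combine the two phases where sleeping is allowed; drivers keep them separate when enable/disable must also run from atomic context.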
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 964e43d81e29..a1b4c37956b2 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
| @@ -24,10 +24,8 @@ | |||
| 24 | #include <linux/random.h> | 24 | #include <linux/random.h> |
| 25 | #include <linux/major.h> | 25 | #include <linux/major.h> |
| 26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
| 27 | #include <linux/cdev.h> | ||
| 27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
| 28 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX | ||
| 29 | #include <linux/miscdevice.h> | ||
| 30 | #endif | ||
| 31 | 29 | ||
| 32 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); | 30 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); |
| 33 | MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces"); | 31 | MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces"); |
| @@ -61,17 +59,18 @@ struct mousedev_hw_data { | |||
| 61 | 59 | ||
| 62 | struct mousedev { | 60 | struct mousedev { |
| 63 | int open; | 61 | int open; |
| 64 | int minor; | ||
| 65 | struct input_handle handle; | 62 | struct input_handle handle; |
| 66 | wait_queue_head_t wait; | 63 | wait_queue_head_t wait; |
| 67 | struct list_head client_list; | 64 | struct list_head client_list; |
| 68 | spinlock_t client_lock; /* protects client_list */ | 65 | spinlock_t client_lock; /* protects client_list */ |
| 69 | struct mutex mutex; | 66 | struct mutex mutex; |
| 70 | struct device dev; | 67 | struct device dev; |
| 68 | struct cdev cdev; | ||
| 71 | bool exist; | 69 | bool exist; |
| 70 | bool is_mixdev; | ||
| 72 | 71 | ||
| 73 | struct list_head mixdev_node; | 72 | struct list_head mixdev_node; |
| 74 | int mixdev_open; | 73 | bool opened_by_mixdev; |
| 75 | 74 | ||
| 76 | struct mousedev_hw_data packet; | 75 | struct mousedev_hw_data packet; |
| 77 | unsigned int pkt_count; | 76 | unsigned int pkt_count; |
| @@ -114,10 +113,6 @@ struct mousedev_client { | |||
| 114 | static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; | 113 | static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; |
| 115 | static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; | 114 | static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; |
| 116 | 115 | ||
| 117 | static struct input_handler mousedev_handler; | ||
| 118 | |||
| 119 | static struct mousedev *mousedev_table[MOUSEDEV_MINORS]; | ||
| 120 | static DEFINE_MUTEX(mousedev_table_mutex); | ||
| 121 | static struct mousedev *mousedev_mix; | 116 | static struct mousedev *mousedev_mix; |
| 122 | static LIST_HEAD(mousedev_mix_list); | 117 | static LIST_HEAD(mousedev_mix_list); |
| 123 | 118 | ||
| @@ -433,7 +428,7 @@ static int mousedev_open_device(struct mousedev *mousedev) | |||
| 433 | if (retval) | 428 | if (retval) |
| 434 | return retval; | 429 | return retval; |
| 435 | 430 | ||
| 436 | if (mousedev->minor == MOUSEDEV_MIX) | 431 | if (mousedev->is_mixdev) |
| 437 | mixdev_open_devices(); | 432 | mixdev_open_devices(); |
| 438 | else if (!mousedev->exist) | 433 | else if (!mousedev->exist) |
| 439 | retval = -ENODEV; | 434 | retval = -ENODEV; |
| @@ -451,7 +446,7 @@ static void mousedev_close_device(struct mousedev *mousedev) | |||
| 451 | { | 446 | { |
| 452 | mutex_lock(&mousedev->mutex); | 447 | mutex_lock(&mousedev->mutex); |
| 453 | 448 | ||
| 454 | if (mousedev->minor == MOUSEDEV_MIX) | 449 | if (mousedev->is_mixdev) |
| 455 | mixdev_close_devices(); | 450 | mixdev_close_devices(); |
| 456 | else if (mousedev->exist && !--mousedev->open) | 451 | else if (mousedev->exist && !--mousedev->open) |
| 457 | input_close_device(&mousedev->handle); | 452 | input_close_device(&mousedev->handle); |
| @@ -472,11 +467,11 @@ static void mixdev_open_devices(void) | |||
| 472 | return; | 467 | return; |
| 473 | 468 | ||
| 474 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { | 469 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
| 475 | if (!mousedev->mixdev_open) { | 470 | if (!mousedev->opened_by_mixdev) { |
| 476 | if (mousedev_open_device(mousedev)) | 471 | if (mousedev_open_device(mousedev)) |
| 477 | continue; | 472 | continue; |
| 478 | 473 | ||
| 479 | mousedev->mixdev_open = 1; | 474 | mousedev->opened_by_mixdev = true; |
| 480 | } | 475 | } |
| 481 | } | 476 | } |
| 482 | } | 477 | } |
| @@ -494,8 +489,8 @@ static void mixdev_close_devices(void) | |||
| 494 | return; | 489 | return; |
| 495 | 490 | ||
| 496 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { | 491 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
| 497 | if (mousedev->mixdev_open) { | 492 | if (mousedev->opened_by_mixdev) { |
| 498 | mousedev->mixdev_open = 0; | 493 | mousedev->opened_by_mixdev = false; |
| 499 | mousedev_close_device(mousedev); | 494 | mousedev_close_device(mousedev); |
| 500 | } | 495 | } |
| 501 | } | 496 | } |
| @@ -538,35 +533,17 @@ static int mousedev_open(struct inode *inode, struct file *file) | |||
| 538 | struct mousedev_client *client; | 533 | struct mousedev_client *client; |
| 539 | struct mousedev *mousedev; | 534 | struct mousedev *mousedev; |
| 540 | int error; | 535 | int error; |
| 541 | int i; | ||
| 542 | 536 | ||
| 543 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX | 537 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX |
| 544 | if (imajor(inode) == MISC_MAJOR) | 538 | if (imajor(inode) == MISC_MAJOR) |
| 545 | i = MOUSEDEV_MIX; | 539 | mousedev = mousedev_mix; |
| 546 | else | 540 | else |
| 547 | #endif | 541 | #endif |
| 548 | i = iminor(inode) - MOUSEDEV_MINOR_BASE; | 542 | mousedev = container_of(inode->i_cdev, struct mousedev, cdev); |
| 549 | |||
| 550 | if (i >= MOUSEDEV_MINORS) | ||
| 551 | return -ENODEV; | ||
| 552 | |||
| 553 | error = mutex_lock_interruptible(&mousedev_table_mutex); | ||
| 554 | if (error) | ||
| 555 | return error; | ||
| 556 | |||
| 557 | mousedev = mousedev_table[i]; | ||
| 558 | if (mousedev) | ||
| 559 | get_device(&mousedev->dev); | ||
| 560 | mutex_unlock(&mousedev_table_mutex); | ||
| 561 | |||
| 562 | if (!mousedev) | ||
| 563 | return -ENODEV; | ||
| 564 | 543 | ||
| 565 | client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); | 544 | client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); |
| 566 | if (!client) { | 545 | if (!client) |
| 567 | error = -ENOMEM; | 546 | return -ENOMEM; |
| 568 | goto err_put_mousedev; | ||
| 569 | } | ||
| 570 | 547 | ||
| 571 | spin_lock_init(&client->packet_lock); | 548 | spin_lock_init(&client->packet_lock); |
| 572 | client->pos_x = xres / 2; | 549 | client->pos_x = xres / 2; |
| @@ -579,13 +556,14 @@ static int mousedev_open(struct inode *inode, struct file *file) | |||
| 579 | goto err_free_client; | 556 | goto err_free_client; |
| 580 | 557 | ||
| 581 | file->private_data = client; | 558 | file->private_data = client; |
| 559 | nonseekable_open(inode, file); | ||
| 560 | |||
| 561 | get_device(&mousedev->dev); | ||
| 582 | return 0; | 562 | return 0; |
| 583 | 563 | ||
| 584 | err_free_client: | 564 | err_free_client: |
| 585 | mousedev_detach_client(mousedev, client); | 565 | mousedev_detach_client(mousedev, client); |
| 586 | kfree(client); | 566 | kfree(client); |
| 587 | err_put_mousedev: | ||
| 588 | put_device(&mousedev->dev); | ||
| 589 | return error; | 567 | return error; |
| 590 | } | 568 | } |
| 591 | 569 | ||
| @@ -785,29 +763,16 @@ static unsigned int mousedev_poll(struct file *file, poll_table *wait) | |||
| 785 | } | 763 | } |
| 786 | 764 | ||
| 787 | static const struct file_operations mousedev_fops = { | 765 | static const struct file_operations mousedev_fops = { |
| 788 | .owner = THIS_MODULE, | 766 | .owner = THIS_MODULE, |
| 789 | .read = mousedev_read, | 767 | .read = mousedev_read, |
| 790 | .write = mousedev_write, | 768 | .write = mousedev_write, |
| 791 | .poll = mousedev_poll, | 769 | .poll = mousedev_poll, |
| 792 | .open = mousedev_open, | 770 | .open = mousedev_open, |
| 793 | .release = mousedev_release, | 771 | .release = mousedev_release, |
| 794 | .fasync = mousedev_fasync, | 772 | .fasync = mousedev_fasync, |
| 795 | .llseek = noop_llseek, | 773 | .llseek = noop_llseek, |
| 796 | }; | 774 | }; |
| 797 | 775 | ||
| 798 | static int mousedev_install_chrdev(struct mousedev *mousedev) | ||
| 799 | { | ||
| 800 | mousedev_table[mousedev->minor] = mousedev; | ||
| 801 | return 0; | ||
| 802 | } | ||
| 803 | |||
| 804 | static void mousedev_remove_chrdev(struct mousedev *mousedev) | ||
| 805 | { | ||
| 806 | mutex_lock(&mousedev_table_mutex); | ||
| 807 | mousedev_table[mousedev->minor] = NULL; | ||
| 808 | mutex_unlock(&mousedev_table_mutex); | ||
| 809 | } | ||
| 810 | |||
| 811 | /* | 776 | /* |
| 812 | * Mark device non-existent. This disables writes, ioctls and | 777 | * Mark device non-existent. This disables writes, ioctls and |
| 813 | * prevents new users from opening the device. Already posted | 778 | * prevents new users from opening the device. Already posted |
| @@ -842,24 +807,50 @@ static void mousedev_cleanup(struct mousedev *mousedev) | |||
| 842 | 807 | ||
| 843 | mousedev_mark_dead(mousedev); | 808 | mousedev_mark_dead(mousedev); |
| 844 | mousedev_hangup(mousedev); | 809 | mousedev_hangup(mousedev); |
| 845 | mousedev_remove_chrdev(mousedev); | 810 | |
| 811 | cdev_del(&mousedev->cdev); | ||
| 846 | 812 | ||
| 847 | /* mousedev is marked dead so no one else accesses mousedev->open */ | 813 | /* mousedev is marked dead so no one else accesses mousedev->open */ |
| 848 | if (mousedev->open) | 814 | if (mousedev->open) |
| 849 | input_close_device(handle); | 815 | input_close_device(handle); |
| 850 | } | 816 | } |
| 851 | 817 | ||
| 818 | static int mousedev_reserve_minor(bool mixdev) | ||
| 819 | { | ||
| 820 | int minor; | ||
| 821 | |||
| 822 | if (mixdev) { | ||
| 823 | minor = input_get_new_minor(MOUSEDEV_MIX, 1, false); | ||
| 824 | if (minor < 0) | ||
| 825 | pr_err("failed to reserve mixdev minor: %d\n", minor); | ||
| 826 | } else { | ||
| 827 | minor = input_get_new_minor(MOUSEDEV_MINOR_BASE, | ||
| 828 | MOUSEDEV_MINORS, true); | ||
| 829 | if (minor < 0) | ||
| 830 | pr_err("failed to reserve new minor: %d\n", minor); | ||
| 831 | } | ||
| 832 | |||
| 833 | return minor; | ||
| 834 | } | ||
| 835 | |||
| 852 | static struct mousedev *mousedev_create(struct input_dev *dev, | 836 | static struct mousedev *mousedev_create(struct input_dev *dev, |
| 853 | struct input_handler *handler, | 837 | struct input_handler *handler, |
| 854 | int minor) | 838 | bool mixdev) |
| 855 | { | 839 | { |
| 856 | struct mousedev *mousedev; | 840 | struct mousedev *mousedev; |
| 841 | int minor; | ||
| 857 | int error; | 842 | int error; |
| 858 | 843 | ||
| 844 | minor = mousedev_reserve_minor(mixdev); | ||
| 845 | if (minor < 0) { | ||
| 846 | error = minor; | ||
| 847 | goto err_out; | ||
| 848 | } | ||
| 849 | |||
| 859 | mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); | 850 | mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); |
| 860 | if (!mousedev) { | 851 | if (!mousedev) { |
| 861 | error = -ENOMEM; | 852 | error = -ENOMEM; |
| 862 | goto err_out; | 853 | goto err_free_minor; |
| 863 | } | 854 | } |
| 864 | 855 | ||
| 865 | INIT_LIST_HEAD(&mousedev->client_list); | 856 | INIT_LIST_HEAD(&mousedev->client_list); |
| @@ -867,16 +858,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev, | |||
| 867 | spin_lock_init(&mousedev->client_lock); | 858 | spin_lock_init(&mousedev->client_lock); |
| 868 | mutex_init(&mousedev->mutex); | 859 | mutex_init(&mousedev->mutex); |
| 869 | lockdep_set_subclass(&mousedev->mutex, | 860 | lockdep_set_subclass(&mousedev->mutex, |
| 870 | minor == MOUSEDEV_MIX ? SINGLE_DEPTH_NESTING : 0); | 861 | mixdev ? SINGLE_DEPTH_NESTING : 0); |
| 871 | init_waitqueue_head(&mousedev->wait); | 862 | init_waitqueue_head(&mousedev->wait); |
| 872 | 863 | ||
| 873 | if (minor == MOUSEDEV_MIX) | 864 | if (mixdev) { |
| 874 | dev_set_name(&mousedev->dev, "mice"); | 865 | dev_set_name(&mousedev->dev, "mice"); |
| 875 | else | 866 | } else { |
| 876 | dev_set_name(&mousedev->dev, "mouse%d", minor); | 867 | int dev_no = minor; |
| 868 | /* Normalize device number if it falls into legacy range */ | ||
| 869 | if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) | ||
| 870 | dev_no -= MOUSEDEV_MINOR_BASE; | ||
| 871 | dev_set_name(&mousedev->dev, "mouse%d", dev_no); | ||
| 872 | } | ||
| 877 | 873 | ||
| 878 | mousedev->minor = minor; | ||
| 879 | mousedev->exist = true; | 874 | mousedev->exist = true; |
| 875 | mousedev->is_mixdev = mixdev; | ||
| 880 | mousedev->handle.dev = input_get_device(dev); | 876 | mousedev->handle.dev = input_get_device(dev); |
| 881 | mousedev->handle.name = dev_name(&mousedev->dev); | 877 | mousedev->handle.name = dev_name(&mousedev->dev); |
| 882 | mousedev->handle.handler = handler; | 878 | mousedev->handle.handler = handler; |
| @@ -885,17 +881,18 @@ static struct mousedev *mousedev_create(struct input_dev *dev, | |||
| 885 | mousedev->dev.class = &input_class; | 881 | mousedev->dev.class = &input_class; |
| 886 | if (dev) | 882 | if (dev) |
| 887 | mousedev->dev.parent = &dev->dev; | 883 | mousedev->dev.parent = &dev->dev; |
| 888 | mousedev->dev.devt = MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor); | 884 | mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor); |
| 889 | mousedev->dev.release = mousedev_free; | 885 | mousedev->dev.release = mousedev_free; |
| 890 | device_initialize(&mousedev->dev); | 886 | device_initialize(&mousedev->dev); |
| 891 | 887 | ||
| 892 | if (minor != MOUSEDEV_MIX) { | 888 | if (!mixdev) { |
| 893 | error = input_register_handle(&mousedev->handle); | 889 | error = input_register_handle(&mousedev->handle); |
| 894 | if (error) | 890 | if (error) |
| 895 | goto err_free_mousedev; | 891 | goto err_free_mousedev; |
| 896 | } | 892 | } |
| 897 | 893 | ||
| 898 | error = mousedev_install_chrdev(mousedev); | 894 | cdev_init(&mousedev->cdev, &mousedev_fops); |
| 895 | error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1); | ||
| 899 | if (error) | 896 | if (error) |
| 900 | goto err_unregister_handle; | 897 | goto err_unregister_handle; |
| 901 | 898 | ||
| @@ -908,10 +905,12 @@ static struct mousedev *mousedev_create(struct input_dev *dev, | |||
| 908 | err_cleanup_mousedev: | 905 | err_cleanup_mousedev: |
| 909 | mousedev_cleanup(mousedev); | 906 | mousedev_cleanup(mousedev); |
| 910 | err_unregister_handle: | 907 | err_unregister_handle: |
| 911 | if (minor != MOUSEDEV_MIX) | 908 | if (!mixdev) |
| 912 | input_unregister_handle(&mousedev->handle); | 909 | input_unregister_handle(&mousedev->handle); |
| 913 | err_free_mousedev: | 910 | err_free_mousedev: |
| 914 | put_device(&mousedev->dev); | 911 | put_device(&mousedev->dev); |
| 912 | err_free_minor: | ||
| 913 | input_free_minor(minor); | ||
| 915 | err_out: | 914 | err_out: |
| 916 | return ERR_PTR(error); | 915 | return ERR_PTR(error); |
| 917 | } | 916 | } |
| @@ -920,7 +919,8 @@ static void mousedev_destroy(struct mousedev *mousedev) | |||
| 920 | { | 919 | { |
| 921 | device_del(&mousedev->dev); | 920 | device_del(&mousedev->dev); |
| 922 | mousedev_cleanup(mousedev); | 921 | mousedev_cleanup(mousedev); |
| 923 | if (mousedev->minor != MOUSEDEV_MIX) | 922 | input_free_minor(MINOR(mousedev->dev.devt)); |
| 923 | if (!mousedev->is_mixdev) | ||
| 924 | input_unregister_handle(&mousedev->handle); | 924 | input_unregister_handle(&mousedev->handle); |
| 925 | put_device(&mousedev->dev); | 925 | put_device(&mousedev->dev); |
| 926 | } | 926 | } |
| @@ -938,7 +938,7 @@ static int mixdev_add_device(struct mousedev *mousedev) | |||
| 938 | if (retval) | 938 | if (retval) |
| 939 | goto out; | 939 | goto out; |
| 940 | 940 | ||
| 941 | mousedev->mixdev_open = 1; | 941 | mousedev->opened_by_mixdev = true; |
| 942 | } | 942 | } |
| 943 | 943 | ||
| 944 | get_device(&mousedev->dev); | 944 | get_device(&mousedev->dev); |
| @@ -953,8 +953,8 @@ static void mixdev_remove_device(struct mousedev *mousedev) | |||
| 953 | { | 953 | { |
| 954 | mutex_lock(&mousedev_mix->mutex); | 954 | mutex_lock(&mousedev_mix->mutex); |
| 955 | 955 | ||
| 956 | if (mousedev->mixdev_open) { | 956 | if (mousedev->opened_by_mixdev) { |
| 957 | mousedev->mixdev_open = 0; | 957 | mousedev->opened_by_mixdev = false; |
| 958 | mousedev_close_device(mousedev); | 958 | mousedev_close_device(mousedev); |
| 959 | } | 959 | } |
| 960 | 960 | ||
| @@ -969,19 +969,9 @@ static int mousedev_connect(struct input_handler *handler, | |||
| 969 | const struct input_device_id *id) | 969 | const struct input_device_id *id) |
| 970 | { | 970 | { |
| 971 | struct mousedev *mousedev; | 971 | struct mousedev *mousedev; |
| 972 | int minor; | ||
| 973 | int error; | 972 | int error; |
| 974 | 973 | ||
| 975 | for (minor = 0; minor < MOUSEDEV_MINORS; minor++) | 974 | mousedev = mousedev_create(dev, handler, false); |
| 976 | if (!mousedev_table[minor]) | ||
| 977 | break; | ||
| 978 | |||
| 979 | if (minor == MOUSEDEV_MINORS) { | ||
| 980 | pr_err("no more free mousedev devices\n"); | ||
| 981 | return -ENFILE; | ||
| 982 | } | ||
| 983 | |||
| 984 | mousedev = mousedev_create(dev, handler, minor); | ||
| 985 | if (IS_ERR(mousedev)) | 975 | if (IS_ERR(mousedev)) |
| 986 | return PTR_ERR(mousedev); | 976 | return PTR_ERR(mousedev); |
| 987 | 977 | ||
| @@ -1054,27 +1044,53 @@ static const struct input_device_id mousedev_ids[] = { | |||
| 1054 | MODULE_DEVICE_TABLE(input, mousedev_ids); | 1044 | MODULE_DEVICE_TABLE(input, mousedev_ids); |
| 1055 | 1045 | ||
| 1056 | static struct input_handler mousedev_handler = { | 1046 | static struct input_handler mousedev_handler = { |
| 1057 | .event = mousedev_event, | 1047 | .event = mousedev_event, |
| 1058 | .connect = mousedev_connect, | 1048 | .connect = mousedev_connect, |
| 1059 | .disconnect = mousedev_disconnect, | 1049 | .disconnect = mousedev_disconnect, |
| 1060 | .fops = &mousedev_fops, | 1050 | .legacy_minors = true, |
| 1061 | .minor = MOUSEDEV_MINOR_BASE, | 1051 | .minor = MOUSEDEV_MINOR_BASE, |
| 1062 | .name = "mousedev", | 1052 | .name = "mousedev", |
| 1063 | .id_table = mousedev_ids, | 1053 | .id_table = mousedev_ids, |
| 1064 | }; | 1054 | }; |
| 1065 | 1055 | ||
| 1066 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX | 1056 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX |
| 1057 | #include <linux/miscdevice.h> | ||
| 1058 | |||
| 1067 | static struct miscdevice psaux_mouse = { | 1059 | static struct miscdevice psaux_mouse = { |
| 1068 | PSMOUSE_MINOR, "psaux", &mousedev_fops | 1060 | .minor = PSMOUSE_MINOR, |
| 1061 | .name = "psaux", | ||
| 1062 | .fops = &mousedev_fops, | ||
| 1069 | }; | 1063 | }; |
| 1070 | static int psaux_registered; | 1064 | |
| 1065 | static bool psaux_registered; | ||
| 1066 | |||
| 1067 | static void __init mousedev_psaux_register(void) | ||
| 1068 | { | ||
| 1069 | int error; | ||
| 1070 | |||
| 1071 | error = misc_register(&psaux_mouse); | ||
| 1072 | if (error) | ||
| 1073 | pr_warn("could not register psaux device, error: %d\n", | ||
| 1074 | error); | ||
| 1075 | else | ||
| 1076 | psaux_registered = true; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | static void __exit mousedev_psaux_unregister(void) | ||
| 1080 | { | ||
| 1081 | if (psaux_registered) | ||
| 1082 | misc_deregister(&psaux_mouse); | ||
| 1083 | } | ||
| 1084 | #else | ||
| 1085 | static inline void mousedev_psaux_register(void) { } | ||
| 1086 | static inline void mousedev_psaux_unregister(void) { } | ||
| 1071 | #endif | 1087 | #endif |
| 1072 | 1088 | ||
| 1073 | static int __init mousedev_init(void) | 1089 | static int __init mousedev_init(void) |
| 1074 | { | 1090 | { |
| 1075 | int error; | 1091 | int error; |
| 1076 | 1092 | ||
| 1077 | mousedev_mix = mousedev_create(NULL, &mousedev_handler, MOUSEDEV_MIX); | 1093 | mousedev_mix = mousedev_create(NULL, &mousedev_handler, true); |
| 1078 | if (IS_ERR(mousedev_mix)) | 1094 | if (IS_ERR(mousedev_mix)) |
| 1079 | return PTR_ERR(mousedev_mix); | 1095 | return PTR_ERR(mousedev_mix); |
| 1080 | 1096 | ||
| @@ -1084,14 +1100,7 @@ static int __init mousedev_init(void) | |||
| 1084 | return error; | 1100 | return error; |
| 1085 | } | 1101 | } |
| 1086 | 1102 | ||
| 1087 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX | 1103 | mousedev_psaux_register(); |
| 1088 | error = misc_register(&psaux_mouse); | ||
| 1089 | if (error) | ||
| 1090 | pr_warn("could not register psaux device, error: %d\n", | ||
| 1091 | error); | ||
| 1092 | else | ||
| 1093 | psaux_registered = 1; | ||
| 1094 | #endif | ||
| 1095 | 1104 | ||
| 1096 | pr_info("PS/2 mouse device common for all mice\n"); | 1105 | pr_info("PS/2 mouse device common for all mice\n"); |
| 1097 | 1106 | ||
| @@ -1100,10 +1109,7 @@ static int __init mousedev_init(void) | |||
| 1100 | 1109 | ||
| 1101 | static void __exit mousedev_exit(void) | 1110 | static void __exit mousedev_exit(void) |
| 1102 | { | 1111 | { |
| 1103 | #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX | 1112 | mousedev_psaux_unregister(); |
| 1104 | if (psaux_registered) | ||
| 1105 | misc_deregister(&psaux_mouse); | ||
| 1106 | #endif | ||
| 1107 | input_unregister_handler(&mousedev_handler); | 1113 | input_unregister_handler(&mousedev_handler); |
| 1108 | mousedev_destroy(mousedev_mix); | 1114 | mousedev_destroy(mousedev_mix); |
| 1109 | } | 1115 | } |
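A worked example of the "Normalize device number" arithmetic above, assuming the mainline values MOUSEDEV_MINOR_BASE = 32 and MOUSEDEV_MINORS = 31 (minor 63 stays reserved for the mixdev, /dev/input/mice):

	legacy allocation:  minor = 34  ->  34 < 32 + 31, so dev_no = 34 - 32 = 2
	                    name "mouse2", devt = MKDEV(INPUT_MAJOR, 34)
	dynamic fallback:   minor = 256 ->  not below 63, dev_no stays 256
	                    name "mouse256", devt = MKDEV(INPUT_MAJOR, 256)

Nodes in the legacy window keep their historical names, while overflow devices are named after their raw minor.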
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 0d3219f29744..9edf9806cff9 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
| @@ -172,6 +172,76 @@ static void wacom_close(struct input_dev *dev) | |||
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | /* | 174 | /* |
| 175 | * Calculate the resolution of the X or Y axis, given appropriate HID data. | ||
| 176 | * This function is little more than hidinput_calc_abs_res stripped down. | ||
| 177 | */ | ||
| 178 | static int wacom_calc_hid_res(int logical_extents, int physical_extents, | ||
| 179 | unsigned char unit, unsigned char exponent) | ||
| 180 | { | ||
| 181 | int prev, unit_exponent; | ||
| 182 | |||
| 183 | /* Check if the extents are sane */ | ||
| 184 | if (logical_extents <= 0 || physical_extents <= 0) | ||
| 185 | return 0; | ||
| 186 | |||
| 187 | /* Get signed value of nybble-sized two's-complement exponent */ | ||
| 188 | unit_exponent = exponent; | ||
| 189 | if (unit_exponent > 7) | ||
| 190 | unit_exponent -= 16; | ||
| 191 | |||
| 192 | /* Convert physical_extents to millimeters */ | ||
| 193 | if (unit == 0x11) { /* If centimeters */ | ||
| 194 | unit_exponent += 1; | ||
| 195 | } else if (unit == 0x13) { /* If inches */ | ||
| 196 | prev = physical_extents; | ||
| 197 | physical_extents *= 254; | ||
| 198 | if (physical_extents < prev) | ||
| 199 | return 0; | ||
| 200 | unit_exponent -= 1; | ||
| 201 | } else { | ||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | |||
| 205 | /* Apply negative unit exponent */ | ||
| 206 | for (; unit_exponent < 0; unit_exponent++) { | ||
| 207 | prev = logical_extents; | ||
| 208 | logical_extents *= 10; | ||
| 209 | if (logical_extents < prev) | ||
| 210 | return 0; | ||
| 211 | } | ||
| 212 | /* Apply positive unit exponent */ | ||
| 213 | for (; unit_exponent > 0; unit_exponent--) { | ||
| 214 | prev = physical_extents; | ||
| 215 | physical_extents *= 10; | ||
| 216 | if (physical_extents < prev) | ||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | |||
| 220 | /* Calculate resolution */ | ||
| 221 | return logical_extents / physical_extents; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* | ||
| 225 | * The physical dimension specified by the HID descriptor is likely not in | ||
| 226 | * the "100th of a mm" units expected by wacom_calculate_touch_res. This | ||
| 227 | * function adjusts the value of [xy]_phy based on the unit and exponent | ||
| 228 | * provided by the HID descriptor. If an error occurs during conversion | ||
| 229 | * (e.g. from the unit being left unspecified) [xy]_phy is not modified. | ||
| 230 | */ | ||
| 231 | static void wacom_fix_phy_from_hid(struct wacom_features *features) | ||
| 232 | { | ||
| 233 | int xres = wacom_calc_hid_res(features->x_max, features->x_phy, | ||
| 234 | features->unit, features->unitExpo); | ||
| 235 | int yres = wacom_calc_hid_res(features->y_max, features->y_phy, | ||
| 236 | features->unit, features->unitExpo); | ||
| 237 | |||
| 238 | if (xres > 0 && yres > 0) { | ||
| 239 | features->x_phy = (100 * features->x_max) / xres; | ||
| 240 | features->y_phy = (100 * features->y_max) / yres; | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | /* | ||
| 175 | * Static values for max X/Y and resolution of the Pen interface are stored | 245 | * Static values for max X/Y and resolution of the Pen interface are stored |
| 176 | * in features. This means the physical size of the active area can be | 246 | * in features. This means the physical size of the active area can be |
| 177 | * computed. This is useful when Pen and Touch share the tablet's active area. | 247 | * computed. This is useful when Pen and Touch share the tablet's active area. |
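To make wacom_calc_hid_res() concrete, a worked example with hypothetical HID values logical_extents = 10000, physical_extents = 20, unit = 0x13 (inches), exponent = 0:

	inches:   physical_extents = 20 * 254 = 5080, unit_exponent = 0 - 1 = -1
	exponent: logical_extents = 10000 * 10 = 100000
	result:   100000 / 5080 = 19 counts per mm (integer division)

wacom_fix_phy_from_hid() then rewrites x_phy as 100 * 10000 / 19 = 52631 hundredths of a millimeter; the true extent is 50800 (20 in = 508 mm), the difference coming from 19.68 being truncated to 19.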
| @@ -432,56 +502,52 @@ static int wacom_parse_hid(struct usb_interface *intf, | |||
| 432 | return result; | 502 | return result; |
| 433 | } | 503 | } |
| 434 | 504 | ||
| 435 | static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features) | 505 | static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int length, int mode) |
| 436 | { | 506 | { |
| 437 | unsigned char *rep_data; | 507 | unsigned char *rep_data; |
| 438 | int limit = 0, report_id = 2; | 508 | int error = -ENOMEM, limit = 0; |
| 439 | int error = -ENOMEM; | ||
| 440 | 509 | ||
| 441 | rep_data = kmalloc(4, GFP_KERNEL); | 510 | rep_data = kzalloc(length, GFP_KERNEL); |
| 442 | if (!rep_data) | 511 | if (!rep_data) |
| 443 | return error; | 512 | return error; |
| 444 | 513 | ||
| 445 | /* ask to report Wacom data */ | 514 | rep_data[0] = report_id; |
| 515 | rep_data[1] = mode; | ||
| 516 | |||
| 517 | do { | ||
| 518 | error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT, | ||
| 519 | report_id, rep_data, length, 1); | ||
| 520 | if (error >= 0) | ||
| 521 | error = wacom_get_report(intf, WAC_HID_FEATURE_REPORT, | ||
| 522 | report_id, rep_data, length, 1); | ||
| 523 | } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES); | ||
| 524 | |||
| 525 | kfree(rep_data); | ||
| 526 | |||
| 527 | return error < 0 ? error : 0; | ||
| 528 | } | ||
| 529 | |||
| 530 | /* | ||
| 531 | * Switch the tablet into its most-capable mode. Wacom tablets are | ||
| 532 | * typically configured to power-up in a mode which sends mouse-like | ||
| 533 | * reports to the OS. To get absolute position, pressure data, etc. | ||
| 534 | * from the tablet, it is necessary to switch the tablet out of this | ||
| 535 | * mode and into one which sends the full range of tablet data. | ||
| 536 | */ | ||
| 537 | static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features) | ||
| 538 | { | ||
| 446 | if (features->device_type == BTN_TOOL_FINGER) { | 539 | if (features->device_type == BTN_TOOL_FINGER) { |
| 447 | /* if it is an MT Tablet PC touch */ | ||
| 448 | if (features->type > TABLETPC) { | 540 | if (features->type > TABLETPC) { |
| 449 | do { | 541 | /* MT Tablet PC touch */ |
| 450 | rep_data[0] = 3; | 542 | return wacom_set_device_mode(intf, 3, 4, 4); |
| 451 | rep_data[1] = 4; | 543 | } |
| 452 | rep_data[2] = 0; | 544 | } else if (features->device_type == BTN_TOOL_PEN) { |
| 453 | rep_data[3] = 0; | 545 | if (features->type <= BAMBOO_PT && features->type != WIRELESS) { |
| 454 | report_id = 3; | 546 | return wacom_set_device_mode(intf, 2, 2, 2); |
| 455 | error = wacom_set_report(intf, | ||
| 456 | WAC_HID_FEATURE_REPORT, | ||
| 457 | report_id, | ||
| 458 | rep_data, 4, 1); | ||
| 459 | if (error >= 0) | ||
| 460 | error = wacom_get_report(intf, | ||
| 461 | WAC_HID_FEATURE_REPORT, | ||
| 462 | report_id, | ||
| 463 | rep_data, 4, 1); | ||
| 464 | } while ((error < 0 || rep_data[1] != 4) && | ||
| 465 | limit++ < WAC_MSG_RETRIES); | ||
| 466 | } | 547 | } |
| 467 | } else if (features->type <= BAMBOO_PT && | ||
| 468 | features->type != WIRELESS && | ||
| 469 | features->device_type == BTN_TOOL_PEN) { | ||
| 470 | do { | ||
| 471 | rep_data[0] = 2; | ||
| 472 | rep_data[1] = 2; | ||
| 473 | error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT, | ||
| 474 | report_id, rep_data, 2, 1); | ||
| 475 | if (error >= 0) | ||
| 476 | error = wacom_get_report(intf, | ||
| 477 | WAC_HID_FEATURE_REPORT, | ||
| 478 | report_id, rep_data, 2, 1); | ||
| 479 | } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES); | ||
| 480 | } | 548 | } |
| 481 | 549 | ||
| 482 | kfree(rep_data); | 550 | return 0; |
| 483 | |||
| 484 | return error < 0 ? error : 0; | ||
| 485 | } | 551 | } |
| 486 | 552 | ||
| 487 | static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, | 553 | static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, |
| @@ -531,6 +597,7 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, | |||
| 531 | error = wacom_parse_hid(intf, hid_desc, features); | 597 | error = wacom_parse_hid(intf, hid_desc, features); |
| 532 | if (error) | 598 | if (error) |
| 533 | goto out; | 599 | goto out; |
| 600 | wacom_fix_phy_from_hid(features); | ||
| 534 | 601 | ||
| 535 | out: | 602 | out: |
| 536 | return error; | 603 | return error; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 08b462b6c0d8..c3468c8dbd89 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -25,6 +25,11 @@ | |||
| 25 | #define WACOM_INTUOS_RES 100 | 25 | #define WACOM_INTUOS_RES 100 |
| 26 | #define WACOM_INTUOS3_RES 200 | 26 | #define WACOM_INTUOS3_RES 200 |
| 27 | 27 | ||
| 28 | /* Scale factor relating reported contact size to logical contact area. | ||
| 29 | * 2^13/pi is a good approximation on Intuos5 and 3rd-gen Bamboo | ||
| 30 | */ | ||
| 31 | #define WACOM_CONTACT_AREA_SCALE 2607 | ||
| 32 | |||
| 28 | static int wacom_penpartner_irq(struct wacom_wac *wacom) | 33 | static int wacom_penpartner_irq(struct wacom_wac *wacom) |
| 29 | { | 34 | { |
| 30 | unsigned char *data = wacom->data; | 35 | unsigned char *data = wacom->data; |
| @@ -326,7 +331,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
| 326 | 331 | ||
| 327 | /* Enter report */ | 332 | /* Enter report */ |
| 328 | if ((data[1] & 0xfc) == 0xc0) { | 333 | if ((data[1] & 0xfc) == 0xc0) { |
| 329 | if (features->type >= INTUOS5S && features->type <= INTUOS5L) | 334 | if (features->quirks == WACOM_QUIRK_MULTI_INPUT) |
| 330 | wacom->shared->stylus_in_proximity = true; | 335 | wacom->shared->stylus_in_proximity = true; |
| 331 | 336 | ||
| 332 | /* serial number of the tool */ | 337 | /* serial number of the tool */ |
| @@ -414,7 +419,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
| 414 | 419 | ||
| 415 | /* Exit report */ | 420 | /* Exit report */ |
| 416 | if ((data[1] & 0xfe) == 0x80) { | 421 | if ((data[1] & 0xfe) == 0x80) { |
| 417 | if (features->type >= INTUOS5S && features->type <= INTUOS5L) | 422 | if (features->quirks == WACOM_QUIRK_MULTI_INPUT) |
| 418 | wacom->shared->stylus_in_proximity = false; | 423 | wacom->shared->stylus_in_proximity = false; |
| 419 | 424 | ||
| 420 | /* | 425 | /* |
| @@ -1043,11 +1048,19 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | |||
| 1043 | if (touch) { | 1048 | if (touch) { |
| 1044 | int x = (data[2] << 4) | (data[4] >> 4); | 1049 | int x = (data[2] << 4) | (data[4] >> 4); |
| 1045 | int y = (data[3] << 4) | (data[4] & 0x0f); | 1050 | int y = (data[3] << 4) | (data[4] & 0x0f); |
| 1046 | int w = data[6]; | 1051 | int a = data[5]; |
| 1052 | |||
| 1053 | // "a" is a scaled-down area which we assume is roughly | ||
| 1054 | // circular and which can be described as: a=(pi*r^2)/C. | ||
| 1055 | int x_res = input_abs_get_res(input, ABS_X); | ||
| 1056 | int y_res = input_abs_get_res(input, ABS_Y); | ||
| 1057 | int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); | ||
| 1058 | int height = width * y_res / x_res; | ||
| 1047 | 1059 | ||
| 1048 | input_report_abs(input, ABS_MT_POSITION_X, x); | 1060 | input_report_abs(input, ABS_MT_POSITION_X, x); |
| 1049 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1061 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
| 1050 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, w); | 1062 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); |
| 1063 | input_report_abs(input, ABS_MT_TOUCH_MINOR, height); | ||
| 1051 | } | 1064 | } |
| 1052 | } | 1065 | } |
| 1053 | 1066 | ||
| @@ -1533,7 +1546,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
| 1533 | input_mt_init_slots(input_dev, features->touch_max, 0); | 1546 | input_mt_init_slots(input_dev, features->touch_max, 0); |
| 1534 | 1547 | ||
| 1535 | input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, | 1548 | input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, |
| 1536 | 0, 255, 0, 0); | 1549 | 0, features->x_max, 0, 0); |
| 1550 | input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, | ||
| 1551 | 0, features->y_max, 0, 0); | ||
| 1537 | 1552 | ||
| 1538 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, | 1553 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, |
| 1539 | 0, features->x_max, | 1554 | 0, features->x_max, |
| @@ -1641,7 +1656,10 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
| 1641 | 1656 | ||
| 1642 | input_set_abs_params(input_dev, | 1657 | input_set_abs_params(input_dev, |
| 1643 | ABS_MT_TOUCH_MAJOR, | 1658 | ABS_MT_TOUCH_MAJOR, |
| 1644 | 0, 255, 0, 0); | 1659 | 0, features->x_max, 0, 0); |
| 1660 | input_set_abs_params(input_dev, | ||
| 1661 | ABS_MT_TOUCH_MINOR, | ||
| 1662 | 0, features->y_max, 0, 0); | ||
| 1645 | } | 1663 | } |
| 1646 | 1664 | ||
| 1647 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, | 1665 | input_set_abs_params(input_dev, ABS_MT_POSITION_X, |
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index e92615d0b1b0..1df2396af008 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
| @@ -320,10 +320,8 @@ static bool mxt_object_writable(unsigned int type) | |||
| 320 | static void mxt_dump_message(struct device *dev, | 320 | static void mxt_dump_message(struct device *dev, |
| 321 | struct mxt_message *message) | 321 | struct mxt_message *message) |
| 322 | { | 322 | { |
| 323 | dev_dbg(dev, "reportid: %u\tmessage: %02x %02x %02x %02x %02x %02x %02x\n", | 323 | dev_dbg(dev, "reportid: %u\tmessage: %*ph\n", |
| 324 | message->reportid, message->message[0], message->message[1], | 324 | message->reportid, 7, message->message); |
| 325 | message->message[2], message->message[3], message->message[4], | ||
| 326 | message->message[5], message->message[6]); | ||
| 327 | } | 325 | } |
| 328 | 326 | ||
| 329 | static int mxt_check_bootloader(struct i2c_client *client, | 327 | static int mxt_check_bootloader(struct i2c_client *client, |
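%*ph is the kernel's printk extension for hex-dumping a small buffer: the '*' consumes the byte count as an int argument, and the bytes print space-separated, so the rewritten dev_dbg() emits the same seven bytes as the old hand-rolled chain of %02x specifiers. For example:

	u8 buf[7] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x02 };

	/* prints: de ad be ef 00 01 02 */
	dev_dbg(dev, "%*ph\n", 7, buf);

The variants %*phC, %*phD and %*phN use colon, dash and no separator, respectively.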
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index a1e760150821..61d78fa03b1a 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
| @@ -595,7 +595,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) | |||
| 595 | j = ipc->num / (sizeof(long) * 8); | 595 | j = ipc->num / (sizeof(long) * 8); |
| 596 | i = ipc->num % (sizeof(long) * 8); | 596 | i = ipc->num % (sizeof(long) * 8); |
| 597 | if (j < 8) | 597 | if (j < 8) |
| 598 | protos[j] |= (0x1 << i); | 598 | protos[j] |= (1UL << i); |
| 599 | ipc = ipc->next; | 599 | ipc = ipc->next; |
| 600 | } | 600 | } |
| 601 | if ((r = set_arg(argp, protos, 8 * sizeof(long)))) | 601 | if ((r = set_arg(argp, protos, 8 * sizeof(long)))) |
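The 1UL fix matters because protos[] is an array of long: on 64-bit kernels each slot covers bit positions 0..63, but 0x1 is a 32-bit int, and shifting an int by 32 or more is undefined behavior, so protocols landing in the upper half of a slot were lost. Shifting a value as wide as the slot fixes it; a minimal illustration with a hypothetical index:

	unsigned long slot = 0;
	int i = 40;			/* a valid bit index for a 64-bit long */

	/* slot |= 0x1 << i;	would be undefined: shifts a 32-bit int by 40 */
	slot |= 1UL << i;	/* correct: shift operand is as wide as the slot */

On 32-bit kernels i never exceeds 31, so 1UL remains correct there too.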
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index d949b781f6f8..91a02eeeb319 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
| @@ -216,6 +216,13 @@ config DM_BUFIO | |||
| 216 | as a cache, holding recently-read blocks in memory and performing | 216 | as a cache, holding recently-read blocks in memory and performing |
| 217 | delayed writes. | 217 | delayed writes. |
| 218 | 218 | ||
| 219 | config DM_BIO_PRISON | ||
| 220 | tristate | ||
| 221 | depends on BLK_DEV_DM && EXPERIMENTAL | ||
| 222 | ---help--- | ||
| 223 | Some bio locking schemes used by other device-mapper targets | ||
| 224 | including thin provisioning. | ||
| 225 | |||
| 219 | source "drivers/md/persistent-data/Kconfig" | 226 | source "drivers/md/persistent-data/Kconfig" |
| 220 | 227 | ||
| 221 | config DM_CRYPT | 228 | config DM_CRYPT |
| @@ -247,6 +254,7 @@ config DM_THIN_PROVISIONING | |||
| 247 | tristate "Thin provisioning target (EXPERIMENTAL)" | 254 | tristate "Thin provisioning target (EXPERIMENTAL)" |
| 248 | depends on BLK_DEV_DM && EXPERIMENTAL | 255 | depends on BLK_DEV_DM && EXPERIMENTAL |
| 249 | select DM_PERSISTENT_DATA | 256 | select DM_PERSISTENT_DATA |
| 257 | select DM_BIO_PRISON | ||
| 250 | ---help--- | 258 | ---help--- |
| 251 | Provides thin provisioning and snapshots that share a data store. | 259 | Provides thin provisioning and snapshots that share a data store. |
| 252 | 260 | ||
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 8b2e0dffe82e..94dce8b49324 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile | |||
| @@ -29,6 +29,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o | |||
| 29 | obj-$(CONFIG_BLK_DEV_MD) += md-mod.o | 29 | obj-$(CONFIG_BLK_DEV_MD) += md-mod.o |
| 30 | obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o | 30 | obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o |
| 31 | obj-$(CONFIG_DM_BUFIO) += dm-bufio.o | 31 | obj-$(CONFIG_DM_BUFIO) += dm-bufio.o |
| 32 | obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o | ||
| 32 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o | 33 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o |
| 33 | obj-$(CONFIG_DM_DELAY) += dm-delay.o | 34 | obj-$(CONFIG_DM_DELAY) += dm-delay.o |
| 34 | obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o | 35 | obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 94e7f6ba2e11..7155945f8eb8 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
| @@ -163,20 +163,17 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde | |||
| 163 | * As devices are only added or removed when raid_disk is < 0 and | 163 | * As devices are only added or removed when raid_disk is < 0 and |
| 164 | * nr_pending is 0 and In_sync is clear, the entries we return will | 164 | * nr_pending is 0 and In_sync is clear, the entries we return will |
| 165 | * still be in the same position on the list when we re-enter | 165 | * still be in the same position on the list when we re-enter |
| 166 | * list_for_each_continue_rcu. | 166 | * list_for_each_entry_continue_rcu. |
| 167 | */ | 167 | */ |
| 168 | struct list_head *pos; | ||
| 169 | rcu_read_lock(); | 168 | rcu_read_lock(); |
| 170 | if (rdev == NULL) | 169 | if (rdev == NULL) |
| 171 | /* start at the beginning */ | 170 | /* start at the beginning */ |
| 172 | pos = &mddev->disks; | 171 | rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set); |
| 173 | else { | 172 | else { |
| 174 | /* release the previous rdev and start from there. */ | 173 | /* release the previous rdev and start from there. */ |
| 175 | rdev_dec_pending(rdev, mddev); | 174 | rdev_dec_pending(rdev, mddev); |
| 176 | pos = &rdev->same_set; | ||
| 177 | } | 175 | } |
| 178 | list_for_each_continue_rcu(pos, &mddev->disks) { | 176 | list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { |
| 179 | rdev = list_entry(pos, struct md_rdev, same_set); | ||
| 180 | if (rdev->raid_disk >= 0 && | 177 | if (rdev->raid_disk >= 0 && |
| 181 | !test_bit(Faulty, &rdev->flags)) { | 178 | !test_bit(Faulty, &rdev->flags)) { |
| 182 | /* this is a usable device */ | 179 | 
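[Editor's note] The bitmap.c conversion above replaces the open-coded list_for_each_continue_rcu() walk with the entry-based variant. The continue iterator resumes after the given entry, so "start at the head" is expressed by casting the list head itself to a dummy entry, exactly as the new code does with list_entry_rcu(&mddev->disks, ...). A minimal sketch of the pattern with invented types; callers are assumed to hold rcu_read_lock():

#include <linux/rculist.h>

struct item {
	struct list_head link;
	int val;
};

/* Return the first item after @pos, or the first item in the list
 * when @pos is NULL; NULL when the walk is exhausted.
 */
static struct item *next_item(struct list_head *head, struct item *pos)
{
	if (!pos)
		/* fake entry wrapping the head, so iteration starts at
		 * the first real element */
		pos = list_entry_rcu(head, struct item, link);

	list_for_each_entry_continue_rcu(pos, head, link)
		return pos;

	return NULL;
}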
| @@ -473,14 +470,10 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) | |||
| 473 | { | 470 | { |
| 474 | bitmap_super_t *sb; | 471 | bitmap_super_t *sb; |
| 475 | unsigned long chunksize, daemon_sleep, write_behind; | 472 | unsigned long chunksize, daemon_sleep, write_behind; |
| 476 | int err = -EINVAL; | ||
| 477 | 473 | ||
| 478 | bitmap->storage.sb_page = alloc_page(GFP_KERNEL); | 474 | bitmap->storage.sb_page = alloc_page(GFP_KERNEL); |
| 479 | if (IS_ERR(bitmap->storage.sb_page)) { | 475 | if (bitmap->storage.sb_page == NULL) |
| 480 | err = PTR_ERR(bitmap->storage.sb_page); | 476 | return -ENOMEM; |
| 481 | bitmap->storage.sb_page = NULL; | ||
| 482 | return err; | ||
| 483 | } | ||
| 484 | bitmap->storage.sb_page->index = 0; | 477 | bitmap->storage.sb_page->index = 0; |
| 485 | 478 | ||
| 486 | sb = kmap_atomic(bitmap->storage.sb_page); | 479 | sb = kmap_atomic(bitmap->storage.sb_page); |
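[Editor's note] The second bitmap.c hunk fixes a mixed-up error convention: alloc_page() returns a valid page pointer or NULL, never an ERR_PTR-encoded error, so the old IS_ERR()/PTR_ERR() branch could never fire and a failed allocation went unnoticed. A sketch of the correct pattern; the helper name is invented:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

static int grab_sb_page(struct page **out)
{
	struct page *page = alloc_page(GFP_KERNEL);

	/* page allocators signal failure with NULL, not ERR_PTR */
	if (!page)
		return -ENOMEM;

	*out = page;
	return 0;
}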
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c new file mode 100644 index 000000000000..e4e841567459 --- /dev/null +++ b/drivers/md/dm-bio-prison.c | |||
| @@ -0,0 +1,415 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2012 Red Hat, Inc. | ||
| 3 | * | ||
| 4 | * This file is released under the GPL. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include "dm.h" | ||
| 8 | #include "dm-bio-prison.h" | ||
| 9 | |||
| 10 | #include <linux/spinlock.h> | ||
| 11 | #include <linux/mempool.h> | ||
| 12 | #include <linux/module.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | |||
| 15 | /*----------------------------------------------------------------*/ | ||
| 16 | |||
| 17 | struct dm_bio_prison_cell { | ||
| 18 | struct hlist_node list; | ||
| 19 | struct dm_bio_prison *prison; | ||
| 20 | struct dm_cell_key key; | ||
| 21 | struct bio *holder; | ||
| 22 | struct bio_list bios; | ||
| 23 | }; | ||
| 24 | |||
| 25 | struct dm_bio_prison { | ||
| 26 | spinlock_t lock; | ||
| 27 | mempool_t *cell_pool; | ||
| 28 | |||
| 29 | unsigned nr_buckets; | ||
| 30 | unsigned hash_mask; | ||
| 31 | struct hlist_head *cells; | ||
| 32 | }; | ||
| 33 | |||
| 34 | /*----------------------------------------------------------------*/ | ||
| 35 | |||
| 36 | static uint32_t calc_nr_buckets(unsigned nr_cells) | ||
| 37 | { | ||
| 38 | uint32_t n = 128; | ||
| 39 | |||
| 40 | nr_cells /= 4; | ||
| 41 | nr_cells = min(nr_cells, 8192u); | ||
| 42 | |||
| 43 | while (n < nr_cells) | ||
| 44 | n <<= 1; | ||
| 45 | |||
| 46 | return n; | ||
| 47 | } | ||
| 48 | |||
| 49 | static struct kmem_cache *_cell_cache; | ||
| 50 | |||
| 51 | /* | ||
| 52 | * @nr_cells should be the number of cells you want in use _concurrently_. | ||
| 53 | * Don't confuse it with the number of distinct keys. | ||
| 54 | */ | ||
| 55 | struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells) | ||
| 56 | { | ||
| 57 | unsigned i; | ||
| 58 | uint32_t nr_buckets = calc_nr_buckets(nr_cells); | ||
| 59 | size_t len = sizeof(struct dm_bio_prison) + | ||
| 60 | (sizeof(struct hlist_head) * nr_buckets); | ||
| 61 | struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL); | ||
| 62 | |||
| 63 | if (!prison) | ||
| 64 | return NULL; | ||
| 65 | |||
| 66 | spin_lock_init(&prison->lock); | ||
| 67 | prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); | ||
| 68 | if (!prison->cell_pool) { | ||
| 69 | kfree(prison); | ||
| 70 | return NULL; | ||
| 71 | } | ||
| 72 | |||
| 73 | prison->nr_buckets = nr_buckets; | ||
| 74 | prison->hash_mask = nr_buckets - 1; | ||
| 75 | prison->cells = (struct hlist_head *) (prison + 1); | ||
| 76 | for (i = 0; i < nr_buckets; i++) | ||
| 77 | INIT_HLIST_HEAD(prison->cells + i); | ||
| 78 | |||
| 79 | return prison; | ||
| 80 | } | ||
| 81 | EXPORT_SYMBOL_GPL(dm_bio_prison_create); | ||
| 82 | |||
| 83 | void dm_bio_prison_destroy(struct dm_bio_prison *prison) | ||
| 84 | { | ||
| 85 | mempool_destroy(prison->cell_pool); | ||
| 86 | kfree(prison); | ||
| 87 | } | ||
| 88 | EXPORT_SYMBOL_GPL(dm_bio_prison_destroy); | ||
| 89 | |||
| 90 | static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key) | ||
| 91 | { | ||
| 92 | const unsigned long BIG_PRIME = 4294967291UL; | ||
| 93 | uint64_t hash = key->block * BIG_PRIME; | ||
| 94 | |||
| 95 | return (uint32_t) (hash & prison->hash_mask); | ||
| 96 | } | ||
| 97 | |||
| 98 | static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs) | ||
| 99 | { | ||
| 100 | return (lhs->virtual == rhs->virtual) && | ||
| 101 | (lhs->dev == rhs->dev) && | ||
| 102 | (lhs->block == rhs->block); | ||
| 103 | } | ||
| 104 | |||
| 105 | static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, | ||
| 106 | struct dm_cell_key *key) | ||
| 107 | { | ||
| 108 | struct dm_bio_prison_cell *cell; | ||
| 109 | struct hlist_node *tmp; | ||
| 110 | |||
| 111 | hlist_for_each_entry(cell, tmp, bucket, list) | ||
| 112 | if (keys_equal(&cell->key, key)) | ||
| 113 | return cell; | ||
| 114 | |||
| 115 | return NULL; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * This may block if a new cell needs allocating. You must ensure that | ||
| 120 | * cells will be unlocked even if the calling thread is blocked. | ||
| 121 | * | ||
| 122 | * Returns 1 if the cell was already held, 0 if @inmate is the new holder. | ||
| 123 | */ | ||
| 124 | int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key, | ||
| 125 | struct bio *inmate, struct dm_bio_prison_cell **ref) | ||
| 126 | { | ||
| 127 | int r = 1; | ||
| 128 | unsigned long flags; | ||
| 129 | uint32_t hash = hash_key(prison, key); | ||
| 130 | struct dm_bio_prison_cell *cell, *cell2; | ||
| 131 | |||
| 132 | BUG_ON(hash > prison->nr_buckets); | ||
| 133 | |||
| 134 | spin_lock_irqsave(&prison->lock, flags); | ||
| 135 | |||
| 136 | cell = __search_bucket(prison->cells + hash, key); | ||
| 137 | if (cell) { | ||
| 138 | bio_list_add(&cell->bios, inmate); | ||
| 139 | goto out; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Allocate a new cell | ||
| 144 | */ | ||
| 145 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 146 | cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); | ||
| 147 | spin_lock_irqsave(&prison->lock, flags); | ||
| 148 | |||
| 149 | /* | ||
| 150 | * We've been unlocked, so we have to double check that | ||
| 151 | * nobody else has inserted this cell in the meantime. | ||
| 152 | */ | ||
| 153 | cell = __search_bucket(prison->cells + hash, key); | ||
| 154 | if (cell) { | ||
| 155 | mempool_free(cell2, prison->cell_pool); | ||
| 156 | bio_list_add(&cell->bios, inmate); | ||
| 157 | goto out; | ||
| 158 | } | ||
| 159 | |||
| 160 | /* | ||
| 161 | * Use new cell. | ||
| 162 | */ | ||
| 163 | cell = cell2; | ||
| 164 | |||
| 165 | cell->prison = prison; | ||
| 166 | memcpy(&cell->key, key, sizeof(cell->key)); | ||
| 167 | cell->holder = inmate; | ||
| 168 | bio_list_init(&cell->bios); | ||
| 169 | hlist_add_head(&cell->list, prison->cells + hash); | ||
| 170 | |||
| 171 | r = 0; | ||
| 172 | |||
| 173 | out: | ||
| 174 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 175 | |||
| 176 | *ref = cell; | ||
| 177 | |||
| 178 | return r; | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL_GPL(dm_bio_detain); | ||
| 181 | |||
| 182 | /* | ||
| 183 | * @inmates must have been initialised prior to this call | ||
| 184 | */ | ||
| 185 | static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
| 186 | { | ||
| 187 | struct dm_bio_prison *prison = cell->prison; | ||
| 188 | |||
| 189 | hlist_del(&cell->list); | ||
| 190 | |||
| 191 | if (inmates) { | ||
| 192 | bio_list_add(inmates, cell->holder); | ||
| 193 | bio_list_merge(inmates, &cell->bios); | ||
| 194 | } | ||
| 195 | |||
| 196 | mempool_free(cell, prison->cell_pool); | ||
| 197 | } | ||
| 198 | |||
| 199 | void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) | ||
| 200 | { | ||
| 201 | unsigned long flags; | ||
| 202 | struct dm_bio_prison *prison = cell->prison; | ||
| 203 | |||
| 204 | spin_lock_irqsave(&prison->lock, flags); | ||
| 205 | __cell_release(cell, bios); | ||
| 206 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 207 | } | ||
| 208 | EXPORT_SYMBOL_GPL(dm_cell_release); | ||
| 209 | |||
| 210 | /* | ||
| 211 | * There are a couple of places where we put a bio into a cell briefly | ||
| 212 | * before taking it out again. In these situations we know that no other | ||
| 213 | * bio may be in the cell. This function releases the cell, and also does | ||
| 214 | * a sanity check. | ||
| 215 | */ | ||
| 216 | static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
| 217 | { | ||
| 218 | BUG_ON(cell->holder != bio); | ||
| 219 | BUG_ON(!bio_list_empty(&cell->bios)); | ||
| 220 | |||
| 221 | __cell_release(cell, NULL); | ||
| 222 | } | ||
| 223 | |||
| 224 | void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
| 225 | { | ||
| 226 | unsigned long flags; | ||
| 227 | struct dm_bio_prison *prison = cell->prison; | ||
| 228 | |||
| 229 | spin_lock_irqsave(&prison->lock, flags); | ||
| 230 | __cell_release_singleton(cell, bio); | ||
| 231 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 232 | } | ||
| 233 | EXPORT_SYMBOL_GPL(dm_cell_release_singleton); | ||
| 234 | |||
| 235 | /* | ||
| 236 | * Sometimes we don't want the holder, just the additional bios. | ||
| 237 | */ | ||
| 238 | static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
| 239 | { | ||
| 240 | struct dm_bio_prison *prison = cell->prison; | ||
| 241 | |||
| 242 | hlist_del(&cell->list); | ||
| 243 | bio_list_merge(inmates, &cell->bios); | ||
| 244 | |||
| 245 | mempool_free(cell, prison->cell_pool); | ||
| 246 | } | ||
| 247 | |||
| 248 | void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
| 249 | { | ||
| 250 | unsigned long flags; | ||
| 251 | struct dm_bio_prison *prison = cell->prison; | ||
| 252 | |||
| 253 | spin_lock_irqsave(&prison->lock, flags); | ||
| 254 | __cell_release_no_holder(cell, inmates); | ||
| 255 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 256 | } | ||
| 257 | EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); | ||
| 258 | |||
| 259 | void dm_cell_error(struct dm_bio_prison_cell *cell) | ||
| 260 | { | ||
| 261 | struct dm_bio_prison *prison = cell->prison; | ||
| 262 | struct bio_list bios; | ||
| 263 | struct bio *bio; | ||
| 264 | unsigned long flags; | ||
| 265 | |||
| 266 | bio_list_init(&bios); | ||
| 267 | |||
| 268 | spin_lock_irqsave(&prison->lock, flags); | ||
| 269 | __cell_release(cell, &bios); | ||
| 270 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 271 | |||
| 272 | while ((bio = bio_list_pop(&bios))) | ||
| 273 | bio_io_error(bio); | ||
| 274 | } | ||
| 275 | EXPORT_SYMBOL_GPL(dm_cell_error); | ||
| 276 | |||
| 277 | /*----------------------------------------------------------------*/ | ||
| 278 | |||
| 279 | #define DEFERRED_SET_SIZE 64 | ||
| 280 | |||
| 281 | struct dm_deferred_entry { | ||
| 282 | struct dm_deferred_set *ds; | ||
| 283 | unsigned count; | ||
| 284 | struct list_head work_items; | ||
| 285 | }; | ||
| 286 | |||
| 287 | struct dm_deferred_set { | ||
| 288 | spinlock_t lock; | ||
| 289 | unsigned current_entry; | ||
| 290 | unsigned sweeper; | ||
| 291 | struct dm_deferred_entry entries[DEFERRED_SET_SIZE]; | ||
| 292 | }; | ||
| 293 | |||
| 294 | struct dm_deferred_set *dm_deferred_set_create(void) | ||
| 295 | { | ||
| 296 | int i; | ||
| 297 | struct dm_deferred_set *ds; | ||
| 298 | |||
| 299 | ds = kmalloc(sizeof(*ds), GFP_KERNEL); | ||
| 300 | if (!ds) | ||
| 301 | return NULL; | ||
| 302 | |||
| 303 | spin_lock_init(&ds->lock); | ||
| 304 | ds->current_entry = 0; | ||
| 305 | ds->sweeper = 0; | ||
| 306 | for (i = 0; i < DEFERRED_SET_SIZE; i++) { | ||
| 307 | ds->entries[i].ds = ds; | ||
| 308 | ds->entries[i].count = 0; | ||
| 309 | INIT_LIST_HEAD(&ds->entries[i].work_items); | ||
| 310 | } | ||
| 311 | |||
| 312 | return ds; | ||
| 313 | } | ||
| 314 | EXPORT_SYMBOL_GPL(dm_deferred_set_create); | ||
| 315 | |||
| 316 | void dm_deferred_set_destroy(struct dm_deferred_set *ds) | ||
| 317 | { | ||
| 318 | kfree(ds); | ||
| 319 | } | ||
| 320 | EXPORT_SYMBOL_GPL(dm_deferred_set_destroy); | ||
| 321 | |||
| 322 | struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds) | ||
| 323 | { | ||
| 324 | unsigned long flags; | ||
| 325 | struct dm_deferred_entry *entry; | ||
| 326 | |||
| 327 | spin_lock_irqsave(&ds->lock, flags); | ||
| 328 | entry = ds->entries + ds->current_entry; | ||
| 329 | entry->count++; | ||
| 330 | spin_unlock_irqrestore(&ds->lock, flags); | ||
| 331 | |||
| 332 | return entry; | ||
| 333 | } | ||
| 334 | EXPORT_SYMBOL_GPL(dm_deferred_entry_inc); | ||
| 335 | |||
| 336 | static unsigned ds_next(unsigned index) | ||
| 337 | { | ||
| 338 | return (index + 1) % DEFERRED_SET_SIZE; | ||
| 339 | } | ||
| 340 | |||
| 341 | static void __sweep(struct dm_deferred_set *ds, struct list_head *head) | ||
| 342 | { | ||
| 343 | while ((ds->sweeper != ds->current_entry) && | ||
| 344 | !ds->entries[ds->sweeper].count) { | ||
| 345 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
| 346 | ds->sweeper = ds_next(ds->sweeper); | ||
| 347 | } | ||
| 348 | |||
| 349 | if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count) | ||
| 350 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
| 351 | } | ||
| 352 | |||
| 353 | void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head) | ||
| 354 | { | ||
| 355 | unsigned long flags; | ||
| 356 | |||
| 357 | spin_lock_irqsave(&entry->ds->lock, flags); | ||
| 358 | BUG_ON(!entry->count); | ||
| 359 | --entry->count; | ||
| 360 | __sweep(entry->ds, head); | ||
| 361 | spin_unlock_irqrestore(&entry->ds->lock, flags); | ||
| 362 | } | ||
| 363 | EXPORT_SYMBOL_GPL(dm_deferred_entry_dec); | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Returns 1 if the work was deferred, or 0 if there are no pending items to delay the job. | ||
| 367 | */ | ||
| 368 | int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work) | ||
| 369 | { | ||
| 370 | int r = 1; | ||
| 371 | unsigned long flags; | ||
| 372 | unsigned next_entry; | ||
| 373 | |||
| 374 | spin_lock_irqsave(&ds->lock, flags); | ||
| 375 | if ((ds->sweeper == ds->current_entry) && | ||
| 376 | !ds->entries[ds->current_entry].count) | ||
| 377 | r = 0; | ||
| 378 | else { | ||
| 379 | list_add(work, &ds->entries[ds->current_entry].work_items); | ||
| 380 | next_entry = ds_next(ds->current_entry); | ||
| 381 | if (!ds->entries[next_entry].count) | ||
| 382 | ds->current_entry = next_entry; | ||
| 383 | } | ||
| 384 | spin_unlock_irqrestore(&ds->lock, flags); | ||
| 385 | |||
| 386 | return r; | ||
| 387 | } | ||
| 388 | EXPORT_SYMBOL_GPL(dm_deferred_set_add_work); | ||
| 389 | |||
| 390 | /*----------------------------------------------------------------*/ | ||
| 391 | |||
| 392 | static int __init dm_bio_prison_init(void) | ||
| 393 | { | ||
| 394 | _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); | ||
| 395 | if (!_cell_cache) | ||
| 396 | return -ENOMEM; | ||
| 397 | |||
| 398 | return 0; | ||
| 399 | } | ||
| 400 | |||
| 401 | static void __exit dm_bio_prison_exit(void) | ||
| 402 | { | ||
| 403 | kmem_cache_destroy(_cell_cache); | ||
| 404 | _cell_cache = NULL; | ||
| 405 | } | ||
| 406 | |||
| 407 | /* | ||
| 408 | * module hooks | ||
| 409 | */ | ||
| 410 | module_init(dm_bio_prison_init); | ||
| 411 | module_exit(dm_bio_prison_exit); | ||
| 412 | |||
| 413 | MODULE_DESCRIPTION(DM_NAME " bio prison"); | ||
| 414 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | ||
| 415 | MODULE_LICENSE("GPL"); | ||
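[Editor's note] The new dm-bio-prison.c above only exports primitives; how a target is expected to use them follows from dm_bio_detain()'s return convention (1 = cell already held, the bio is queued behind the holder; 0 = the caller's bio is the new holder). A hypothetical caller, names invented, sketching the detain/release cycle:

#include "dm-bio-prison.h"

static void detain_example(struct dm_bio_prison *prison,
			   struct dm_cell_key *key, struct bio *bio)
{
	struct dm_bio_prison_cell *cell;
	struct bio_list bios;

	if (dm_bio_detain(prison, key, bio, &cell))
		return;		/* already held: bio now waits in the cell */

	/* we are the holder: perform the guarded work ... */

	bio_list_init(&bios);
	dm_cell_release(cell, &bios);	/* holder + waiters land in @bios */
	/* ... then resubmit or complete everything in @bios */
}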
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h new file mode 100644 index 000000000000..4e0ac376700a --- /dev/null +++ b/drivers/md/dm-bio-prison.h | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2011-2012 Red Hat, Inc. | ||
| 3 | * | ||
| 4 | * This file is released under the GPL. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef DM_BIO_PRISON_H | ||
| 8 | #define DM_BIO_PRISON_H | ||
| 9 | |||
| 10 | #include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */ | ||
| 11 | #include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */ | ||
| 12 | |||
| 13 | #include <linux/list.h> | ||
| 14 | #include <linux/bio.h> | ||
| 15 | |||
| 16 | /*----------------------------------------------------------------*/ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Sometimes we can't deal with a bio straight away. We put them in prison | ||
| 20 | * where they can't cause any mischief. Bios are put in a cell identified | ||
| 21 | * by a key, multiple bios can be in the same cell. When the cell is | ||
| 22 | * subsequently unlocked the bios become available. | ||
| 23 | */ | ||
| 24 | struct dm_bio_prison; | ||
| 25 | struct dm_bio_prison_cell; | ||
| 26 | |||
| 27 | /* FIXME: this needs to be more abstract */ | ||
| 28 | struct dm_cell_key { | ||
| 29 | int virtual; | ||
| 30 | dm_thin_id dev; | ||
| 31 | dm_block_t block; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells); | ||
| 35 | void dm_bio_prison_destroy(struct dm_bio_prison *prison); | ||
| 36 | |||
| 37 | /* | ||
| 38 | * This may block if a new cell needs allocating. You must ensure that | ||
| 39 | * cells will be unlocked even if the calling thread is blocked. | ||
| 40 | * | ||
| 41 | * Returns 1 if the cell was already held, 0 if @inmate is the new holder. | ||
| 42 | */ | ||
| 43 | int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key, | ||
| 44 | struct bio *inmate, struct dm_bio_prison_cell **ref); | ||
| 45 | |||
| 46 | void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios); | ||
| 47 | void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed | ||
| 48 | void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates); | ||
| 49 | void dm_cell_error(struct dm_bio_prison_cell *cell); | ||
| 50 | |||
| 51 | /*----------------------------------------------------------------*/ | ||
| 52 | |||
| 53 | /* | ||
| 54 | * We use the deferred set to keep track of pending reads to shared blocks. | ||
| 55 | * We do this to ensure the new mapping caused by a write isn't performed | ||
| 56 | * until these prior reads have completed. Otherwise the insertion of the | ||
| 57 | * new mapping could free the old block that the read bios are mapped to. | ||
| 58 | */ | ||
| 59 | |||
| 60 | struct dm_deferred_set; | ||
| 61 | struct dm_deferred_entry; | ||
| 62 | |||
| 63 | struct dm_deferred_set *dm_deferred_set_create(void); | ||
| 64 | void dm_deferred_set_destroy(struct dm_deferred_set *ds); | ||
| 65 | |||
| 66 | struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds); | ||
| 67 | void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head); | ||
| 68 | int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work); | ||
| 69 | |||
| 70 | /*----------------------------------------------------------------*/ | ||
| 71 | |||
| 72 | #endif | ||
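[Editor's note] The deferred-set half of this header tracks in-flight I/O in coarse epochs: each shared read bumps the current entry, completion drops it and sweeps any drained epochs, and dm_deferred_set_add_work() returns 0 when nothing is pending so the job may run immediately. A hypothetical usage sketch, names invented:

#include "dm-bio-prison.h"

static void deferred_set_example(struct dm_deferred_set *ds,
				 struct list_head *job)
{
	struct dm_deferred_entry *entry;
	LIST_HEAD(drained);

	entry = dm_deferred_entry_inc(ds);	/* a shared read begins */
	/* ... issue and complete the read ... */
	dm_deferred_entry_dec(entry, &drained);	/* may release queued work */
	/* ... process anything now sitting on @drained ... */

	/* queue @job behind any still-pending reads */
	if (!dm_deferred_set_add_work(ds, job)) {
		/* nothing pending: run the job right away */
	}
}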
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cc06a1e52423..651ca79881dd 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
| @@ -280,9 +280,7 @@ static void __cache_size_refresh(void) | |||
| 280 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); | 280 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); |
| 281 | BUG_ON(dm_bufio_client_count < 0); | 281 | BUG_ON(dm_bufio_client_count < 0); |
| 282 | 282 | ||
| 283 | dm_bufio_cache_size_latch = dm_bufio_cache_size; | 283 | dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size); |
| 284 | |||
| 285 | barrier(); | ||
| 286 | 284 | ||
| 287 | /* | 285 | /* |
| 288 | * Use default if set to 0 and report the actual cache size used. | 286 | * Use default if set to 0 and report the actual cache size used. |
| @@ -441,8 +439,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty) | |||
| 441 | c->n_buffers[b->list_mode]--; | 439 | c->n_buffers[b->list_mode]--; |
| 442 | c->n_buffers[dirty]++; | 440 | c->n_buffers[dirty]++; |
| 443 | b->list_mode = dirty; | 441 | b->list_mode = dirty; |
| 444 | list_del(&b->lru_list); | 442 | list_move(&b->lru_list, &c->lru[dirty]); |
| 445 | list_add(&b->lru_list, &c->lru[dirty]); | ||
| 446 | } | 443 | } |
| 447 | 444 | ||
| 448 | /*---------------------------------------------------------------- | 445 | /*---------------------------------------------------------------- |
| @@ -813,7 +810,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, | |||
| 813 | { | 810 | { |
| 814 | unsigned long buffers; | 811 | unsigned long buffers; |
| 815 | 812 | ||
| 816 | if (dm_bufio_cache_size != dm_bufio_cache_size_latch) { | 813 | if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) { |
| 817 | mutex_lock(&dm_bufio_clients_lock); | 814 | mutex_lock(&dm_bufio_clients_lock); |
| 818 | __cache_size_refresh(); | 815 | __cache_size_refresh(); |
| 819 | mutex_unlock(&dm_bufio_clients_lock); | 816 | mutex_unlock(&dm_bufio_clients_lock); |
| @@ -1591,11 +1588,9 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); | |||
| 1591 | 1588 | ||
| 1592 | static void cleanup_old_buffers(void) | 1589 | static void cleanup_old_buffers(void) |
| 1593 | { | 1590 | { |
| 1594 | unsigned long max_age = dm_bufio_max_age; | 1591 | unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age); |
| 1595 | struct dm_bufio_client *c; | 1592 | struct dm_bufio_client *c; |
| 1596 | 1593 | ||
| 1597 | barrier(); | ||
| 1598 | |||
| 1599 | if (max_age > ULONG_MAX / HZ) | 1594 | if (max_age > ULONG_MAX / HZ) |
| 1600 | max_age = ULONG_MAX / HZ; | 1595 | max_age = ULONG_MAX / HZ; |
| 1601 | 1596 | ||
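[Editor's note] The dm-bufio hunks above replace plain-read-plus-barrier() pairs with ACCESS_ONCE(), which compiles to a single volatile load: these module parameters can change underneath the reader, and one untorn load both stops the compiler from re-reading the variable and documents the intent. Roughly, as a sketch:

#include <linux/compiler.h>

static unsigned long snapshot_tunable(unsigned long *param)
{
	/* one volatile load; the compiler may not reload or tear it */
	unsigned long v = ACCESS_ONCE(*param);

	return v;
}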
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index d778563a4ffd..573bd04591bf 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -1309,13 +1309,14 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, | |||
| 1309 | { | 1309 | { |
| 1310 | struct multipath *m = ti->private; | 1310 | struct multipath *m = ti->private; |
| 1311 | struct dm_mpath_io *mpio = map_context->ptr; | 1311 | struct dm_mpath_io *mpio = map_context->ptr; |
| 1312 | struct pgpath *pgpath = mpio->pgpath; | 1312 | struct pgpath *pgpath; |
| 1313 | struct path_selector *ps; | 1313 | struct path_selector *ps; |
| 1314 | int r; | 1314 | int r; |
| 1315 | 1315 | ||
| 1316 | BUG_ON(!mpio); | 1316 | BUG_ON(!mpio); |
| 1317 | 1317 | ||
| 1318 | r = do_end_io(m, clone, error, mpio); | 1318 | r = do_end_io(m, clone, error, mpio); |
| 1319 | pgpath = mpio->pgpath; | ||
| 1319 | if (pgpath) { | 1320 | if (pgpath) { |
| 1320 | ps = &pgpath->pg->ps; | 1321 | ps = &pgpath->pg->ps; |
| 1321 | if (ps->type->end_io) | 1322 | if (ps->type->end_io) |
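[Editor's note] The dm-mpath change is purely about ordering: mpio->pgpath was cached before do_end_io() ran, and since the end-io handling can leave that field pointing somewhere else (on failover, for instance), the per-path accounting acted on a stale value. Sampling it after the call uses whatever is current. A schematic of the hazard with invented types:

struct io_path;		/* stand-in for the per-path object */

struct io_ctx {
	struct io_path *active;	/* may be changed by handle_end_io() */
};

static void handle_end_io(struct io_ctx *ctx)
{
	/* may retarget ctx->active during failover (stub) */
}

static void end_io_example(struct io_ctx *ctx)
{
	struct io_path *p;

	handle_end_io(ctx);	/* runs first ... */
	p = ctx->active;	/* ... then read the possibly-updated field */

	if (p) {
		/* per-path accounting on the value that is now current */
	}
}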
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 982e3e390c45..45d94a7e7f6d 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -338,6 +338,84 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | /* | 340 | /* |
| 341 | * validate_rebuild_devices | ||
| 342 | * @rs | ||
| 343 | * | ||
| 344 | * Determine if the devices specified for rebuild can result in a valid | ||
| 345 | * usable array that is capable of rebuilding the given devices. | ||
| 346 | * | ||
| 347 | * Returns: 0 on success, -EINVAL on failure. | ||
| 348 | */ | ||
| 349 | static int validate_rebuild_devices(struct raid_set *rs) | ||
| 350 | { | ||
| 351 | unsigned i, rebuild_cnt = 0; | ||
| 352 | unsigned rebuilds_per_group, copies, d; | ||
| 353 | |||
| 354 | if (!(rs->print_flags & DMPF_REBUILD)) | ||
| 355 | return 0; | ||
| 356 | |||
| 357 | for (i = 0; i < rs->md.raid_disks; i++) | ||
| 358 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) | ||
| 359 | rebuild_cnt++; | ||
| 360 | |||
| 361 | switch (rs->raid_type->level) { | ||
| 362 | case 1: | ||
| 363 | if (rebuild_cnt >= rs->md.raid_disks) | ||
| 364 | goto too_many; | ||
| 365 | break; | ||
| 366 | case 4: | ||
| 367 | case 5: | ||
| 368 | case 6: | ||
| 369 | if (rebuild_cnt > rs->raid_type->parity_devs) | ||
| 370 | goto too_many; | ||
| 371 | break; | ||
| 372 | case 10: | ||
| 373 | copies = raid10_md_layout_to_copies(rs->md.layout); | ||
| 374 | if (rebuild_cnt < copies) | ||
| 375 | break; | ||
| 376 | |||
| 377 | /* | ||
| 378 | * It is possible to have a higher rebuild count for RAID10, | ||
| 379 | * as long as the failed devices occur in different mirror | ||
| 380 | * groups (i.e. different stripes). | ||
| 381 | * | ||
| 382 | * Right now, we only allow for "near" copies. When other | ||
| 383 | * formats are added, we will have to check those too. | ||
| 384 | * | ||
| 385 | * When checking "near" format, make sure no adjacent devices | ||
| 386 | * have failed beyond what can be handled. In addition to the | ||
| 387 | * simple case where the number of devices is a multiple of the | ||
| 388 | * number of copies, we must also handle cases where the number | ||
| 389 | * of devices is not a multiple of the number of copies. | ||
| 390 | * E.g. dev1 dev2 dev3 dev4 dev5 | ||
| 391 | * A A B B C | ||
| 392 | * C D D E E | ||
| 393 | */ | ||
| 394 | rebuilds_per_group = 0; | ||
| 395 | for (i = 0; i < rs->md.raid_disks * copies; i++) { | ||
| 396 | d = i % rs->md.raid_disks; | ||
| 397 | if (!test_bit(In_sync, &rs->dev[d].rdev.flags) && | ||
| 398 | (++rebuilds_per_group >= copies)) | ||
| 399 | goto too_many; | ||
| 400 | if (!((i + 1) % copies)) | ||
| 401 | rebuilds_per_group = 0; | ||
| 402 | } | ||
| 403 | break; | ||
| 404 | default: | ||
| 405 | DMERR("The rebuild parameter is not supported for %s", | ||
| 406 | rs->raid_type->name); | ||
| 407 | rs->ti->error = "Rebuild not supported for this RAID type"; | ||
| 408 | return -EINVAL; | ||
| 409 | } | ||
| 410 | |||
| 411 | return 0; | ||
| 412 | |||
| 413 | too_many: | ||
| 414 | rs->ti->error = "Too many rebuild devices specified"; | ||
| 415 | return -EINVAL; | ||
| 416 | } | ||
| 417 | |||
| 418 | /* | ||
| 341 | * Possible arguments are... | 419 | * Possible arguments are... |
| 342 | * <chunk_size> [optional_args] | 420 | * <chunk_size> [optional_args] |
| 343 | * | 421 | * |
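[Editor's note] The "near" layout check in validate_rebuild_devices() above is worth unpacking: it walks raid_disks * copies slots, each consecutive group of <copies> slots forming one mirror group, with the device index wrapping modulo raid_disks. For the 5-disk, 2-copy layout in the comment the groups are {1,2} {3,4} {5,1} {2,3} {4,5}, matching A A B B C / C D D E E, and a group may lose at most copies - 1 members. A standalone re-implementation for illustration only; the helper is invented:

/* failed[d] is nonzero when device d needs rebuilding; returns 1
 * when some mirror group would lose all of its copies.
 */
static int near_groups_overloaded(int raid_disks, int copies,
				  const int *failed)
{
	int i, per_group = 0;

	for (i = 0; i < raid_disks * copies; i++) {
		if (failed[i % raid_disks] && ++per_group >= copies)
			return 1;
		if (!((i + 1) % copies))
			per_group = 0;	/* next mirror group */
	}
	return 0;
}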
| @@ -365,7 +443,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
| 365 | { | 443 | { |
| 366 | char *raid10_format = "near"; | 444 | char *raid10_format = "near"; |
| 367 | unsigned raid10_copies = 2; | 445 | unsigned raid10_copies = 2; |
| 368 | unsigned i, rebuild_cnt = 0; | 446 | unsigned i; |
| 369 | unsigned long value, region_size = 0; | 447 | unsigned long value, region_size = 0; |
| 370 | sector_t sectors_per_dev = rs->ti->len; | 448 | sector_t sectors_per_dev = rs->ti->len; |
| 371 | sector_t max_io_len; | 449 | sector_t max_io_len; |
| @@ -461,31 +539,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
| 461 | 539 | ||
| 462 | /* Parameters that take a numeric value are checked here */ | 540 | /* Parameters that take a numeric value are checked here */ |
| 463 | if (!strcasecmp(key, "rebuild")) { | 541 | if (!strcasecmp(key, "rebuild")) { |
| 464 | rebuild_cnt++; | 542 | if (value >= rs->md.raid_disks) { |
| 465 | |||
| 466 | switch (rs->raid_type->level) { | ||
| 467 | case 1: | ||
| 468 | if (rebuild_cnt >= rs->md.raid_disks) { | ||
| 469 | rs->ti->error = "Too many rebuild devices specified"; | ||
| 470 | return -EINVAL; | ||
| 471 | } | ||
| 472 | break; | ||
| 473 | case 4: | ||
| 474 | case 5: | ||
| 475 | case 6: | ||
| 476 | if (rebuild_cnt > rs->raid_type->parity_devs) { | ||
| 477 | rs->ti->error = "Too many rebuild devices specified for given RAID type"; | ||
| 478 | return -EINVAL; | ||
| 479 | } | ||
| 480 | break; | ||
| 481 | case 10: | ||
| 482 | default: | ||
| 483 | DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); | ||
| 484 | rs->ti->error = "Rebuild not supported for this RAID type"; | ||
| 485 | return -EINVAL; | ||
| 486 | } | ||
| 487 | |||
| 488 | if (value > rs->md.raid_disks) { | ||
| 489 | rs->ti->error = "Invalid rebuild index given"; | 543 | rs->ti->error = "Invalid rebuild index given"; |
| 490 | return -EINVAL; | 544 | return -EINVAL; |
| 491 | } | 545 | } |
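[Editor's note] Note the quiet off-by-one fix folded into this hunk: rebuild takes a 0-based device index, so value == raid_disks is already out of range, and the old ">" comparison let it through. The check reduces to:

/* valid rebuild indices are 0 .. raid_disks - 1 */
static int rebuild_index_ok(unsigned long value, unsigned raid_disks)
{
	return value < raid_disks;	/* ">= raid_disks" is rejected */
}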
| @@ -608,6 +662,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
| 608 | } | 662 | } |
| 609 | rs->md.dev_sectors = sectors_per_dev; | 663 | rs->md.dev_sectors = sectors_per_dev; |
| 610 | 664 | ||
| 665 | if (validate_rebuild_devices(rs)) | ||
| 666 | return -EINVAL; | ||
| 667 | |||
| 611 | /* Assume there are no metadata devices until the drives are parsed */ | 668 | /* Assume there are no metadata devices until the drives are parsed */ |
| 612 | rs->md.persistent = 0; | 669 | rs->md.persistent = 0; |
| 613 | rs->md.external = 1; | 670 | rs->md.external = 1; |
| @@ -960,6 +1017,19 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
| 960 | 1017 | ||
| 961 | freshest = NULL; | 1018 | freshest = NULL; |
| 962 | rdev_for_each_safe(rdev, tmp, mddev) { | 1019 | rdev_for_each_safe(rdev, tmp, mddev) { |
| 1020 | /* | ||
| 1021 | * Skipping super_load due to DMPF_SYNC will cause | ||
| 1022 | * the array to undergo initialization again as | ||
| 1023 | * though it were new. This is the intended effect | ||
| 1024 | * of the "sync" directive. | ||
| 1025 | * | ||
| 1026 | * When reshaping capability is added, we must ensure | ||
| 1027 | * that the "sync" directive is disallowed during the | ||
| 1028 | * reshape. | ||
| 1029 | */ | ||
| 1030 | if (rs->print_flags & DMPF_SYNC) | ||
| 1031 | continue; | ||
| 1032 | |||
| 963 | if (!rdev->meta_bdev) | 1033 | if (!rdev->meta_bdev) |
| 964 | continue; | 1034 | continue; |
| 965 | 1035 | ||
| @@ -1360,7 +1430,7 @@ static void raid_resume(struct dm_target *ti) | |||
| 1360 | 1430 | ||
| 1361 | static struct target_type raid_target = { | 1431 | static struct target_type raid_target = { |
| 1362 | .name = "raid", | 1432 | .name = "raid", |
| 1363 | .version = {1, 3, 0}, | 1433 | .version = {1, 3, 1}, |
| 1364 | .module = THIS_MODULE, | 1434 | .module = THIS_MODULE, |
| 1365 | .ctr = raid_ctr, | 1435 | .ctr = raid_ctr, |
| 1366 | .dtr = raid_dtr, | 1436 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c29410af1e22..058acf3a5ba7 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include "dm-thin-metadata.h" | 7 | #include "dm-thin-metadata.h" |
| 8 | #include "dm-bio-prison.h" | ||
| 8 | #include "dm.h" | 9 | #include "dm.h" |
| 9 | 10 | ||
| 10 | #include <linux/device-mapper.h> | 11 | #include <linux/device-mapper.h> |
| @@ -21,7 +22,6 @@ | |||
| 21 | * Tunable constants | 22 | * Tunable constants |
| 22 | */ | 23 | */ |
| 23 | #define ENDIO_HOOK_POOL_SIZE 1024 | 24 | #define ENDIO_HOOK_POOL_SIZE 1024 |
| 24 | #define DEFERRED_SET_SIZE 64 | ||
| 25 | #define MAPPING_POOL_SIZE 1024 | 25 | #define MAPPING_POOL_SIZE 1024 |
| 26 | #define PRISON_CELLS 1024 | 26 | #define PRISON_CELLS 1024 |
| 27 | #define COMMIT_PERIOD HZ | 27 | #define COMMIT_PERIOD HZ |
| @@ -58,7 +58,7 @@ | |||
| 58 | * i) plug io further to this physical block. (see bio_prison code). | 58 | * i) plug io further to this physical block. (see bio_prison code). |
| 59 | * | 59 | * |
| 60 | * ii) quiesce any read io to that shared data block. Obviously | 60 | * ii) quiesce any read io to that shared data block. Obviously |
| 61 | * including all devices that share this block. (see deferred_set code) | 61 | * including all devices that share this block. (see dm_deferred_set code) |
| 62 | * | 62 | * |
| 63 | * iii) copy the data block to a newly allocate block. This step can be | 63 | * iii) copy the data block to a newly allocate block. This step can be |
| 64 | * missed out if the io covers the block. (schedule_copy). | 64 | * missed out if the io covers the block. (schedule_copy). |
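[Editor's note] The comment above compresses the whole shared-block write path; with the prison and deferred set now living in dm-bio-prison, the three steps line up with its API. A hypothetical outline (struct and helpers invented) of how i)-iii) compose:

#include "dm-bio-prison.h"

struct pool_stub {
	struct dm_bio_prison *prison;
	struct dm_deferred_set *shared_read_ds;
	struct list_head copy_work;
};

static void schedule_copy_stub(struct pool_stub *pool)
{
	/* kick off the data copy for @pool->copy_work (stub, step iii) */
}

static void break_sharing_outline(struct pool_stub *pool,
				  struct dm_cell_key *key, struct bio *bio)
{
	struct dm_bio_prison_cell *cell;

	/* i) plug further io to the physical block */
	if (dm_bio_detain(pool->prison, key, bio, &cell))
		return;	/* someone else is already breaking sharing */

	/* ii) quiesce outstanding shared reads; 0 means none pending */
	if (!dm_deferred_set_add_work(pool->shared_read_ds,
				      &pool->copy_work))
		schedule_copy_stub(pool);	/* iii) copy immediately */
}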
| @@ -99,381 +99,10 @@ | |||
| 99 | /*----------------------------------------------------------------*/ | 99 | /*----------------------------------------------------------------*/ |
| 100 | 100 | ||
| 101 | /* | 101 | /* |
| 102 | * Sometimes we can't deal with a bio straight away. We put them in prison | ||
| 103 | * where they can't cause any mischief. Bios are put in a cell identified | ||
| 104 | * by a key, multiple bios can be in the same cell. When the cell is | ||
| 105 | * subsequently unlocked the bios become available. | ||
| 106 | */ | ||
| 107 | struct bio_prison; | ||
| 108 | |||
| 109 | struct cell_key { | ||
| 110 | int virtual; | ||
| 111 | dm_thin_id dev; | ||
| 112 | dm_block_t block; | ||
| 113 | }; | ||
| 114 | |||
| 115 | struct dm_bio_prison_cell { | ||
| 116 | struct hlist_node list; | ||
| 117 | struct bio_prison *prison; | ||
| 118 | struct cell_key key; | ||
| 119 | struct bio *holder; | ||
| 120 | struct bio_list bios; | ||
| 121 | }; | ||
| 122 | |||
| 123 | struct bio_prison { | ||
| 124 | spinlock_t lock; | ||
| 125 | mempool_t *cell_pool; | ||
| 126 | |||
| 127 | unsigned nr_buckets; | ||
| 128 | unsigned hash_mask; | ||
| 129 | struct hlist_head *cells; | ||
| 130 | }; | ||
| 131 | |||
| 132 | static uint32_t calc_nr_buckets(unsigned nr_cells) | ||
| 133 | { | ||
| 134 | uint32_t n = 128; | ||
| 135 | |||
| 136 | nr_cells /= 4; | ||
| 137 | nr_cells = min(nr_cells, 8192u); | ||
| 138 | |||
| 139 | while (n < nr_cells) | ||
| 140 | n <<= 1; | ||
| 141 | |||
| 142 | return n; | ||
| 143 | } | ||
| 144 | |||
| 145 | static struct kmem_cache *_cell_cache; | ||
| 146 | |||
| 147 | /* | ||
| 148 | * @nr_cells should be the number of cells you want in use _concurrently_. | ||
| 149 | * Don't confuse it with the number of distinct keys. | ||
| 150 | */ | ||
| 151 | static struct bio_prison *prison_create(unsigned nr_cells) | ||
| 152 | { | ||
| 153 | unsigned i; | ||
| 154 | uint32_t nr_buckets = calc_nr_buckets(nr_cells); | ||
| 155 | size_t len = sizeof(struct bio_prison) + | ||
| 156 | (sizeof(struct hlist_head) * nr_buckets); | ||
| 157 | struct bio_prison *prison = kmalloc(len, GFP_KERNEL); | ||
| 158 | |||
| 159 | if (!prison) | ||
| 160 | return NULL; | ||
| 161 | |||
| 162 | spin_lock_init(&prison->lock); | ||
| 163 | prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); | ||
| 164 | if (!prison->cell_pool) { | ||
| 165 | kfree(prison); | ||
| 166 | return NULL; | ||
| 167 | } | ||
| 168 | |||
| 169 | prison->nr_buckets = nr_buckets; | ||
| 170 | prison->hash_mask = nr_buckets - 1; | ||
| 171 | prison->cells = (struct hlist_head *) (prison + 1); | ||
| 172 | for (i = 0; i < nr_buckets; i++) | ||
| 173 | INIT_HLIST_HEAD(prison->cells + i); | ||
| 174 | |||
| 175 | return prison; | ||
| 176 | } | ||
| 177 | |||
| 178 | static void prison_destroy(struct bio_prison *prison) | ||
| 179 | { | ||
| 180 | mempool_destroy(prison->cell_pool); | ||
| 181 | kfree(prison); | ||
| 182 | } | ||
| 183 | |||
| 184 | static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key) | ||
| 185 | { | ||
| 186 | const unsigned long BIG_PRIME = 4294967291UL; | ||
| 187 | uint64_t hash = key->block * BIG_PRIME; | ||
| 188 | |||
| 189 | return (uint32_t) (hash & prison->hash_mask); | ||
| 190 | } | ||
| 191 | |||
| 192 | static int keys_equal(struct cell_key *lhs, struct cell_key *rhs) | ||
| 193 | { | ||
| 194 | return (lhs->virtual == rhs->virtual) && | ||
| 195 | (lhs->dev == rhs->dev) && | ||
| 196 | (lhs->block == rhs->block); | ||
| 197 | } | ||
| 198 | |||
| 199 | static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, | ||
| 200 | struct cell_key *key) | ||
| 201 | { | ||
| 202 | struct dm_bio_prison_cell *cell; | ||
| 203 | struct hlist_node *tmp; | ||
| 204 | |||
| 205 | hlist_for_each_entry(cell, tmp, bucket, list) | ||
| 206 | if (keys_equal(&cell->key, key)) | ||
| 207 | return cell; | ||
| 208 | |||
| 209 | return NULL; | ||
| 210 | } | ||
| 211 | |||
| 212 | /* | ||
| 213 | * This may block if a new cell needs allocating. You must ensure that | ||
| 214 | * cells will be unlocked even if the calling thread is blocked. | ||
| 215 | * | ||
| 216 | * Returns 1 if the cell was already held, 0 if @inmate is the new holder. | ||
| 217 | */ | ||
| 218 | static int bio_detain(struct bio_prison *prison, struct cell_key *key, | ||
| 219 | struct bio *inmate, struct dm_bio_prison_cell **ref) | ||
| 220 | { | ||
| 221 | int r = 1; | ||
| 222 | unsigned long flags; | ||
| 223 | uint32_t hash = hash_key(prison, key); | ||
| 224 | struct dm_bio_prison_cell *cell, *cell2; | ||
| 225 | |||
| 226 | BUG_ON(hash > prison->nr_buckets); | ||
| 227 | |||
| 228 | spin_lock_irqsave(&prison->lock, flags); | ||
| 229 | |||
| 230 | cell = __search_bucket(prison->cells + hash, key); | ||
| 231 | if (cell) { | ||
| 232 | bio_list_add(&cell->bios, inmate); | ||
| 233 | goto out; | ||
| 234 | } | ||
| 235 | |||
| 236 | /* | ||
| 237 | * Allocate a new cell | ||
| 238 | */ | ||
| 239 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 240 | cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); | ||
| 241 | spin_lock_irqsave(&prison->lock, flags); | ||
| 242 | |||
| 243 | /* | ||
| 244 | * We've been unlocked, so we have to double check that | ||
| 245 | * nobody else has inserted this cell in the meantime. | ||
| 246 | */ | ||
| 247 | cell = __search_bucket(prison->cells + hash, key); | ||
| 248 | if (cell) { | ||
| 249 | mempool_free(cell2, prison->cell_pool); | ||
| 250 | bio_list_add(&cell->bios, inmate); | ||
| 251 | goto out; | ||
| 252 | } | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Use new cell. | ||
| 256 | */ | ||
| 257 | cell = cell2; | ||
| 258 | |||
| 259 | cell->prison = prison; | ||
| 260 | memcpy(&cell->key, key, sizeof(cell->key)); | ||
| 261 | cell->holder = inmate; | ||
| 262 | bio_list_init(&cell->bios); | ||
| 263 | hlist_add_head(&cell->list, prison->cells + hash); | ||
| 264 | |||
| 265 | r = 0; | ||
| 266 | |||
| 267 | out: | ||
| 268 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 269 | |||
| 270 | *ref = cell; | ||
| 271 | |||
| 272 | return r; | ||
| 273 | } | ||
| 274 | |||
| 275 | /* | ||
| 276 | * @inmates must have been initialised prior to this call | ||
| 277 | */ | ||
| 278 | static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) | ||
| 279 | { | ||
| 280 | struct bio_prison *prison = cell->prison; | ||
| 281 | |||
| 282 | hlist_del(&cell->list); | ||
| 283 | |||
| 284 | if (inmates) { | ||
| 285 | bio_list_add(inmates, cell->holder); | ||
| 286 | bio_list_merge(inmates, &cell->bios); | ||
| 287 | } | ||
| 288 | |||
| 289 | mempool_free(cell, prison->cell_pool); | ||
| 290 | } | ||
| 291 | |||
| 292 | static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) | ||
| 293 | { | ||
| 294 | unsigned long flags; | ||
| 295 | struct bio_prison *prison = cell->prison; | ||
| 296 | |||
| 297 | spin_lock_irqsave(&prison->lock, flags); | ||
| 298 | __cell_release(cell, bios); | ||
| 299 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 300 | } | ||
| 301 | |||
| 302 | /* | ||
| 303 | * There are a couple of places where we put a bio into a cell briefly | ||
| 304 | * before taking it out again. In these situations we know that no other | ||
| 305 | * bio may be in the cell. This function releases the cell, and also does | ||
| 306 | * a sanity check. | ||
| 307 | */ | ||
| 308 | static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
| 309 | { | ||
| 310 | BUG_ON(cell->holder != bio); | ||
| 311 | BUG_ON(!bio_list_empty(&cell->bios)); | ||
| 312 | |||
| 313 | __cell_release(cell, NULL); | ||
| 314 | } | ||
| 315 | |||
| 316 | static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) | ||
| 317 | { | ||
| 318 | unsigned long flags; | ||
| 319 | struct bio_prison *prison = cell->prison; | ||
| 320 | |||
| 321 | spin_lock_irqsave(&prison->lock, flags); | ||
| 322 | __cell_release_singleton(cell, bio); | ||
| 323 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 324 | } | ||
| 325 | |||
| 326 | /* | ||
| 327 | * Sometimes we don't want the holder, just the additional bios. | ||
| 328 | */ | ||
| 329 | static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, | ||
| 330 | struct bio_list *inmates) | ||
| 331 | { | ||
| 332 | struct bio_prison *prison = cell->prison; | ||
| 333 | |||
| 334 | hlist_del(&cell->list); | ||
| 335 | bio_list_merge(inmates, &cell->bios); | ||
| 336 | |||
| 337 | mempool_free(cell, prison->cell_pool); | ||
| 338 | } | ||
| 339 | |||
| 340 | static void cell_release_no_holder(struct dm_bio_prison_cell *cell, | ||
| 341 | struct bio_list *inmates) | ||
| 342 | { | ||
| 343 | unsigned long flags; | ||
| 344 | struct bio_prison *prison = cell->prison; | ||
| 345 | |||
| 346 | spin_lock_irqsave(&prison->lock, flags); | ||
| 347 | __cell_release_no_holder(cell, inmates); | ||
| 348 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 349 | } | ||
| 350 | |||
| 351 | static void cell_error(struct dm_bio_prison_cell *cell) | ||
| 352 | { | ||
| 353 | struct bio_prison *prison = cell->prison; | ||
| 354 | struct bio_list bios; | ||
| 355 | struct bio *bio; | ||
| 356 | unsigned long flags; | ||
| 357 | |||
| 358 | bio_list_init(&bios); | ||
| 359 | |||
| 360 | spin_lock_irqsave(&prison->lock, flags); | ||
| 361 | __cell_release(cell, &bios); | ||
| 362 | spin_unlock_irqrestore(&prison->lock, flags); | ||
| 363 | |||
| 364 | while ((bio = bio_list_pop(&bios))) | ||
| 365 | bio_io_error(bio); | ||
| 366 | } | ||
| 367 | |||
| 368 | /*----------------------------------------------------------------*/ | ||
| 369 | |||
| 370 | /* | ||
| 371 | * We use the deferred set to keep track of pending reads to shared blocks. | ||
| 372 | * We do this to ensure the new mapping caused by a write isn't performed | ||
| 373 | * until these prior reads have completed. Otherwise the insertion of the | ||
| 374 | * new mapping could free the old block that the read bios are mapped to. | ||
| 375 | */ | ||
| 376 | |||
| 377 | struct deferred_set; | ||
| 378 | struct deferred_entry { | ||
| 379 | struct deferred_set *ds; | ||
| 380 | unsigned count; | ||
| 381 | struct list_head work_items; | ||
| 382 | }; | ||
| 383 | |||
| 384 | struct deferred_set { | ||
| 385 | spinlock_t lock; | ||
| 386 | unsigned current_entry; | ||
| 387 | unsigned sweeper; | ||
| 388 | struct deferred_entry entries[DEFERRED_SET_SIZE]; | ||
| 389 | }; | ||
| 390 | |||
| 391 | static void ds_init(struct deferred_set *ds) | ||
| 392 | { | ||
| 393 | int i; | ||
| 394 | |||
| 395 | spin_lock_init(&ds->lock); | ||
| 396 | ds->current_entry = 0; | ||
| 397 | ds->sweeper = 0; | ||
| 398 | for (i = 0; i < DEFERRED_SET_SIZE; i++) { | ||
| 399 | ds->entries[i].ds = ds; | ||
| 400 | ds->entries[i].count = 0; | ||
| 401 | INIT_LIST_HEAD(&ds->entries[i].work_items); | ||
| 402 | } | ||
| 403 | } | ||
| 404 | |||
| 405 | static struct deferred_entry *ds_inc(struct deferred_set *ds) | ||
| 406 | { | ||
| 407 | unsigned long flags; | ||
| 408 | struct deferred_entry *entry; | ||
| 409 | |||
| 410 | spin_lock_irqsave(&ds->lock, flags); | ||
| 411 | entry = ds->entries + ds->current_entry; | ||
| 412 | entry->count++; | ||
| 413 | spin_unlock_irqrestore(&ds->lock, flags); | ||
| 414 | |||
| 415 | return entry; | ||
| 416 | } | ||
| 417 | |||
| 418 | static unsigned ds_next(unsigned index) | ||
| 419 | { | ||
| 420 | return (index + 1) % DEFERRED_SET_SIZE; | ||
| 421 | } | ||
| 422 | |||
| 423 | static void __sweep(struct deferred_set *ds, struct list_head *head) | ||
| 424 | { | ||
| 425 | while ((ds->sweeper != ds->current_entry) && | ||
| 426 | !ds->entries[ds->sweeper].count) { | ||
| 427 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
| 428 | ds->sweeper = ds_next(ds->sweeper); | ||
| 429 | } | ||
| 430 | |||
| 431 | if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count) | ||
| 432 | list_splice_init(&ds->entries[ds->sweeper].work_items, head); | ||
| 433 | } | ||
| 434 | |||
| 435 | static void ds_dec(struct deferred_entry *entry, struct list_head *head) | ||
| 436 | { | ||
| 437 | unsigned long flags; | ||
| 438 | |||
| 439 | spin_lock_irqsave(&entry->ds->lock, flags); | ||
| 440 | BUG_ON(!entry->count); | ||
| 441 | --entry->count; | ||
| 442 | __sweep(entry->ds, head); | ||
| 443 | spin_unlock_irqrestore(&entry->ds->lock, flags); | ||
| 444 | } | ||
| 445 | |||
| 446 | /* | ||
| 447 | * Returns 1 if deferred or 0 if no pending items to delay job. | ||
| 448 | */ | ||
| 449 | static int ds_add_work(struct deferred_set *ds, struct list_head *work) | ||
| 450 | { | ||
| 451 | int r = 1; | ||
| 452 | unsigned long flags; | ||
| 453 | unsigned next_entry; | ||
| 454 | |||
| 455 | spin_lock_irqsave(&ds->lock, flags); | ||
| 456 | if ((ds->sweeper == ds->current_entry) && | ||
| 457 | !ds->entries[ds->current_entry].count) | ||
| 458 | r = 0; | ||
| 459 | else { | ||
| 460 | list_add(work, &ds->entries[ds->current_entry].work_items); | ||
| 461 | next_entry = ds_next(ds->current_entry); | ||
| 462 | if (!ds->entries[next_entry].count) | ||
| 463 | ds->current_entry = next_entry; | ||
| 464 | } | ||
| 465 | spin_unlock_irqrestore(&ds->lock, flags); | ||
| 466 | |||
| 467 | return r; | ||
| 468 | } | ||
| 469 | |||
| 470 | /*----------------------------------------------------------------*/ | ||
| 471 | |||
| 472 | /* | ||
| 473 | * Key building. | 102 | * Key building. |
| 474 | */ | 103 | */ |
| 475 | static void build_data_key(struct dm_thin_device *td, | 104 | static void build_data_key(struct dm_thin_device *td, |
| 476 | dm_block_t b, struct cell_key *key) | 105 | dm_block_t b, struct dm_cell_key *key) |
| 477 | { | 106 | { |
| 478 | key->virtual = 0; | 107 | key->virtual = 0; |
| 479 | key->dev = dm_thin_dev_id(td); | 108 | key->dev = dm_thin_dev_id(td); |
| @@ -481,7 +110,7 @@ static void build_data_key(struct dm_thin_device *td, | |||
| 481 | } | 110 | } |
| 482 | 111 | ||
| 483 | static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, | 112 | static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, |
| 484 | struct cell_key *key) | 113 | struct dm_cell_key *key) |
| 485 | { | 114 | { |
| 486 | key->virtual = 1; | 115 | key->virtual = 1; |
| 487 | key->dev = dm_thin_dev_id(td); | 116 | key->dev = dm_thin_dev_id(td); |
| @@ -534,7 +163,7 @@ struct pool { | |||
| 534 | unsigned low_water_triggered:1; /* A dm event has been sent */ | 163 | unsigned low_water_triggered:1; /* A dm event has been sent */ |
| 535 | unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ | 164 | unsigned no_free_space:1; /* A -ENOSPC warning has been issued */ |
| 536 | 165 | ||
| 537 | struct bio_prison *prison; | 166 | struct dm_bio_prison *prison; |
| 538 | struct dm_kcopyd_client *copier; | 167 | struct dm_kcopyd_client *copier; |
| 539 | 168 | ||
| 540 | struct workqueue_struct *wq; | 169 | struct workqueue_struct *wq; |
| @@ -552,8 +181,8 @@ struct pool { | |||
| 552 | 181 | ||
| 553 | struct bio_list retry_on_resume_list; | 182 | struct bio_list retry_on_resume_list; |
| 554 | 183 | ||
| 555 | struct deferred_set shared_read_ds; | 184 | struct dm_deferred_set *shared_read_ds; |
| 556 | struct deferred_set all_io_ds; | 185 | struct dm_deferred_set *all_io_ds; |
| 557 | 186 | ||
| 558 | struct dm_thin_new_mapping *next_mapping; | 187 | struct dm_thin_new_mapping *next_mapping; |
| 559 | mempool_t *mapping_pool; | 188 | mempool_t *mapping_pool; |
| @@ -660,8 +289,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev | |||
| 660 | 289 | ||
| 661 | struct dm_thin_endio_hook { | 290 | struct dm_thin_endio_hook { |
| 662 | struct thin_c *tc; | 291 | struct thin_c *tc; |
| 663 | struct deferred_entry *shared_read_entry; | 292 | struct dm_deferred_entry *shared_read_entry; |
| 664 | struct deferred_entry *all_io_entry; | 293 | struct dm_deferred_entry *all_io_entry; |
| 665 | struct dm_thin_new_mapping *overwrite_mapping; | 294 | struct dm_thin_new_mapping *overwrite_mapping; |
| 666 | }; | 295 | }; |
| 667 | 296 | ||
| @@ -877,7 +506,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell, | |||
| 877 | unsigned long flags; | 506 | unsigned long flags; |
| 878 | 507 | ||
| 879 | spin_lock_irqsave(&pool->lock, flags); | 508 | spin_lock_irqsave(&pool->lock, flags); |
| 880 | cell_release(cell, &pool->deferred_bios); | 509 | dm_cell_release(cell, &pool->deferred_bios); |
| 881 | spin_unlock_irqrestore(&tc->pool->lock, flags); | 510 | spin_unlock_irqrestore(&tc->pool->lock, flags); |
| 882 | 511 | ||
| 883 | wake_worker(pool); | 512 | wake_worker(pool); |
| @@ -896,7 +525,7 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell | |||
| 896 | bio_list_init(&bios); | 525 | bio_list_init(&bios); |
| 897 | 526 | ||
| 898 | spin_lock_irqsave(&pool->lock, flags); | 527 | spin_lock_irqsave(&pool->lock, flags); |
| 899 | cell_release_no_holder(cell, &pool->deferred_bios); | 528 | dm_cell_release_no_holder(cell, &pool->deferred_bios); |
| 900 | spin_unlock_irqrestore(&pool->lock, flags); | 529 | spin_unlock_irqrestore(&pool->lock, flags); |
| 901 | 530 | ||
| 902 | wake_worker(pool); | 531 | wake_worker(pool); |
| @@ -906,7 +535,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | |||
| 906 | { | 535 | { |
| 907 | if (m->bio) | 536 | if (m->bio) |
| 908 | m->bio->bi_end_io = m->saved_bi_end_io; | 537 | m->bio->bi_end_io = m->saved_bi_end_io; |
| 909 | cell_error(m->cell); | 538 | dm_cell_error(m->cell); |
| 910 | list_del(&m->list); | 539 | list_del(&m->list); |
| 911 | mempool_free(m, m->tc->pool->mapping_pool); | 540 | mempool_free(m, m->tc->pool->mapping_pool); |
| 912 | } | 541 | } |
| @@ -921,7 +550,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
| 921 | bio->bi_end_io = m->saved_bi_end_io; | 550 | bio->bi_end_io = m->saved_bi_end_io; |
| 922 | 551 | ||
| 923 | if (m->err) { | 552 | if (m->err) { |
| 924 | cell_error(m->cell); | 553 | dm_cell_error(m->cell); |
| 925 | goto out; | 554 | goto out; |
| 926 | } | 555 | } |
| 927 | 556 | ||
| @@ -933,7 +562,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
| 933 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); | 562 | r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); |
| 934 | if (r) { | 563 | if (r) { |
| 935 | DMERR("dm_thin_insert_block() failed"); | 564 | DMERR("dm_thin_insert_block() failed"); |
| 936 | cell_error(m->cell); | 565 | dm_cell_error(m->cell); |
| 937 | goto out; | 566 | goto out; |
| 938 | } | 567 | } |
| 939 | 568 | ||
| @@ -1067,7 +696,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, | |||
| 1067 | m->err = 0; | 696 | m->err = 0; |
| 1068 | m->bio = NULL; | 697 | m->bio = NULL; |
| 1069 | 698 | ||
| 1070 | if (!ds_add_work(&pool->shared_read_ds, &m->list)) | 699 | if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) |
| 1071 | m->quiesced = 1; | 700 | m->quiesced = 1; |
| 1072 | 701 | ||
| 1073 | /* | 702 | /* |
| @@ -1099,7 +728,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, | |||
| 1099 | if (r < 0) { | 728 | if (r < 0) { |
| 1100 | mempool_free(m, pool->mapping_pool); | 729 | mempool_free(m, pool->mapping_pool); |
| 1101 | DMERR("dm_kcopyd_copy() failed"); | 730 | DMERR("dm_kcopyd_copy() failed"); |
| 1102 | cell_error(cell); | 731 | dm_cell_error(cell); |
| 1103 | } | 732 | } |
| 1104 | } | 733 | } |
| 1105 | } | 734 | } |
| @@ -1164,7 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, | |||
| 1164 | if (r < 0) { | 793 | if (r < 0) { |
| 1165 | mempool_free(m, pool->mapping_pool); | 794 | mempool_free(m, pool->mapping_pool); |
| 1166 | DMERR("dm_kcopyd_zero() failed"); | 795 | DMERR("dm_kcopyd_zero() failed"); |
| 1167 | cell_error(cell); | 796 | dm_cell_error(cell); |
| 1168 | } | 797 | } |
| 1169 | } | 798 | } |
| 1170 | } | 799 | } |
| @@ -1276,7 +905,7 @@ static void no_space(struct dm_bio_prison_cell *cell) | |||
| 1276 | struct bio_list bios; | 905 | struct bio_list bios; |
| 1277 | 906 | ||
| 1278 | bio_list_init(&bios); | 907 | bio_list_init(&bios); |
| 1279 | cell_release(cell, &bios); | 908 | dm_cell_release(cell, &bios); |
| 1280 | 909 | ||
| 1281 | while ((bio = bio_list_pop(&bios))) | 910 | while ((bio = bio_list_pop(&bios))) |
| 1282 | retry_on_resume(bio); | 911 | retry_on_resume(bio); |
| @@ -1288,13 +917,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
| 1288 | unsigned long flags; | 917 | unsigned long flags; |
| 1289 | struct pool *pool = tc->pool; | 918 | struct pool *pool = tc->pool; |
| 1290 | struct dm_bio_prison_cell *cell, *cell2; | 919 | struct dm_bio_prison_cell *cell, *cell2; |
| 1291 | struct cell_key key, key2; | 920 | struct dm_cell_key key, key2; |
| 1292 | dm_block_t block = get_bio_block(tc, bio); | 921 | dm_block_t block = get_bio_block(tc, bio); |
| 1293 | struct dm_thin_lookup_result lookup_result; | 922 | struct dm_thin_lookup_result lookup_result; |
| 1294 | struct dm_thin_new_mapping *m; | 923 | struct dm_thin_new_mapping *m; |
| 1295 | 924 | ||
| 1296 | build_virtual_key(tc->td, block, &key); | 925 | build_virtual_key(tc->td, block, &key); |
| 1297 | if (bio_detain(tc->pool->prison, &key, bio, &cell)) | 926 | if (dm_bio_detain(tc->pool->prison, &key, bio, &cell)) |
| 1298 | return; | 927 | return; |
| 1299 | 928 | ||
| 1300 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); | 929 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); |
| @@ -1306,8 +935,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
| 1306 | * on this block. | 935 | * on this block. |
| 1307 | */ | 936 | */ |
| 1308 | build_data_key(tc->td, lookup_result.block, &key2); | 937 | build_data_key(tc->td, lookup_result.block, &key2); |
| 1309 | if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) { | 938 | if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) { |
| 1310 | cell_release_singleton(cell, bio); | 939 | dm_cell_release_singleton(cell, bio); |
| 1311 | break; | 940 | break; |
| 1312 | } | 941 | } |
| 1313 | 942 | ||
| @@ -1326,7 +955,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
| 1326 | m->err = 0; | 955 | m->err = 0; |
| 1327 | m->bio = bio; | 956 | m->bio = bio; |
| 1328 | 957 | ||
| 1329 | if (!ds_add_work(&pool->all_io_ds, &m->list)) { | 958 | if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) { |
| 1330 | spin_lock_irqsave(&pool->lock, flags); | 959 | spin_lock_irqsave(&pool->lock, flags); |
| 1331 | list_add(&m->list, &pool->prepared_discards); | 960 | list_add(&m->list, &pool->prepared_discards); |
| 1332 | spin_unlock_irqrestore(&pool->lock, flags); | 961 | spin_unlock_irqrestore(&pool->lock, flags); |
| @@ -1338,8 +967,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
| 1338 | * a block boundary. So we submit the discard of a | 967 | * a block boundary. So we submit the discard of a |
| 1339 | * partial block appropriately. | 968 | * partial block appropriately. |
| 1340 | */ | 969 | */ |
| 1341 | cell_release_singleton(cell, bio); | 970 | dm_cell_release_singleton(cell, bio); |
| 1342 | cell_release_singleton(cell2, bio); | 971 | dm_cell_release_singleton(cell2, bio); |
| 1343 | if ((!lookup_result.shared) && pool->pf.discard_passdown) | 972 | if ((!lookup_result.shared) && pool->pf.discard_passdown) |
| 1344 | remap_and_issue(tc, bio, lookup_result.block); | 973 | remap_and_issue(tc, bio, lookup_result.block); |
| 1345 | else | 974 | else |
| @@ -1351,20 +980,20 @@ static void process_discard(struct thin_c *tc, struct bio *bio) | |||
| 1351 | /* | 980 | /* |
| 1352 | * It isn't provisioned, just forget it. | 981 | * It isn't provisioned, just forget it. |
| 1353 | */ | 982 | */ |
| 1354 | cell_release_singleton(cell, bio); | 983 | dm_cell_release_singleton(cell, bio); |
| 1355 | bio_endio(bio, 0); | 984 | bio_endio(bio, 0); |
| 1356 | break; | 985 | break; |
| 1357 | 986 | ||
| 1358 | default: | 987 | default: |
| 1359 | DMERR("discard: find block unexpectedly returned %d", r); | 988 | DMERR("discard: find block unexpectedly returned %d", r); |
| 1360 | cell_release_singleton(cell, bio); | 989 | dm_cell_release_singleton(cell, bio); |
| 1361 | bio_io_error(bio); | 990 | bio_io_error(bio); |
| 1362 | break; | 991 | break; |
| 1363 | } | 992 | } |
| 1364 | } | 993 | } |
| 1365 | 994 | ||
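The hunks above all make the same mechanical substitution: dm-thin's formerly private bio-prison helpers (cell_key, bio_detain, cell_release_singleton, cell_error) become the dm_-prefixed API exported by a shared dm-bio-prison module. A minimal sketch of the detain/release pattern those call sites follow; the function body is illustrative, only the prison calls are taken from this diff:

    static void guarded_process(struct thin_c *tc, struct bio *bio,
                                dm_block_t block)
    {
            struct dm_cell_key key;
            struct dm_bio_prison_cell *cell;

            /* One cell per virtual block: a second bio for the same
             * block is parked in the cell instead of racing the first. */
            build_virtual_key(tc->td, block, &key);
            if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
                    return; /* cell occupied; this bio now waits inside it */

            /* ... look up or provision the block while the cell is held ... */

            /* Release every prisoner except @bio, which we remap ourselves. */
            dm_cell_release_singleton(cell, bio);
            remap_and_issue(tc, bio, block);
    }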
| 1366 | static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, | 995 | static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, |
| 1367 | struct cell_key *key, | 996 | struct dm_cell_key *key, |
| 1368 | struct dm_thin_lookup_result *lookup_result, | 997 | struct dm_thin_lookup_result *lookup_result, |
| 1369 | struct dm_bio_prison_cell *cell) | 998 | struct dm_bio_prison_cell *cell) |
| 1370 | { | 999 | { |
| @@ -1384,7 +1013,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, | |||
| 1384 | 1013 | ||
| 1385 | default: | 1014 | default: |
| 1386 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); | 1015 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); |
| 1387 | cell_error(cell); | 1016 | dm_cell_error(cell); |
| 1388 | break; | 1017 | break; |
| 1389 | } | 1018 | } |
| 1390 | } | 1019 | } |
| @@ -1395,14 +1024,14 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, | |||
| 1395 | { | 1024 | { |
| 1396 | struct dm_bio_prison_cell *cell; | 1025 | struct dm_bio_prison_cell *cell; |
| 1397 | struct pool *pool = tc->pool; | 1026 | struct pool *pool = tc->pool; |
| 1398 | struct cell_key key; | 1027 | struct dm_cell_key key; |
| 1399 | 1028 | ||
| 1400 | /* | 1029 | /* |
| 1401 | * If cell is already occupied, then sharing is already in the process | 1030 | * If cell is already occupied, then sharing is already in the process |
| 1402 | * of being broken so we have nothing further to do here. | 1031 | * of being broken so we have nothing further to do here. |
| 1403 | */ | 1032 | */ |
| 1404 | build_data_key(tc->td, lookup_result->block, &key); | 1033 | build_data_key(tc->td, lookup_result->block, &key); |
| 1405 | if (bio_detain(pool->prison, &key, bio, &cell)) | 1034 | if (dm_bio_detain(pool->prison, &key, bio, &cell)) |
| 1406 | return; | 1035 | return; |
| 1407 | 1036 | ||
| 1408 | if (bio_data_dir(bio) == WRITE && bio->bi_size) | 1037 | if (bio_data_dir(bio) == WRITE && bio->bi_size) |
| @@ -1410,9 +1039,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, | |||
| 1410 | else { | 1039 | else { |
| 1411 | struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; | 1040 | struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; |
| 1412 | 1041 | ||
| 1413 | h->shared_read_entry = ds_inc(&pool->shared_read_ds); | 1042 | h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); |
| 1414 | 1043 | ||
| 1415 | cell_release_singleton(cell, bio); | 1044 | dm_cell_release_singleton(cell, bio); |
| 1416 | remap_and_issue(tc, bio, lookup_result->block); | 1045 | remap_and_issue(tc, bio, lookup_result->block); |
| 1417 | } | 1046 | } |
| 1418 | } | 1047 | } |
| @@ -1427,7 +1056,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
| 1427 | * Remap empty bios (flushes) immediately, without provisioning. | 1056 | * Remap empty bios (flushes) immediately, without provisioning. |
| 1428 | */ | 1057 | */ |
| 1429 | if (!bio->bi_size) { | 1058 | if (!bio->bi_size) { |
| 1430 | cell_release_singleton(cell, bio); | 1059 | dm_cell_release_singleton(cell, bio); |
| 1431 | remap_and_issue(tc, bio, 0); | 1060 | remap_and_issue(tc, bio, 0); |
| 1432 | return; | 1061 | return; |
| 1433 | } | 1062 | } |
| @@ -1437,7 +1066,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
| 1437 | */ | 1066 | */ |
| 1438 | if (bio_data_dir(bio) == READ) { | 1067 | if (bio_data_dir(bio) == READ) { |
| 1439 | zero_fill_bio(bio); | 1068 | zero_fill_bio(bio); |
| 1440 | cell_release_singleton(cell, bio); | 1069 | dm_cell_release_singleton(cell, bio); |
| 1441 | bio_endio(bio, 0); | 1070 | bio_endio(bio, 0); |
| 1442 | return; | 1071 | return; |
| 1443 | } | 1072 | } |
| @@ -1458,7 +1087,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
| 1458 | default: | 1087 | default: |
| 1459 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); | 1088 | DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); |
| 1460 | set_pool_mode(tc->pool, PM_READ_ONLY); | 1089 | set_pool_mode(tc->pool, PM_READ_ONLY); |
| 1461 | cell_error(cell); | 1090 | dm_cell_error(cell); |
| 1462 | break; | 1091 | break; |
| 1463 | } | 1092 | } |
| 1464 | } | 1093 | } |
| @@ -1468,7 +1097,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
| 1468 | int r; | 1097 | int r; |
| 1469 | dm_block_t block = get_bio_block(tc, bio); | 1098 | dm_block_t block = get_bio_block(tc, bio); |
| 1470 | struct dm_bio_prison_cell *cell; | 1099 | struct dm_bio_prison_cell *cell; |
| 1471 | struct cell_key key; | 1100 | struct dm_cell_key key; |
| 1472 | struct dm_thin_lookup_result lookup_result; | 1101 | struct dm_thin_lookup_result lookup_result; |
| 1473 | 1102 | ||
| 1474 | /* | 1103 | /* |
| @@ -1476,7 +1105,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
| 1476 | * being provisioned so we have nothing further to do here. | 1105 | * being provisioned so we have nothing further to do here. |
| 1477 | */ | 1106 | */ |
| 1478 | build_virtual_key(tc->td, block, &key); | 1107 | build_virtual_key(tc->td, block, &key); |
| 1479 | if (bio_detain(tc->pool->prison, &key, bio, &cell)) | 1108 | if (dm_bio_detain(tc->pool->prison, &key, bio, &cell)) |
| 1480 | return; | 1109 | return; |
| 1481 | 1110 | ||
| 1482 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); | 1111 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); |
| @@ -1491,7 +1120,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
| 1491 | * TODO: this will probably have to change when discard goes | 1120 | * TODO: this will probably have to change when discard goes |
| 1492 | * back in. | 1121 | * back in. |
| 1493 | */ | 1122 | */ |
| 1494 | cell_release_singleton(cell, bio); | 1123 | dm_cell_release_singleton(cell, bio); |
| 1495 | 1124 | ||
| 1496 | if (lookup_result.shared) | 1125 | if (lookup_result.shared) |
| 1497 | process_shared_bio(tc, bio, block, &lookup_result); | 1126 | process_shared_bio(tc, bio, block, &lookup_result); |
| @@ -1501,7 +1130,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
| 1501 | 1130 | ||
| 1502 | case -ENODATA: | 1131 | case -ENODATA: |
| 1503 | if (bio_data_dir(bio) == READ && tc->origin_dev) { | 1132 | if (bio_data_dir(bio) == READ && tc->origin_dev) { |
| 1504 | cell_release_singleton(cell, bio); | 1133 | dm_cell_release_singleton(cell, bio); |
| 1505 | remap_to_origin_and_issue(tc, bio); | 1134 | remap_to_origin_and_issue(tc, bio); |
| 1506 | } else | 1135 | } else |
| 1507 | provision_block(tc, bio, block, cell); | 1136 | provision_block(tc, bio, block, cell); |
| @@ -1509,7 +1138,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) | |||
| 1509 | 1138 | ||
| 1510 | default: | 1139 | default: |
| 1511 | DMERR("dm_thin_find_block() failed, error = %d", r); | 1140 | DMERR("dm_thin_find_block() failed, error = %d", r); |
| 1512 | cell_release_singleton(cell, bio); | 1141 | dm_cell_release_singleton(cell, bio); |
| 1513 | bio_io_error(bio); | 1142 | bio_io_error(bio); |
| 1514 | break; | 1143 | break; |
| 1515 | } | 1144 | } |
| @@ -1718,7 +1347,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b | |||
| 1718 | 1347 | ||
| 1719 | h->tc = tc; | 1348 | h->tc = tc; |
| 1720 | h->shared_read_entry = NULL; | 1349 | h->shared_read_entry = NULL; |
| 1721 | h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds); | 1350 | h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds); |
| 1722 | h->overwrite_mapping = NULL; | 1351 | h->overwrite_mapping = NULL; |
| 1723 | 1352 | ||
| 1724 | return h; | 1353 | return h; |
| @@ -1928,7 +1557,7 @@ static void __pool_destroy(struct pool *pool) | |||
| 1928 | if (dm_pool_metadata_close(pool->pmd) < 0) | 1557 | if (dm_pool_metadata_close(pool->pmd) < 0) |
| 1929 | DMWARN("%s: dm_pool_metadata_close() failed.", __func__); | 1558 | DMWARN("%s: dm_pool_metadata_close() failed.", __func__); |
| 1930 | 1559 | ||
| 1931 | prison_destroy(pool->prison); | 1560 | dm_bio_prison_destroy(pool->prison); |
| 1932 | dm_kcopyd_client_destroy(pool->copier); | 1561 | dm_kcopyd_client_destroy(pool->copier); |
| 1933 | 1562 | ||
| 1934 | if (pool->wq) | 1563 | if (pool->wq) |
| @@ -1938,6 +1567,8 @@ static void __pool_destroy(struct pool *pool) | |||
| 1938 | mempool_free(pool->next_mapping, pool->mapping_pool); | 1567 | mempool_free(pool->next_mapping, pool->mapping_pool); |
| 1939 | mempool_destroy(pool->mapping_pool); | 1568 | mempool_destroy(pool->mapping_pool); |
| 1940 | mempool_destroy(pool->endio_hook_pool); | 1569 | mempool_destroy(pool->endio_hook_pool); |
| 1570 | dm_deferred_set_destroy(pool->shared_read_ds); | ||
| 1571 | dm_deferred_set_destroy(pool->all_io_ds); | ||
| 1941 | kfree(pool); | 1572 | kfree(pool); |
| 1942 | } | 1573 | } |
| 1943 | 1574 | ||
| @@ -1976,7 +1607,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
| 1976 | pool->sectors_per_block_shift = __ffs(block_size); | 1607 | pool->sectors_per_block_shift = __ffs(block_size); |
| 1977 | pool->low_water_blocks = 0; | 1608 | pool->low_water_blocks = 0; |
| 1978 | pool_features_init(&pool->pf); | 1609 | pool_features_init(&pool->pf); |
| 1979 | pool->prison = prison_create(PRISON_CELLS); | 1610 | pool->prison = dm_bio_prison_create(PRISON_CELLS); |
| 1980 | if (!pool->prison) { | 1611 | if (!pool->prison) { |
| 1981 | *error = "Error creating pool's bio prison"; | 1612 | *error = "Error creating pool's bio prison"; |
| 1982 | err_p = ERR_PTR(-ENOMEM); | 1613 | err_p = ERR_PTR(-ENOMEM); |
| @@ -2012,8 +1643,20 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
| 2012 | pool->low_water_triggered = 0; | 1643 | pool->low_water_triggered = 0; |
| 2013 | pool->no_free_space = 0; | 1644 | pool->no_free_space = 0; |
| 2014 | bio_list_init(&pool->retry_on_resume_list); | 1645 | bio_list_init(&pool->retry_on_resume_list); |
| 2015 | ds_init(&pool->shared_read_ds); | 1646 | |
| 2016 | ds_init(&pool->all_io_ds); | 1647 | pool->shared_read_ds = dm_deferred_set_create(); |
| 1648 | if (!pool->shared_read_ds) { | ||
| 1649 | *error = "Error creating pool's shared read deferred set"; | ||
| 1650 | err_p = ERR_PTR(-ENOMEM); | ||
| 1651 | goto bad_shared_read_ds; | ||
| 1652 | } | ||
| 1653 | |||
| 1654 | pool->all_io_ds = dm_deferred_set_create(); | ||
| 1655 | if (!pool->all_io_ds) { | ||
| 1656 | *error = "Error creating pool's all io deferred set"; | ||
| 1657 | err_p = ERR_PTR(-ENOMEM); | ||
| 1658 | goto bad_all_io_ds; | ||
| 1659 | } | ||
| 2017 | 1660 | ||
| 2018 | pool->next_mapping = NULL; | 1661 | pool->next_mapping = NULL; |
| 2019 | pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, | 1662 | pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, |
| @@ -2042,11 +1685,15 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
| 2042 | bad_endio_hook_pool: | 1685 | bad_endio_hook_pool: |
| 2043 | mempool_destroy(pool->mapping_pool); | 1686 | mempool_destroy(pool->mapping_pool); |
| 2044 | bad_mapping_pool: | 1687 | bad_mapping_pool: |
| 1688 | dm_deferred_set_destroy(pool->all_io_ds); | ||
| 1689 | bad_all_io_ds: | ||
| 1690 | dm_deferred_set_destroy(pool->shared_read_ds); | ||
| 1691 | bad_shared_read_ds: | ||
| 2045 | destroy_workqueue(pool->wq); | 1692 | destroy_workqueue(pool->wq); |
| 2046 | bad_wq: | 1693 | bad_wq: |
| 2047 | dm_kcopyd_client_destroy(pool->copier); | 1694 | dm_kcopyd_client_destroy(pool->copier); |
| 2048 | bad_kcopyd_client: | 1695 | bad_kcopyd_client: |
| 2049 | prison_destroy(pool->prison); | 1696 | dm_bio_prison_destroy(pool->prison); |
| 2050 | bad_prison: | 1697 | bad_prison: |
| 2051 | kfree(pool); | 1698 | kfree(pool); |
| 2052 | bad_pool: | 1699 | bad_pool: |
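The two deferred sets move from embedded structures initialised with ds_init() to heap objects, so pool_create() gains two allocation failure paths and the unwind ladder above gains two labels. A compressed sketch of the construction/teardown symmetry, with the unrelated steps elided:

    static struct pool *pool_create_sketch(char **error)
    {
            struct pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

            if (!pool)
                    return ERR_PTR(-ENOMEM);

            pool->shared_read_ds = dm_deferred_set_create();
            if (!pool->shared_read_ds) {
                    *error = "Error creating pool's shared read deferred set";
                    goto bad_shared_read_ds;
            }

            pool->all_io_ds = dm_deferred_set_create();
            if (!pool->all_io_ds) {
                    *error = "Error creating pool's all io deferred set";
                    goto bad_all_io_ds;
            }

            return pool;

            /* Labels undo strictly in reverse order of construction. */
    bad_all_io_ds:
            dm_deferred_set_destroy(pool->shared_read_ds);
    bad_shared_read_ds:
            kfree(pool);
            return ERR_PTR(-ENOMEM);
    }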
| @@ -2272,15 +1919,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 2272 | goto out_flags_changed; | 1919 | goto out_flags_changed; |
| 2273 | } | 1920 | } |
| 2274 | 1921 | ||
| 2275 | /* | ||
| 2276 | * The block layer requires discard_granularity to be a power of 2. | ||
| 2277 | */ | ||
| 2278 | if (pf.discard_enabled && !is_power_of_2(block_size)) { | ||
| 2279 | ti->error = "Discard support must be disabled when the block size is not a power of 2"; | ||
| 2280 | r = -EINVAL; | ||
| 2281 | goto out_flags_changed; | ||
| 2282 | } | ||
| 2283 | |||
| 2284 | pt->pool = pool; | 1922 | pt->pool = pool; |
| 2285 | pt->ti = ti; | 1923 | pt->ti = ti; |
| 2286 | pt->metadata_dev = metadata_dev; | 1924 | pt->metadata_dev = metadata_dev; |
| @@ -2762,6 +2400,11 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | |||
| 2762 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | 2400 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); |
| 2763 | } | 2401 | } |
| 2764 | 2402 | ||
| 2403 | static bool block_size_is_power_of_two(struct pool *pool) | ||
| 2404 | { | ||
| 2405 | return pool->sectors_per_block_shift >= 0; | ||
| 2406 | } | ||
| 2407 | |||
| 2765 | static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) | 2408 | static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) |
| 2766 | { | 2409 | { |
| 2767 | struct pool *pool = pt->pool; | 2410 | struct pool *pool = pt->pool; |
| @@ -2775,8 +2418,15 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) | |||
| 2775 | if (pt->adjusted_pf.discard_passdown) { | 2418 | if (pt->adjusted_pf.discard_passdown) { |
| 2776 | data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; | 2419 | data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; |
| 2777 | limits->discard_granularity = data_limits->discard_granularity; | 2420 | limits->discard_granularity = data_limits->discard_granularity; |
| 2778 | } else | 2421 | } else if (block_size_is_power_of_two(pool)) |
| 2779 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; | 2422 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; |
| 2423 | else | ||
| 2424 | /* | ||
| 2425 | * Use largest power of 2 that is a factor of sectors_per_block | ||
| 2426 | * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS. | ||
| 2427 | */ | ||
| 2428 | limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1), | ||
| 2429 | DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT; | ||
| 2780 | } | 2430 | } |
| 2781 | 2431 | ||
| 2782 | static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) | 2432 | static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) |
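For a non-power-of-2 block size, the hunk above derives discard_granularity from the largest power of two dividing sectors_per_block: ffs() returns the 1-based index of the lowest set bit, so 1 << (ffs(n) - 1) isolates that bit, which is exactly the biggest power-of-2 factor of n. A standalone demonstration (userspace C; the DATA_DEV_BLOCK_SIZE_MIN_SECTORS clamp is omitted):

    #include <stdio.h>
    #include <strings.h>    /* userspace ffs() */

    int main(void)
    {
            unsigned n = 24;        /* e.g. a 12 KiB block = 24 sectors */

            /* 24 = 0b11000, ffs(24) = 4, 1 << 3 = 8: the largest
             * power of two that divides 24. */
            printf("%u -> granularity %u sectors\n", n, 1u << (ffs(n) - 1));
            return 0;
    }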
| @@ -2804,7 +2454,7 @@ static struct target_type pool_target = { | |||
| 2804 | .name = "thin-pool", | 2454 | .name = "thin-pool", |
| 2805 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 2455 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
| 2806 | DM_TARGET_IMMUTABLE, | 2456 | DM_TARGET_IMMUTABLE, |
| 2807 | .version = {1, 4, 0}, | 2457 | .version = {1, 5, 0}, |
| 2808 | .module = THIS_MODULE, | 2458 | .module = THIS_MODULE, |
| 2809 | .ctr = pool_ctr, | 2459 | .ctr = pool_ctr, |
| 2810 | .dtr = pool_dtr, | 2460 | .dtr = pool_dtr, |
| @@ -2979,7 +2629,7 @@ static int thin_endio(struct dm_target *ti, | |||
| 2979 | 2629 | ||
| 2980 | if (h->shared_read_entry) { | 2630 | if (h->shared_read_entry) { |
| 2981 | INIT_LIST_HEAD(&work); | 2631 | INIT_LIST_HEAD(&work); |
| 2982 | ds_dec(h->shared_read_entry, &work); | 2632 | dm_deferred_entry_dec(h->shared_read_entry, &work); |
| 2983 | 2633 | ||
| 2984 | spin_lock_irqsave(&pool->lock, flags); | 2634 | spin_lock_irqsave(&pool->lock, flags); |
| 2985 | list_for_each_entry_safe(m, tmp, &work, list) { | 2635 | list_for_each_entry_safe(m, tmp, &work, list) { |
| @@ -2992,7 +2642,7 @@ static int thin_endio(struct dm_target *ti, | |||
| 2992 | 2642 | ||
| 2993 | if (h->all_io_entry) { | 2643 | if (h->all_io_entry) { |
| 2994 | INIT_LIST_HEAD(&work); | 2644 | INIT_LIST_HEAD(&work); |
| 2995 | ds_dec(h->all_io_entry, &work); | 2645 | dm_deferred_entry_dec(h->all_io_entry, &work); |
| 2996 | spin_lock_irqsave(&pool->lock, flags); | 2646 | spin_lock_irqsave(&pool->lock, flags); |
| 2997 | list_for_each_entry_safe(m, tmp, &work, list) | 2647 | list_for_each_entry_safe(m, tmp, &work, list) |
| 2998 | list_add(&m->list, &pool->prepared_discards); | 2648 | list_add(&m->list, &pool->prepared_discards); |
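The deferred-set calls keep their semantics under the new names: an entry is taken when a bio is hooked on the way in, dropped at endio, and any work items queued behind an epoch that has fully drained are handed back on the caller's list. The lifecycle, condensed from the hunks above:

    /* entry: thin_hook_bio() */
    h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);

    /* exit: thin_endio(), collecting work whose epoch has now drained */
    INIT_LIST_HEAD(&work);
    dm_deferred_entry_dec(h->all_io_entry, &work);

    spin_lock_irqsave(&pool->lock, flags);
    list_for_each_entry_safe(m, tmp, &work, list)
            list_add(&m->list, &pool->prepared_discards);
    spin_unlock_irqrestore(&pool->lock, flags);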
| @@ -3095,7 +2745,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
| 3095 | 2745 | ||
| 3096 | static struct target_type thin_target = { | 2746 | static struct target_type thin_target = { |
| 3097 | .name = "thin", | 2747 | .name = "thin", |
| 3098 | .version = {1, 4, 0}, | 2748 | .version = {1, 5, 0}, |
| 3099 | .module = THIS_MODULE, | 2749 | .module = THIS_MODULE, |
| 3100 | .ctr = thin_ctr, | 2750 | .ctr = thin_ctr, |
| 3101 | .dtr = thin_dtr, | 2751 | .dtr = thin_dtr, |
| @@ -3125,10 +2775,6 @@ static int __init dm_thin_init(void) | |||
| 3125 | 2775 | ||
| 3126 | r = -ENOMEM; | 2776 | r = -ENOMEM; |
| 3127 | 2777 | ||
| 3128 | _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); | ||
| 3129 | if (!_cell_cache) | ||
| 3130 | goto bad_cell_cache; | ||
| 3131 | |||
| 3132 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); | 2778 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); |
| 3133 | if (!_new_mapping_cache) | 2779 | if (!_new_mapping_cache) |
| 3134 | goto bad_new_mapping_cache; | 2780 | goto bad_new_mapping_cache; |
| @@ -3142,8 +2788,6 @@ static int __init dm_thin_init(void) | |||
| 3142 | bad_endio_hook_cache: | 2788 | bad_endio_hook_cache: |
| 3143 | kmem_cache_destroy(_new_mapping_cache); | 2789 | kmem_cache_destroy(_new_mapping_cache); |
| 3144 | bad_new_mapping_cache: | 2790 | bad_new_mapping_cache: |
| 3145 | kmem_cache_destroy(_cell_cache); | ||
| 3146 | bad_cell_cache: | ||
| 3147 | dm_unregister_target(&pool_target); | 2791 | dm_unregister_target(&pool_target); |
| 3148 | bad_pool_target: | 2792 | bad_pool_target: |
| 3149 | dm_unregister_target(&thin_target); | 2793 | dm_unregister_target(&thin_target); |
| @@ -3156,7 +2800,6 @@ static void dm_thin_exit(void) | |||
| 3156 | dm_unregister_target(&thin_target); | 2800 | dm_unregister_target(&thin_target); |
| 3157 | dm_unregister_target(&pool_target); | 2801 | dm_unregister_target(&pool_target); |
| 3158 | 2802 | ||
| 3159 | kmem_cache_destroy(_cell_cache); | ||
| 3160 | kmem_cache_destroy(_new_mapping_cache); | 2803 | kmem_cache_destroy(_new_mapping_cache); |
| 3161 | kmem_cache_destroy(_endio_hook_cache); | 2804 | kmem_cache_destroy(_endio_hook_cache); |
| 3162 | } | 2805 | } |
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 892ae2766aa6..9e7328bb4030 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
| @@ -438,7 +438,7 @@ static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) | |||
| 438 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); | 438 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); |
| 439 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); | 439 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); |
| 440 | if (!i) { | 440 | if (!i) { |
| 441 | unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster; | 441 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); |
| 442 | 442 | ||
| 443 | cluster >>= v->data_dev_block_bits; | 443 | cluster >>= v->data_dev_block_bits; |
| 444 | if (unlikely(!cluster)) | 444 | if (unlikely(!cluster)) |
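The open-coded volatile cast and ACCESS_ONCE() are equivalent here; in kernels of this vintage the macro is defined essentially as below. Both force the compiler to emit exactly one load of dm_verity_prefetch_cluster, which matters because the module parameter can be rewritten through sysfs while this function runs:

    /* From include/linux/compiler.h of the same era (paraphrased): */
    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    /* Usage: snapshot once, then reason about a stable value. */
    unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
    cluster >>= v->data_dev_block_bits;  /* all later uses see the snapshot */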
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 66ceaff6455c..02db9183ca01 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -71,6 +71,7 @@ struct dm_target_io { | |||
| 71 | struct dm_io *io; | 71 | struct dm_io *io; |
| 72 | struct dm_target *ti; | 72 | struct dm_target *ti; |
| 73 | union map_info info; | 73 | union map_info info; |
| 74 | struct bio clone; | ||
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | /* | 77 | /* |
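Adding struct bio clone as the final member of dm_target_io is what lets the rest of this patch delete the tio slab: the bioset is created with a front pad of offsetof(struct dm_target_io, clone), so one bio_alloc_bioset() call hands back memory for the whole structure, and container_of() recovers the tio from the bio. A sketch of the new allocation path, with names taken from later hunks in this file:

    static struct dm_target_io *alloc_tio_sketch(struct mapped_device *md,
                                                 int nr_iovecs)
    {
            struct bio *clone;

            /* md->bs was created as
             *   bioset_create(pool_size,
             *                 offsetof(struct dm_target_io, clone));
             * so every bio it hands out has tio-sized headroom in front. */
            clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, md->bs);

            /* Walk back from the embedded bio to its container; freeing
             * is just bio_put(&tio->clone), as the new free_tio() shows. */
            return container_of(clone, struct dm_target_io, clone);
    }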
| @@ -214,7 +215,6 @@ struct dm_md_mempools { | |||
| 214 | 215 | ||
| 215 | #define MIN_IOS 256 | 216 | #define MIN_IOS 256 |
| 216 | static struct kmem_cache *_io_cache; | 217 | static struct kmem_cache *_io_cache; |
| 217 | static struct kmem_cache *_tio_cache; | ||
| 218 | static struct kmem_cache *_rq_tio_cache; | 218 | static struct kmem_cache *_rq_tio_cache; |
| 219 | 219 | ||
| 220 | /* | 220 | /* |
| @@ -232,14 +232,9 @@ static int __init local_init(void) | |||
| 232 | if (!_io_cache) | 232 | if (!_io_cache) |
| 233 | return r; | 233 | return r; |
| 234 | 234 | ||
| 235 | /* allocate a slab for the target ios */ | ||
| 236 | _tio_cache = KMEM_CACHE(dm_target_io, 0); | ||
| 237 | if (!_tio_cache) | ||
| 238 | goto out_free_io_cache; | ||
| 239 | |||
| 240 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); | 235 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); |
| 241 | if (!_rq_tio_cache) | 236 | if (!_rq_tio_cache) |
| 242 | goto out_free_tio_cache; | 237 | goto out_free_io_cache; |
| 243 | 238 | ||
| 244 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); | 239 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); |
| 245 | if (!_rq_bio_info_cache) | 240 | if (!_rq_bio_info_cache) |
| @@ -265,8 +260,6 @@ out_free_rq_bio_info_cache: | |||
| 265 | kmem_cache_destroy(_rq_bio_info_cache); | 260 | kmem_cache_destroy(_rq_bio_info_cache); |
| 266 | out_free_rq_tio_cache: | 261 | out_free_rq_tio_cache: |
| 267 | kmem_cache_destroy(_rq_tio_cache); | 262 | kmem_cache_destroy(_rq_tio_cache); |
| 268 | out_free_tio_cache: | ||
| 269 | kmem_cache_destroy(_tio_cache); | ||
| 270 | out_free_io_cache: | 263 | out_free_io_cache: |
| 271 | kmem_cache_destroy(_io_cache); | 264 | kmem_cache_destroy(_io_cache); |
| 272 | 265 | ||
| @@ -277,7 +270,6 @@ static void local_exit(void) | |||
| 277 | { | 270 | { |
| 278 | kmem_cache_destroy(_rq_bio_info_cache); | 271 | kmem_cache_destroy(_rq_bio_info_cache); |
| 279 | kmem_cache_destroy(_rq_tio_cache); | 272 | kmem_cache_destroy(_rq_tio_cache); |
| 280 | kmem_cache_destroy(_tio_cache); | ||
| 281 | kmem_cache_destroy(_io_cache); | 273 | kmem_cache_destroy(_io_cache); |
| 282 | unregister_blkdev(_major, _name); | 274 | unregister_blkdev(_major, _name); |
| 283 | dm_uevent_exit(); | 275 | dm_uevent_exit(); |
| @@ -463,7 +455,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io) | |||
| 463 | 455 | ||
| 464 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | 456 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
| 465 | { | 457 | { |
| 466 | mempool_free(tio, md->tio_pool); | 458 | bio_put(&tio->clone); |
| 467 | } | 459 | } |
| 468 | 460 | ||
| 469 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, | 461 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, |
| @@ -682,7 +674,6 @@ static void clone_endio(struct bio *bio, int error) | |||
| 682 | } | 674 | } |
| 683 | 675 | ||
| 684 | free_tio(md, tio); | 676 | free_tio(md, tio); |
| 685 | bio_put(bio); | ||
| 686 | dec_pending(io, error); | 677 | dec_pending(io, error); |
| 687 | } | 678 | } |
| 688 | 679 | ||
| @@ -1002,12 +993,12 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
| 1002 | } | 993 | } |
| 1003 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); | 994 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); |
| 1004 | 995 | ||
| 1005 | static void __map_bio(struct dm_target *ti, struct bio *clone, | 996 | static void __map_bio(struct dm_target *ti, struct dm_target_io *tio) |
| 1006 | struct dm_target_io *tio) | ||
| 1007 | { | 997 | { |
| 1008 | int r; | 998 | int r; |
| 1009 | sector_t sector; | 999 | sector_t sector; |
| 1010 | struct mapped_device *md; | 1000 | struct mapped_device *md; |
| 1001 | struct bio *clone = &tio->clone; | ||
| 1011 | 1002 | ||
| 1012 | clone->bi_end_io = clone_endio; | 1003 | clone->bi_end_io = clone_endio; |
| 1013 | clone->bi_private = tio; | 1004 | clone->bi_private = tio; |
| @@ -1031,7 +1022,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
| 1031 | /* error the io and bail out, or requeue it if needed */ | 1022 | /* error the io and bail out, or requeue it if needed */ |
| 1032 | md = tio->io->md; | 1023 | md = tio->io->md; |
| 1033 | dec_pending(tio->io, r); | 1024 | dec_pending(tio->io, r); |
| 1034 | bio_put(clone); | ||
| 1035 | free_tio(md, tio); | 1025 | free_tio(md, tio); |
| 1036 | } else if (r) { | 1026 | } else if (r) { |
| 1037 | DMWARN("unimplemented target map return value: %d", r); | 1027 | DMWARN("unimplemented target map return value: %d", r); |
| @@ -1052,14 +1042,13 @@ struct clone_info { | |||
| 1052 | /* | 1042 | /* |
| 1053 | * Creates a little bio that just does part of a bvec. | 1043 | * Creates a little bio that just does part of a bvec. |
| 1054 | */ | 1044 | */ |
| 1055 | static struct bio *split_bvec(struct bio *bio, sector_t sector, | 1045 | static void split_bvec(struct dm_target_io *tio, struct bio *bio, |
| 1056 | unsigned short idx, unsigned int offset, | 1046 | sector_t sector, unsigned short idx, unsigned int offset, |
| 1057 | unsigned int len, struct bio_set *bs) | 1047 | unsigned int len, struct bio_set *bs) |
| 1058 | { | 1048 | { |
| 1059 | struct bio *clone; | 1049 | struct bio *clone = &tio->clone; |
| 1060 | struct bio_vec *bv = bio->bi_io_vec + idx; | 1050 | struct bio_vec *bv = bio->bi_io_vec + idx; |
| 1061 | 1051 | ||
| 1062 | clone = bio_alloc_bioset(GFP_NOIO, 1, bs); | ||
| 1063 | *clone->bi_io_vec = *bv; | 1052 | *clone->bi_io_vec = *bv; |
| 1064 | 1053 | ||
| 1065 | clone->bi_sector = sector; | 1054 | clone->bi_sector = sector; |
| @@ -1076,20 +1065,18 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
| 1076 | bio_integrity_trim(clone, | 1065 | bio_integrity_trim(clone, |
| 1077 | bio_sector_offset(bio, idx, offset), len); | 1066 | bio_sector_offset(bio, idx, offset), len); |
| 1078 | } | 1067 | } |
| 1079 | |||
| 1080 | return clone; | ||
| 1081 | } | 1068 | } |
| 1082 | 1069 | ||
| 1083 | /* | 1070 | /* |
| 1084 | * Creates a bio that consists of range of complete bvecs. | 1071 | * Creates a bio that consists of range of complete bvecs. |
| 1085 | */ | 1072 | */ |
| 1086 | static struct bio *clone_bio(struct bio *bio, sector_t sector, | 1073 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, |
| 1087 | unsigned short idx, unsigned short bv_count, | 1074 | sector_t sector, unsigned short idx, |
| 1088 | unsigned int len, struct bio_set *bs) | 1075 | unsigned short bv_count, unsigned int len, |
| 1076 | struct bio_set *bs) | ||
| 1089 | { | 1077 | { |
| 1090 | struct bio *clone; | 1078 | struct bio *clone = &tio->clone; |
| 1091 | 1079 | ||
| 1092 | clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); | ||
| 1093 | __bio_clone(clone, bio); | 1080 | __bio_clone(clone, bio); |
| 1094 | clone->bi_sector = sector; | 1081 | clone->bi_sector = sector; |
| 1095 | clone->bi_idx = idx; | 1082 | clone->bi_idx = idx; |
| @@ -1104,14 +1091,16 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
| 1104 | bio_integrity_trim(clone, | 1091 | bio_integrity_trim(clone, |
| 1105 | bio_sector_offset(bio, idx, 0), len); | 1092 | bio_sector_offset(bio, idx, 0), len); |
| 1106 | } | 1093 | } |
| 1107 | |||
| 1108 | return clone; | ||
| 1109 | } | 1094 | } |
| 1110 | 1095 | ||
| 1111 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | 1096 | static struct dm_target_io *alloc_tio(struct clone_info *ci, |
| 1112 | struct dm_target *ti) | 1097 | struct dm_target *ti, int nr_iovecs) |
| 1113 | { | 1098 | { |
| 1114 | struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); | 1099 | struct dm_target_io *tio; |
| 1100 | struct bio *clone; | ||
| 1101 | |||
| 1102 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs); | ||
| 1103 | tio = container_of(clone, struct dm_target_io, clone); | ||
| 1115 | 1104 | ||
| 1116 | tio->io = ci->io; | 1105 | tio->io = ci->io; |
| 1117 | tio->ti = ti; | 1106 | tio->ti = ti; |
| @@ -1123,8 +1112,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, | |||
| 1123 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | 1112 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, |
| 1124 | unsigned request_nr, sector_t len) | 1113 | unsigned request_nr, sector_t len) |
| 1125 | { | 1114 | { |
| 1126 | struct dm_target_io *tio = alloc_tio(ci, ti); | 1115 | struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); |
| 1127 | struct bio *clone; | 1116 | struct bio *clone = &tio->clone; |
| 1128 | 1117 | ||
| 1129 | tio->info.target_request_nr = request_nr; | 1118 | tio->info.target_request_nr = request_nr; |
| 1130 | 1119 | ||
| @@ -1133,14 +1122,14 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | |||
| 1133 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | 1122 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush |
| 1134 | * and discard, so no need for concern about wasted bvec allocations. | 1123 | * and discard, so no need for concern about wasted bvec allocations. |
| 1135 | */ | 1124 | */ |
| 1136 | clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs); | ||
| 1137 | 1125 | ||
| 1126 | __bio_clone(clone, ci->bio); | ||
| 1138 | if (len) { | 1127 | if (len) { |
| 1139 | clone->bi_sector = ci->sector; | 1128 | clone->bi_sector = ci->sector; |
| 1140 | clone->bi_size = to_bytes(len); | 1129 | clone->bi_size = to_bytes(len); |
| 1141 | } | 1130 | } |
| 1142 | 1131 | ||
| 1143 | __map_bio(ti, clone, tio); | 1132 | __map_bio(ti, tio); |
| 1144 | } | 1133 | } |
| 1145 | 1134 | ||
| 1146 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, | 1135 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, |
| @@ -1169,14 +1158,13 @@ static int __clone_and_map_empty_flush(struct clone_info *ci) | |||
| 1169 | */ | 1158 | */ |
| 1170 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) | 1159 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) |
| 1171 | { | 1160 | { |
| 1172 | struct bio *clone, *bio = ci->bio; | 1161 | struct bio *bio = ci->bio; |
| 1173 | struct dm_target_io *tio; | 1162 | struct dm_target_io *tio; |
| 1174 | 1163 | ||
| 1175 | tio = alloc_tio(ci, ti); | 1164 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
| 1176 | clone = clone_bio(bio, ci->sector, ci->idx, | 1165 | clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx, |
| 1177 | bio->bi_vcnt - ci->idx, ci->sector_count, | 1166 | ci->sector_count, ci->md->bs); |
| 1178 | ci->md->bs); | 1167 | __map_bio(ti, tio); |
| 1179 | __map_bio(ti, clone, tio); | ||
| 1180 | ci->sector_count = 0; | 1168 | ci->sector_count = 0; |
| 1181 | } | 1169 | } |
| 1182 | 1170 | ||
| @@ -1214,7 +1202,7 @@ static int __clone_and_map_discard(struct clone_info *ci) | |||
| 1214 | 1202 | ||
| 1215 | static int __clone_and_map(struct clone_info *ci) | 1203 | static int __clone_and_map(struct clone_info *ci) |
| 1216 | { | 1204 | { |
| 1217 | struct bio *clone, *bio = ci->bio; | 1205 | struct bio *bio = ci->bio; |
| 1218 | struct dm_target *ti; | 1206 | struct dm_target *ti; |
| 1219 | sector_t len = 0, max; | 1207 | sector_t len = 0, max; |
| 1220 | struct dm_target_io *tio; | 1208 | struct dm_target_io *tio; |
| @@ -1254,10 +1242,10 @@ static int __clone_and_map(struct clone_info *ci) | |||
| 1254 | len += bv_len; | 1242 | len += bv_len; |
| 1255 | } | 1243 | } |
| 1256 | 1244 | ||
| 1257 | tio = alloc_tio(ci, ti); | 1245 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
| 1258 | clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, | 1246 | clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len, |
| 1259 | ci->md->bs); | 1247 | ci->md->bs); |
| 1260 | __map_bio(ti, clone, tio); | 1248 | __map_bio(ti, tio); |
| 1261 | 1249 | ||
| 1262 | ci->sector += len; | 1250 | ci->sector += len; |
| 1263 | ci->sector_count -= len; | 1251 | ci->sector_count -= len; |
| @@ -1282,12 +1270,11 @@ static int __clone_and_map(struct clone_info *ci) | |||
| 1282 | 1270 | ||
| 1283 | len = min(remaining, max); | 1271 | len = min(remaining, max); |
| 1284 | 1272 | ||
| 1285 | tio = alloc_tio(ci, ti); | 1273 | tio = alloc_tio(ci, ti, 1); |
| 1286 | clone = split_bvec(bio, ci->sector, ci->idx, | 1274 | split_bvec(tio, bio, ci->sector, ci->idx, |
| 1287 | bv->bv_offset + offset, len, | 1275 | bv->bv_offset + offset, len, ci->md->bs); |
| 1288 | ci->md->bs); | ||
| 1289 | 1276 | ||
| 1290 | __map_bio(ti, clone, tio); | 1277 | __map_bio(ti, tio); |
| 1291 | 1278 | ||
| 1292 | ci->sector += len; | 1279 | ci->sector += len; |
| 1293 | ci->sector_count -= len; | 1280 | ci->sector_count -= len; |
| @@ -1955,7 +1942,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | |||
| 1955 | { | 1942 | { |
| 1956 | struct dm_md_mempools *p; | 1943 | struct dm_md_mempools *p; |
| 1957 | 1944 | ||
| 1958 | if (md->io_pool && md->tio_pool && md->bs) | 1945 | if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) |
| 1959 | /* the md already has necessary mempools */ | 1946 | /* the md already has necessary mempools */ |
| 1960 | goto out; | 1947 | goto out; |
| 1961 | 1948 | ||
| @@ -2732,14 +2719,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) | |||
| 2732 | if (!pools->io_pool) | 2719 | if (!pools->io_pool) |
| 2733 | goto free_pools_and_out; | 2720 | goto free_pools_and_out; |
| 2734 | 2721 | ||
| 2735 | pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? | 2722 | pools->tio_pool = NULL; |
| 2736 | mempool_create_slab_pool(MIN_IOS, _tio_cache) : | 2723 | if (type == DM_TYPE_REQUEST_BASED) { |
| 2737 | mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); | 2724 | pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); |
| 2738 | if (!pools->tio_pool) | 2725 | if (!pools->tio_pool) |
| 2739 | goto free_io_pool_and_out; | 2726 | goto free_io_pool_and_out; |
| 2727 | } | ||
| 2740 | 2728 | ||
| 2741 | pools->bs = (type == DM_TYPE_BIO_BASED) ? | 2729 | pools->bs = (type == DM_TYPE_BIO_BASED) ? |
| 2742 | bioset_create(pool_size, 0) : | 2730 | bioset_create(pool_size, |
| 2731 | offsetof(struct dm_target_io, clone)) : | ||
| 2743 | bioset_create(pool_size, | 2732 | bioset_create(pool_size, |
| 2744 | offsetof(struct dm_rq_clone_bio_info, clone)); | 2733 | offsetof(struct dm_rq_clone_bio_info, clone)); |
| 2745 | if (!pools->bs) | 2734 | if (!pools->bs) |
| @@ -2754,7 +2743,8 @@ free_bioset_and_out: | |||
| 2754 | bioset_free(pools->bs); | 2743 | bioset_free(pools->bs); |
| 2755 | 2744 | ||
| 2756 | free_tio_pool_and_out: | 2745 | free_tio_pool_and_out: |
| 2757 | mempool_destroy(pools->tio_pool); | 2746 | if (pools->tio_pool) |
| 2747 | mempool_destroy(pools->tio_pool); | ||
| 2758 | 2748 | ||
| 2759 | free_io_pool_and_out: | 2749 | free_io_pool_and_out: |
| 2760 | mempool_destroy(pools->io_pool); | 2750 | mempool_destroy(pools->io_pool); |
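The offsetof()/container_of() round trip that the new bio-based bioset relies on can be checked in plain userspace C; the struct below merely stands in for dm_target_io with its trailing embedded bio:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_tio {
            void *io, *ti;  /* stand-ins for the real members */
            char clone;     /* stand-in for the embedded struct bio */
    };

    int main(void)
    {
            struct fake_tio tio;
            char *bio = &tio.clone;         /* what the bioset returns */
            struct fake_tio *back = container_of(bio, struct fake_tio, clone);

            printf("front pad = %zu bytes, round trip ok = %d\n",
                   offsetof(struct fake_tio, clone), back == &tio);
            return 0;
    }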
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index fa211d80fc0a..21014836bdbf 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
| @@ -138,6 +138,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) | |||
| 138 | struct linear_conf *conf; | 138 | struct linear_conf *conf; |
| 139 | struct md_rdev *rdev; | 139 | struct md_rdev *rdev; |
| 140 | int i, cnt; | 140 | int i, cnt; |
| 141 | bool discard_supported = false; | ||
| 141 | 142 | ||
| 142 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), | 143 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), |
| 143 | GFP_KERNEL); | 144 | GFP_KERNEL); |
| @@ -171,6 +172,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) | |||
| 171 | conf->array_sectors += rdev->sectors; | 172 | conf->array_sectors += rdev->sectors; |
| 172 | cnt++; | 173 | cnt++; |
| 173 | 174 | ||
| 175 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
| 176 | discard_supported = true; | ||
| 174 | } | 177 | } |
| 175 | if (cnt != raid_disks) { | 178 | if (cnt != raid_disks) { |
| 176 | printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", | 179 | printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", |
| @@ -178,6 +181,11 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) | |||
| 178 | goto out; | 181 | goto out; |
| 179 | } | 182 | } |
| 180 | 183 | ||
| 184 | if (!discard_supported) | ||
| 185 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 186 | else | ||
| 187 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 188 | |||
| 181 | /* | 189 | /* |
| 182 | * Here we calculate the device offsets. | 190 | * Here we calculate the device offsets. |
| 183 | */ | 191 | */ |
| @@ -244,7 +252,9 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) | |||
| 244 | if (!newconf) | 252 | if (!newconf) |
| 245 | return -ENOMEM; | 253 | return -ENOMEM; |
| 246 | 254 | ||
| 247 | oldconf = rcu_dereference(mddev->private); | 255 | oldconf = rcu_dereference_protected(mddev->private, |
| 256 | lockdep_is_held( | ||
| 257 | &mddev->reconfig_mutex)); | ||
| 248 | mddev->raid_disks++; | 258 | mddev->raid_disks++; |
| 249 | rcu_assign_pointer(mddev->private, newconf); | 259 | rcu_assign_pointer(mddev->private, newconf); |
| 250 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); | 260 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); |
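rcu_dereference_protected() is the update-side counterpart of rcu_dereference(): it performs no read-side protection and instead records the claim, checkable by lockdep, that the caller already holds the lock serializing writers, here mddev->reconfig_mutex. The idiom in miniature:

    /* Update-side access: no rcu_read_lock(), but lockdep verifies the
     * stated condition when CONFIG_PROVE_RCU is enabled. */
    struct linear_conf *conf =
            rcu_dereference_protected(mddev->private,
                                      lockdep_is_held(&mddev->reconfig_mutex));

    rcu_assign_pointer(mddev->private, newconf);  /* publish replacement */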
| @@ -256,7 +266,10 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) | |||
| 256 | 266 | ||
| 257 | static int linear_stop (struct mddev *mddev) | 267 | static int linear_stop (struct mddev *mddev) |
| 258 | { | 268 | { |
| 259 | struct linear_conf *conf = mddev->private; | 269 | struct linear_conf *conf = |
| 270 | rcu_dereference_protected(mddev->private, | ||
| 271 | lockdep_is_held( | ||
| 272 | &mddev->reconfig_mutex)); | ||
| 260 | 273 | ||
| 261 | /* | 274 | /* |
| 262 | * We do not require rcu protection here since | 275 | * We do not require rcu protection here since |
| @@ -326,6 +339,14 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) | |||
| 326 | bio->bi_sector = bio->bi_sector - start_sector | 339 | bio->bi_sector = bio->bi_sector - start_sector |
| 327 | + tmp_dev->rdev->data_offset; | 340 | + tmp_dev->rdev->data_offset; |
| 328 | rcu_read_unlock(); | 341 | rcu_read_unlock(); |
| 342 | |||
| 343 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | ||
| 344 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | ||
| 345 | /* Just ignore it */ | ||
| 346 | bio_endio(bio, 0); | ||
| 347 | return; | ||
| 348 | } | ||
| 349 | |||
| 329 | generic_make_request(bio); | 350 | generic_make_request(bio); |
| 330 | } | 351 | } |
| 331 | 352 | ||
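linear_conf() above sets QUEUE_FLAG_DISCARD when any member supports discard, so a discard bio can still be routed to a member that does not. Rather than letting the lower queue fail it, the new check completes it immediately; discards are advisory, so reporting success is safe. The guard, reduced to its essentials:

    if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
            bio_endio(bio, 0);      /* advisory request: claim success */
            return;
    }
    generic_make_request(bio);      /* everything else goes down normally */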
diff --git a/drivers/md/md.c b/drivers/md/md.c index 95c88012a3b9..9ab768acfb62 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -674,7 +674,18 @@ static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr) | |||
| 674 | return NULL; | 674 | return NULL; |
| 675 | } | 675 | } |
| 676 | 676 | ||
| 677 | static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) | 677 | static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr) |
| 678 | { | ||
| 679 | struct md_rdev *rdev; | ||
| 680 | |||
| 681 | rdev_for_each_rcu(rdev, mddev) | ||
| 682 | if (rdev->desc_nr == nr) | ||
| 683 | return rdev; | ||
| 684 | |||
| 685 | return NULL; | ||
| 686 | } | ||
| 687 | |||
| 688 | static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) | ||
| 678 | { | 689 | { |
| 679 | struct md_rdev *rdev; | 690 | struct md_rdev *rdev; |
| 680 | 691 | ||
| @@ -685,6 +696,17 @@ static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) | |||
| 685 | return NULL; | 696 | return NULL; |
| 686 | } | 697 | } |
| 687 | 698 | ||
| 699 | static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev) | ||
| 700 | { | ||
| 701 | struct md_rdev *rdev; | ||
| 702 | |||
| 703 | rdev_for_each_rcu(rdev, mddev) | ||
| 704 | if (rdev->bdev->bd_dev == dev) | ||
| 705 | return rdev; | ||
| 706 | |||
| 707 | return NULL; | ||
| 708 | } | ||
| 709 | |||
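These _rcu lookup variants only walk the rdev list under RCU; the caller supplies the read-side critical section and must not use the returned pointer after rcu_read_unlock(). A caller-side sketch matching how set_disk_faulty() uses find_rdev_rcu() later in this patch:

    int err = 0;
    struct md_rdev *rdev;

    rcu_read_lock();
    rdev = find_rdev_rcu(mddev, dev);
    if (!rdev)
            err = -ENODEV;
    else
            md_error(mddev, rdev);  /* rdev cannot be freed while the
                                     * read-side section is held */
    rcu_read_unlock();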
| 688 | static struct md_personality *find_pers(int level, char *clevel) | 710 | static struct md_personality *find_pers(int level, char *clevel) |
| 689 | { | 711 | { |
| 690 | struct md_personality *pers; | 712 | struct md_personality *pers; |
| @@ -2022,8 +2044,14 @@ EXPORT_SYMBOL(md_integrity_register); | |||
| 2022 | /* Disable data integrity if non-capable/non-matching disk is being added */ | 2044 | /* Disable data integrity if non-capable/non-matching disk is being added */ |
| 2023 | void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) | 2045 | void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) |
| 2024 | { | 2046 | { |
| 2025 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); | 2047 | struct blk_integrity *bi_rdev; |
| 2026 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); | 2048 | struct blk_integrity *bi_mddev; |
| 2049 | |||
| 2050 | if (!mddev->gendisk) | ||
| 2051 | return; | ||
| 2052 | |||
| 2053 | bi_rdev = bdev_get_integrity(rdev->bdev); | ||
| 2054 | bi_mddev = blk_get_integrity(mddev->gendisk); | ||
| 2027 | 2055 | ||
| 2028 | if (!bi_mddev) /* nothing to do */ | 2056 | if (!bi_mddev) /* nothing to do */ |
| 2029 | return; | 2057 | return; |
| @@ -3754,6 +3782,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) | |||
| 3754 | return -EINVAL; | 3782 | return -EINVAL; |
| 3755 | 3783 | ||
| 3756 | mddev->recovery_cp = n; | 3784 | mddev->recovery_cp = n; |
| 3785 | if (mddev->pers) | ||
| 3786 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
| 3757 | return len; | 3787 | return len; |
| 3758 | } | 3788 | } |
| 3759 | static struct md_sysfs_entry md_resync_start = | 3789 | static struct md_sysfs_entry md_resync_start = |
| @@ -4231,6 +4261,13 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
| 4231 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | 4261 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); |
| 4232 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 4262 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 4233 | } | 4263 | } |
| 4264 | if (mddev->ro == 2) { | ||
| 4265 | /* A write to sync_action is enough to justify | ||
| 4266 | * canceling read-auto mode | ||
| 4267 | */ | ||
| 4268 | mddev->ro = 0; | ||
| 4269 | md_wakeup_thread(mddev->sync_thread); | ||
| 4270 | } | ||
| 4234 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4271 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
| 4235 | md_wakeup_thread(mddev->thread); | 4272 | md_wakeup_thread(mddev->thread); |
| 4236 | sysfs_notify_dirent_safe(mddev->sysfs_action); | 4273 | sysfs_notify_dirent_safe(mddev->sysfs_action); |
| @@ -4241,7 +4278,8 @@ static ssize_t | |||
| 4241 | mismatch_cnt_show(struct mddev *mddev, char *page) | 4278 | mismatch_cnt_show(struct mddev *mddev, char *page) |
| 4242 | { | 4279 | { |
| 4243 | return sprintf(page, "%llu\n", | 4280 | return sprintf(page, "%llu\n", |
| 4244 | (unsigned long long) mddev->resync_mismatches); | 4281 | (unsigned long long) |
| 4282 | atomic64_read(&mddev->resync_mismatches)); | ||
| 4245 | } | 4283 | } |
| 4246 | 4284 | ||
| 4247 | static struct md_sysfs_entry md_scan_mode = | 4285 | static struct md_sysfs_entry md_scan_mode = |
| @@ -4362,6 +4400,10 @@ sync_completed_show(struct mddev *mddev, char *page) | |||
| 4362 | if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 4400 | if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
| 4363 | return sprintf(page, "none\n"); | 4401 | return sprintf(page, "none\n"); |
| 4364 | 4402 | ||
| 4403 | if (mddev->curr_resync == 1 || | ||
| 4404 | mddev->curr_resync == 2) | ||
| 4405 | return sprintf(page, "delayed\n"); | ||
| 4406 | |||
| 4365 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || | 4407 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || |
| 4366 | test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | 4408 | test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| 4367 | max_sectors = mddev->resync_max_sectors; | 4409 | max_sectors = mddev->resync_max_sectors; |
| @@ -5207,7 +5249,7 @@ static void md_clean(struct mddev *mddev) | |||
| 5207 | mddev->new_layout = 0; | 5249 | mddev->new_layout = 0; |
| 5208 | mddev->new_chunk_sectors = 0; | 5250 | mddev->new_chunk_sectors = 0; |
| 5209 | mddev->curr_resync = 0; | 5251 | mddev->curr_resync = 0; |
| 5210 | mddev->resync_mismatches = 0; | 5252 | atomic64_set(&mddev->resync_mismatches, 0); |
| 5211 | mddev->suspend_lo = mddev->suspend_hi = 0; | 5253 | mddev->suspend_lo = mddev->suspend_hi = 0; |
| 5212 | mddev->sync_speed_min = mddev->sync_speed_max = 0; | 5254 | mddev->sync_speed_min = mddev->sync_speed_max = 0; |
| 5213 | mddev->recovery = 0; | 5255 | mddev->recovery = 0; |
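resync_mismatches becomes an atomic64_t (see the md.h hunk at the end of this section) because raid personalities update it from resync context without holding any lock that the sysfs and /proc readers take. The accessors used across this patch, plus the increment a personality would presumably use on the hot path (the add is an assumption, not shown in this diff):

    atomic64_set(&mddev->resync_mismatches, 0);          /* md_clean() */
    atomic64_add(sectors, &mddev->resync_mismatches);    /* assumed hot path */
    sprintf(page, "%llu\n",
            (unsigned long long)atomic64_read(&mddev->resync_mismatches));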
| @@ -5509,8 +5551,9 @@ static int get_array_info(struct mddev * mddev, void __user * arg) | |||
| 5509 | int nr,working,insync,failed,spare; | 5551 | int nr,working,insync,failed,spare; |
| 5510 | struct md_rdev *rdev; | 5552 | struct md_rdev *rdev; |
| 5511 | 5553 | ||
| 5512 | nr=working=insync=failed=spare=0; | 5554 | nr = working = insync = failed = spare = 0; |
| 5513 | rdev_for_each(rdev, mddev) { | 5555 | rcu_read_lock(); |
| 5556 | rdev_for_each_rcu(rdev, mddev) { | ||
| 5514 | nr++; | 5557 | nr++; |
| 5515 | if (test_bit(Faulty, &rdev->flags)) | 5558 | if (test_bit(Faulty, &rdev->flags)) |
| 5516 | failed++; | 5559 | failed++; |
| @@ -5522,6 +5565,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg) | |||
| 5522 | spare++; | 5565 | spare++; |
| 5523 | } | 5566 | } |
| 5524 | } | 5567 | } |
| 5568 | rcu_read_unlock(); | ||
| 5525 | 5569 | ||
| 5526 | info.major_version = mddev->major_version; | 5570 | info.major_version = mddev->major_version; |
| 5527 | info.minor_version = mddev->minor_version; | 5571 | info.minor_version = mddev->minor_version; |
| @@ -5605,7 +5649,8 @@ static int get_disk_info(struct mddev * mddev, void __user * arg) | |||
| 5605 | if (copy_from_user(&info, arg, sizeof(info))) | 5649 | if (copy_from_user(&info, arg, sizeof(info))) |
| 5606 | return -EFAULT; | 5650 | return -EFAULT; |
| 5607 | 5651 | ||
| 5608 | rdev = find_rdev_nr(mddev, info.number); | 5652 | rcu_read_lock(); |
| 5653 | rdev = find_rdev_nr_rcu(mddev, info.number); | ||
| 5609 | if (rdev) { | 5654 | if (rdev) { |
| 5610 | info.major = MAJOR(rdev->bdev->bd_dev); | 5655 | info.major = MAJOR(rdev->bdev->bd_dev); |
| 5611 | info.minor = MINOR(rdev->bdev->bd_dev); | 5656 | info.minor = MINOR(rdev->bdev->bd_dev); |
| @@ -5624,6 +5669,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg) | |||
| 5624 | info.raid_disk = -1; | 5669 | info.raid_disk = -1; |
| 5625 | info.state = (1<<MD_DISK_REMOVED); | 5670 | info.state = (1<<MD_DISK_REMOVED); |
| 5626 | } | 5671 | } |
| 5672 | rcu_read_unlock(); | ||
| 5627 | 5673 | ||
| 5628 | if (copy_to_user(arg, &info, sizeof(info))) | 5674 | if (copy_to_user(arg, &info, sizeof(info))) |
| 5629 | return -EFAULT; | 5675 | return -EFAULT; |
| @@ -6232,18 +6278,22 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) | |||
| 6232 | static int set_disk_faulty(struct mddev *mddev, dev_t dev) | 6278 | static int set_disk_faulty(struct mddev *mddev, dev_t dev) |
| 6233 | { | 6279 | { |
| 6234 | struct md_rdev *rdev; | 6280 | struct md_rdev *rdev; |
| 6281 | int err = 0; | ||
| 6235 | 6282 | ||
| 6236 | if (mddev->pers == NULL) | 6283 | if (mddev->pers == NULL) |
| 6237 | return -ENODEV; | 6284 | return -ENODEV; |
| 6238 | 6285 | ||
| 6239 | rdev = find_rdev(mddev, dev); | 6286 | rcu_read_lock(); |
| 6287 | rdev = find_rdev_rcu(mddev, dev); | ||
| 6240 | if (!rdev) | 6288 | if (!rdev) |
| 6241 | return -ENODEV; | 6289 | err = -ENODEV; |
| 6242 | 6290 | else { | |
| 6243 | md_error(mddev, rdev); | 6291 | md_error(mddev, rdev); |
| 6244 | if (!test_bit(Faulty, &rdev->flags)) | 6292 | if (!test_bit(Faulty, &rdev->flags)) |
| 6245 | return -EBUSY; | 6293 | err = -EBUSY; |
| 6246 | return 0; | 6294 | } |
| 6295 | rcu_read_unlock(); | ||
| 6296 | return err; | ||
| 6247 | } | 6297 | } |
| 6248 | 6298 | ||
| 6249 | /* | 6299 | /* |
| @@ -6315,6 +6365,27 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 6315 | goto abort; | 6365 | goto abort; |
| 6316 | } | 6366 | } |
| 6317 | 6367 | ||
| 6368 | /* Some actions do not require the mutex */ | ||
| 6369 | switch (cmd) { | ||
| 6370 | case GET_ARRAY_INFO: | ||
| 6371 | if (!mddev->raid_disks && !mddev->external) | ||
| 6372 | err = -ENODEV; | ||
| 6373 | else | ||
| 6374 | err = get_array_info(mddev, argp); | ||
| 6375 | goto abort; | ||
| 6376 | |||
| 6377 | case GET_DISK_INFO: | ||
| 6378 | if (!mddev->raid_disks && !mddev->external) | ||
| 6379 | err = -ENODEV; | ||
| 6380 | else | ||
| 6381 | err = get_disk_info(mddev, argp); | ||
| 6382 | goto abort; | ||
| 6383 | |||
| 6384 | case SET_DISK_FAULTY: | ||
| 6385 | err = set_disk_faulty(mddev, new_decode_dev(arg)); | ||
| 6386 | goto abort; | ||
| 6387 | } | ||
| 6388 | |||
| 6318 | err = mddev_lock(mddev); | 6389 | err = mddev_lock(mddev); |
| 6319 | if (err) { | 6390 | if (err) { |
| 6320 | printk(KERN_INFO | 6391 | printk(KERN_INFO |
| @@ -6387,18 +6458,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 6387 | */ | 6458 | */ |
| 6388 | switch (cmd) | 6459 | switch (cmd) |
| 6389 | { | 6460 | { |
| 6390 | case GET_ARRAY_INFO: | ||
| 6391 | err = get_array_info(mddev, argp); | ||
| 6392 | goto done_unlock; | ||
| 6393 | |||
| 6394 | case GET_BITMAP_FILE: | 6461 | case GET_BITMAP_FILE: |
| 6395 | err = get_bitmap_file(mddev, argp); | 6462 | err = get_bitmap_file(mddev, argp); |
| 6396 | goto done_unlock; | 6463 | goto done_unlock; |
| 6397 | 6464 | ||
| 6398 | case GET_DISK_INFO: | ||
| 6399 | err = get_disk_info(mddev, argp); | ||
| 6400 | goto done_unlock; | ||
| 6401 | |||
| 6402 | case RESTART_ARRAY_RW: | 6465 | case RESTART_ARRAY_RW: |
| 6403 | err = restart_array(mddev); | 6466 | err = restart_array(mddev); |
| 6404 | goto done_unlock; | 6467 | goto done_unlock; |
| @@ -6480,10 +6543,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 6480 | err = hot_add_disk(mddev, new_decode_dev(arg)); | 6543 | err = hot_add_disk(mddev, new_decode_dev(arg)); |
| 6481 | goto done_unlock; | 6544 | goto done_unlock; |
| 6482 | 6545 | ||
| 6483 | case SET_DISK_FAULTY: | ||
| 6484 | err = set_disk_faulty(mddev, new_decode_dev(arg)); | ||
| 6485 | goto done_unlock; | ||
| 6486 | |||
| 6487 | case RUN_ARRAY: | 6546 | case RUN_ARRAY: |
| 6488 | err = do_md_run(mddev); | 6547 | err = do_md_run(mddev); |
| 6489 | goto done_unlock; | 6548 | goto done_unlock; |
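Net effect of the md_ioctl() hunks: GET_ARRAY_INFO, GET_DISK_INFO and SET_DISK_FAULTY are answered before mddev_lock() is taken and disappear from the locked switch, presumably so monitoring queries can no longer stall behind a long-running reconfiguration; the RCU-safe lookups added earlier are what make this legal. The resulting shape of the function, heavily condensed (the -ENODEV pre-checks and goto labels are elided):

    static int md_ioctl_sketch(struct mddev *mddev, unsigned int cmd,
                               void __user *argp, unsigned long arg)
    {
            int err;

            switch (cmd) {          /* lock-free fast path */
            case GET_ARRAY_INFO:
                    return get_array_info(mddev, argp); /* rdev_for_each_rcu() */
            case GET_DISK_INFO:
                    return get_disk_info(mddev, argp);  /* find_rdev_nr_rcu() */
            case SET_DISK_FAULTY:
                    return set_disk_faulty(mddev, new_decode_dev(arg));
            }

            err = mddev_lock(mddev); /* everything else still serializes */
            /* ... locked switch, minus the three cases above ... */
            return err;
    }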
| @@ -6641,7 +6700,7 @@ static int md_thread(void * arg) | |||
| 6641 | 6700 | ||
| 6642 | clear_bit(THREAD_WAKEUP, &thread->flags); | 6701 | clear_bit(THREAD_WAKEUP, &thread->flags); |
| 6643 | if (!kthread_should_stop()) | 6702 | if (!kthread_should_stop()) |
| 6644 | thread->run(thread->mddev); | 6703 | thread->run(thread); |
| 6645 | } | 6704 | } |
| 6646 | 6705 | ||
| 6647 | return 0; | 6706 | return 0; |
| @@ -6656,8 +6715,8 @@ void md_wakeup_thread(struct md_thread *thread) | |||
| 6656 | } | 6715 | } |
| 6657 | } | 6716 | } |
| 6658 | 6717 | ||
| 6659 | struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, | 6718 | struct md_thread *md_register_thread(void (*run) (struct md_thread *), |
| 6660 | const char *name) | 6719 | struct mddev *mddev, const char *name) |
| 6661 | { | 6720 | { |
| 6662 | struct md_thread *thread; | 6721 | struct md_thread *thread; |
| 6663 | 6722 | ||
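The thread entry point changes from void (*run)(struct mddev *) to void (*run)(struct md_thread *): the callback now receives the md_thread itself and digs the mddev out of it, as the md_do_sync() hunk below shows. A sketch of a personality under the new signature (the thread name string is illustrative):

    static void example_sync_thread(struct md_thread *thread)
    {
            struct mddev *mddev = thread->mddev;  /* context rides along */

            /* ... one pass of work; md_thread() re-invokes on wakeup ... */
    }

    /* Registration is unchanged apart from the callback type: */
    thread = md_register_thread(example_sync_thread, mddev, "resync");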
| @@ -6752,7 +6811,11 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev) | |||
| 6752 | int scale; | 6811 | int scale; |
| 6753 | unsigned int per_milli; | 6812 | unsigned int per_milli; |
| 6754 | 6813 | ||
| 6755 | resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); | 6814 | if (mddev->curr_resync <= 3) |
| 6815 | resync = 0; | ||
| 6816 | else | ||
| 6817 | resync = mddev->curr_resync | ||
| 6818 | - atomic_read(&mddev->recovery_active); | ||
| 6756 | 6819 | ||
| 6757 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || | 6820 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || |
| 6758 | test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | 6821 | test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| @@ -6978,7 +7041,7 @@ static int md_seq_show(struct seq_file *seq, void *v) | |||
| 6978 | if (mddev->curr_resync > 2) { | 7041 | if (mddev->curr_resync > 2) { |
| 6979 | status_resync(seq, mddev); | 7042 | status_resync(seq, mddev); |
| 6980 | seq_printf(seq, "\n "); | 7043 | seq_printf(seq, "\n "); |
| 6981 | } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) | 7044 | } else if (mddev->curr_resync >= 1) |
| 6982 | seq_printf(seq, "\tresync=DELAYED\n "); | 7045 | seq_printf(seq, "\tresync=DELAYED\n "); |
| 6983 | else if (mddev->recovery_cp < MaxSector) | 7046 | else if (mddev->recovery_cp < MaxSector) |
| 6984 | seq_printf(seq, "\tresync=PENDING\n "); | 7047 | seq_printf(seq, "\tresync=PENDING\n "); |
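Several hunks here hinge on curr_resync's small sentinel values, which the diff uses but never spells out. Inferring from the checks: 1 and 2 mean the resync is delayed behind another array, 3 (newly introduced) means it has started but completed nothing yet, and anything larger is a real sector count. Hence the reporting logic:

    /* Inferred encoding of mddev->curr_resync:
     *   0      no resync in progress
     *   1, 2   resync delayed behind another array sharing devices
     *   3      resync running, no sectors completed yet (new here)
     *   > 3    next sector to be resynced
     */
    if (mddev->curr_resync <= 3)
            resync = 0;     /* no meaningful progress to report */
    else
            resync = mddev->curr_resync
                     - atomic_read(&mddev->recovery_active);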
| @@ -7206,8 +7269,9 @@ EXPORT_SYMBOL_GPL(md_allow_write); | |||
| 7206 | 7269 | ||
| 7207 | #define SYNC_MARKS 10 | 7270 | #define SYNC_MARKS 10 |
| 7208 | #define SYNC_MARK_STEP (3*HZ) | 7271 | #define SYNC_MARK_STEP (3*HZ) |
| 7209 | void md_do_sync(struct mddev *mddev) | 7272 | void md_do_sync(struct md_thread *thread) |
| 7210 | { | 7273 | { |
| 7274 | struct mddev *mddev = thread->mddev; | ||
| 7211 | struct mddev *mddev2; | 7275 | struct mddev *mddev2; |
| 7212 | unsigned int currspeed = 0, | 7276 | unsigned int currspeed = 0, |
| 7213 | window; | 7277 | window; |
| @@ -7311,7 +7375,7 @@ void md_do_sync(struct mddev *mddev) | |||
| 7311 | * which defaults to physical size, but can be virtual size | 7375 | * which defaults to physical size, but can be virtual size |
| 7312 | */ | 7376 | */ |
| 7313 | max_sectors = mddev->resync_max_sectors; | 7377 | max_sectors = mddev->resync_max_sectors; |
| 7314 | mddev->resync_mismatches = 0; | 7378 | atomic64_set(&mddev->resync_mismatches, 0); |
| 7315 | /* we don't use the checkpoint if there's a bitmap */ | 7379 | /* we don't use the checkpoint if there's a bitmap */ |
| 7316 | if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | 7380 | if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) |
| 7317 | j = mddev->resync_min; | 7381 | j = mddev->resync_min; |
| @@ -7367,8 +7431,11 @@ void md_do_sync(struct mddev *mddev) | |||
| 7367 | "md: resuming %s of %s from checkpoint.\n", | 7431 | "md: resuming %s of %s from checkpoint.\n", |
| 7368 | desc, mdname(mddev)); | 7432 | desc, mdname(mddev)); |
| 7369 | mddev->curr_resync = j; | 7433 | mddev->curr_resync = j; |
| 7370 | } | 7434 | } else |
| 7435 | mddev->curr_resync = 3; /* no longer delayed */ | ||
| 7371 | mddev->curr_resync_completed = j; | 7436 | mddev->curr_resync_completed = j; |
| 7437 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | ||
| 7438 | md_new_event(mddev); | ||
| 7372 | 7439 | ||
| 7373 | blk_start_plug(&plug); | 7440 | blk_start_plug(&plug); |
| 7374 | while (j < max_sectors) { | 7441 | while (j < max_sectors) { |
| @@ -7421,7 +7488,8 @@ void md_do_sync(struct mddev *mddev) | |||
| 7421 | break; | 7488 | break; |
| 7422 | 7489 | ||
| 7423 | j += sectors; | 7490 | j += sectors; |
| 7424 | if (j>1) mddev->curr_resync = j; | 7491 | if (j > 2) |
| 7492 | mddev->curr_resync = j; | ||
| 7425 | mddev->curr_mark_cnt = io_sectors; | 7493 | mddev->curr_mark_cnt = io_sectors; |
| 7426 | if (last_check == 0) | 7494 | if (last_check == 0) |
| 7427 | /* this is the earliest that rebuild will be | 7495 | /* this is the earliest that rebuild will be |
| @@ -7543,8 +7611,6 @@ static int remove_and_add_spares(struct mddev *mddev) | |||
| 7543 | int spares = 0; | 7611 | int spares = 0; |
| 7544 | int removed = 0; | 7612 | int removed = 0; |
| 7545 | 7613 | ||
| 7546 | mddev->curr_resync_completed = 0; | ||
| 7547 | |||
| 7548 | rdev_for_each(rdev, mddev) | 7614 | rdev_for_each(rdev, mddev) |
| 7549 | if (rdev->raid_disk >= 0 && | 7615 | if (rdev->raid_disk >= 0 && |
| 7550 | !test_bit(Blocked, &rdev->flags) && | 7616 | !test_bit(Blocked, &rdev->flags) && |
| @@ -7739,6 +7805,7 @@ void md_check_recovery(struct mddev *mddev) | |||
| 7739 | /* Set RUNNING before clearing NEEDED to avoid | 7805 | /* Set RUNNING before clearing NEEDED to avoid |
| 7740 | * any transients in the value of "sync_action". | 7806 | * any transients in the value of "sync_action". |
| 7741 | */ | 7807 | */ |
| 7808 | mddev->curr_resync_completed = 0; | ||
| 7742 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 7809 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 7743 | /* Clear some bits that don't mean anything, but | 7810 | /* Clear some bits that don't mean anything, but |
| 7744 | * might be left set | 7811 | * might be left set |
| @@ -7752,7 +7819,7 @@ void md_check_recovery(struct mddev *mddev) | |||
| 7752 | /* no recovery is running. | 7819 | /* no recovery is running. |
| 7753 | * remove any failed drives, then | 7820 | * remove any failed drives, then |
| 7754 | * add spares if possible. | 7821 | * add spares if possible. |
| 7755 | * Spare are also removed and re-added, to allow | 7822 | * Spares are also removed and re-added, to allow |
| 7756 | * the personality to fail the re-add. | 7823 | * the personality to fail the re-add. |
| 7757 | */ | 7824 | */ |
| 7758 | 7825 | ||
diff --git a/drivers/md/md.h b/drivers/md/md.h index f385b038589d..af443ab868db 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -282,7 +282,7 @@ struct mddev { | |||
| 282 | 282 | ||
| 283 | sector_t resync_max_sectors; /* may be set by personality */ | 283 | sector_t resync_max_sectors; /* may be set by personality */ |
| 284 | 284 | ||
| 285 | sector_t resync_mismatches; /* count of sectors where | 285 | atomic64_t resync_mismatches; /* count of sectors where |
| 286 | * parity/replica mismatch found | 286 | * parity/replica mismatch found |
| 287 | */ | 287 | */ |
| 288 | 288 | ||
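[Note: resync_mismatches is bumped from raid1, raid10 and raid5 completion paths, so the plain sector_t becomes an atomic64_t and every touch point in this patch switches to the atomic helpers. The access pattern, pulled out of context:

    /* Lock-free mismatch accounting with atomic64_t: */
    atomic64_set(&mddev->resync_mismatches, 0);              /* reset at sync start */
    atomic64_add(STRIPE_SECTORS, &mddev->resync_mismatches); /* on each mismatch    */
    /* and a reader (e.g. the mismatch_cnt sysfs attribute) would use: */
    u64 mismatches = atomic64_read(&mddev->resync_mismatches);
]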
| @@ -540,12 +540,13 @@ static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) | |||
| 540 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) | 540 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) |
| 541 | 541 | ||
| 542 | struct md_thread { | 542 | struct md_thread { |
| 543 | void (*run) (struct mddev *mddev); | 543 | void (*run) (struct md_thread *thread); |
| 544 | struct mddev *mddev; | 544 | struct mddev *mddev; |
| 545 | wait_queue_head_t wqueue; | 545 | wait_queue_head_t wqueue; |
| 546 | unsigned long flags; | 546 | unsigned long flags; |
| 547 | struct task_struct *tsk; | 547 | struct task_struct *tsk; |
| 548 | unsigned long timeout; | 548 | unsigned long timeout; |
| 549 | void *private; | ||
| 549 | }; | 550 | }; |
| 550 | 551 | ||
| 551 | #define THREAD_WAKEUP 0 | 552 | #define THREAD_WAKEUP 0 |
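[Note: the new private pointer gives md_thread users somewhere to park context other than the mddev. No user appears in this hunk, so the following is purely hypothetical illustration, reusing the exampled callback and conf from the sketch above:

    /* Hypothetical: hanging personality state off the thread. */
    struct md_thread *t = md_register_thread(exampled, mddev, "example");
    if (t)
        t->private = conf;    /* later available as thread->private */
]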
| @@ -584,7 +585,7 @@ static inline void safe_put_page(struct page *p) | |||
| 584 | extern int register_md_personality(struct md_personality *p); | 585 | extern int register_md_personality(struct md_personality *p); |
| 585 | extern int unregister_md_personality(struct md_personality *p); | 586 | extern int unregister_md_personality(struct md_personality *p); |
| 586 | extern struct md_thread *md_register_thread( | 587 | extern struct md_thread *md_register_thread( |
| 587 | void (*run)(struct mddev *mddev), | 588 | void (*run)(struct md_thread *thread), |
| 588 | struct mddev *mddev, | 589 | struct mddev *mddev, |
| 589 | const char *name); | 590 | const char *name); |
| 590 | extern void md_unregister_thread(struct md_thread **threadp); | 591 | extern void md_unregister_thread(struct md_thread **threadp); |
| @@ -603,7 +604,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
| 603 | extern void md_super_wait(struct mddev *mddev); | 604 | extern void md_super_wait(struct mddev *mddev); |
| 604 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | 605 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
| 605 | struct page *page, int rw, bool metadata_op); | 606 | struct page *page, int rw, bool metadata_op); |
| 606 | extern void md_do_sync(struct mddev *mddev); | 607 | extern void md_do_sync(struct md_thread *thread); |
| 607 | extern void md_new_event(struct mddev *mddev); | 608 | extern void md_new_event(struct mddev *mddev); |
| 608 | extern int md_allow_write(struct mddev *mddev); | 609 | extern int md_allow_write(struct mddev *mddev); |
| 609 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); | 610 | extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 61a1833ebaf3..1642eae75a33 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
| @@ -335,8 +335,9 @@ abort: | |||
| 335 | * 3. Performs writes following reads for array synchronising. | 335 | * 3. Performs writes following reads for array synchronising. |
| 336 | */ | 336 | */ |
| 337 | 337 | ||
| 338 | static void multipathd (struct mddev *mddev) | 338 | static void multipathd(struct md_thread *thread) |
| 339 | { | 339 | { |
| 340 | struct mddev *mddev = thread->mddev; | ||
| 340 | struct multipath_bh *mp_bh; | 341 | struct multipath_bh *mp_bh; |
| 341 | struct bio *bio; | 342 | struct bio *bio; |
| 342 | unsigned long flags; | 343 | unsigned long flags; |
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index d77602d63c83..f3a9af8cdec3 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c | |||
| @@ -434,14 +434,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, | |||
| 434 | if (ref_count && !old) { | 434 | if (ref_count && !old) { |
| 435 | *ev = SM_ALLOC; | 435 | *ev = SM_ALLOC; |
| 436 | ll->nr_allocated++; | 436 | ll->nr_allocated++; |
| 437 | ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1); | 437 | le32_add_cpu(&ie_disk.nr_free, -1); |
| 438 | if (le32_to_cpu(ie_disk.none_free_before) == bit) | 438 | if (le32_to_cpu(ie_disk.none_free_before) == bit) |
| 439 | ie_disk.none_free_before = cpu_to_le32(bit + 1); | 439 | ie_disk.none_free_before = cpu_to_le32(bit + 1); |
| 440 | 440 | ||
| 441 | } else if (old && !ref_count) { | 441 | } else if (old && !ref_count) { |
| 442 | *ev = SM_FREE; | 442 | *ev = SM_FREE; |
| 443 | ll->nr_allocated--; | 443 | ll->nr_allocated--; |
| 444 | ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1); | 444 | le32_add_cpu(&ie_disk.nr_free, 1); |
| 445 | ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); | 445 | ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit)); |
| 446 | } | 446 | } |
| 447 | 447 | ||
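[Note: le32_add_cpu() collapses the read-convert-modify-convert-write sequence into one call; both replacements above are behavior-preserving. Conceptually the helper is just:

    /* What le32_add_cpu(var, val) amounts to: */
    static inline void le32_add_cpu_sketch(__le32 *var, u32 val)
    {
        *var = cpu_to_le32(le32_to_cpu(*var) + val);
    }
    /* so le32_add_cpu(&ie_disk.nr_free, -1) decrements the on-disk
     * little-endian counter correctly on any host endianness
     * (passing -1 as u32 wraps, which is exactly two's-complement
     * subtraction). */
]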
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a9e4fa95dfaa..24b359717a7e 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
| @@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
| 88 | char b[BDEVNAME_SIZE]; | 88 | char b[BDEVNAME_SIZE]; |
| 89 | char b2[BDEVNAME_SIZE]; | 89 | char b2[BDEVNAME_SIZE]; |
| 90 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | 90 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); |
| 91 | bool discard_supported = false; | ||
| 91 | 92 | ||
| 92 | if (!conf) | 93 | if (!conf) |
| 93 | return -ENOMEM; | 94 | return -ENOMEM; |
| @@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
| 195 | if (!smallest || (rdev1->sectors < smallest->sectors)) | 196 | if (!smallest || (rdev1->sectors < smallest->sectors)) |
| 196 | smallest = rdev1; | 197 | smallest = rdev1; |
| 197 | cnt++; | 198 | cnt++; |
| 199 | |||
| 200 | if (blk_queue_discard(bdev_get_queue(rdev1->bdev))) | ||
| 201 | discard_supported = true; | ||
| 198 | } | 202 | } |
| 199 | if (cnt != mddev->raid_disks) { | 203 | if (cnt != mddev->raid_disks) { |
| 200 | printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " | 204 | printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " |
| @@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
| 272 | blk_queue_io_opt(mddev->queue, | 276 | blk_queue_io_opt(mddev->queue, |
| 273 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | 277 | (mddev->chunk_sectors << 9) * mddev->raid_disks); |
| 274 | 278 | ||
| 279 | if (!discard_supported) | ||
| 280 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 281 | else | ||
| 282 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 283 | |||
| 275 | pr_debug("md/raid0:%s: done.\n", mdname(mddev)); | 284 | pr_debug("md/raid0:%s: done.\n", mdname(mddev)); |
| 276 | *private_conf = conf; | 285 | *private_conf = conf; |
| 277 | 286 | ||
| @@ -423,6 +432,7 @@ static int raid0_run(struct mddev *mddev) | |||
| 423 | return -EINVAL; | 432 | return -EINVAL; |
| 424 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 433 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
| 425 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); | 434 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); |
| 435 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); | ||
| 426 | 436 | ||
| 427 | /* if private is not null, we are here after takeover */ | 437 | /* if private is not null, we are here after takeover */ |
| 428 | if (mddev->private == NULL) { | 438 | if (mddev->private == NULL) { |
| @@ -510,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
| 510 | sector_t sector = bio->bi_sector; | 520 | sector_t sector = bio->bi_sector; |
| 511 | struct bio_pair *bp; | 521 | struct bio_pair *bp; |
| 512 | /* Sanity check -- queue functions should prevent this happening */ | 522 | /* Sanity check -- queue functions should prevent this happening */ |
| 513 | if (bio->bi_vcnt != 1 || | 523 | if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) || |
| 514 | bio->bi_idx != 0) | 524 | bio->bi_idx != 0) |
| 515 | goto bad_map; | 525 | goto bad_map; |
| 516 | /* This is a one page bio that upper layers | 526 | /* This is a one page bio that upper layers |
| @@ -536,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
| 536 | bio->bi_sector = sector_offset + zone->dev_start + | 546 | bio->bi_sector = sector_offset + zone->dev_start + |
| 537 | tmp_dev->data_offset; | 547 | tmp_dev->data_offset; |
| 538 | 548 | ||
| 549 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | ||
| 550 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | ||
| 551 | /* Just ignore it */ | ||
| 552 | bio_endio(bio, 0); | ||
| 553 | return; | ||
| 554 | } | ||
| 555 | |||
| 539 | generic_make_request(bio); | 556 | generic_make_request(bio); |
| 540 | return; | 557 | return; |
| 541 | 558 | ||
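[Note: these raid0 hunks add discard pass-through: max discard is capped at one chunk, the array advertises QUEUE_FLAG_DISCARD if any member can discard, and a discard routed to a member that cannot is completed successfully rather than failed (discard is advisory, so this is safe). A hypothetical helper condensing the member-scan policy; the name is editorial:

    /* Hypothetical summary of the raid0 member scan above. */
    static bool raid0_any_member_discards(struct mddev *mddev)
    {
        struct md_rdev *rdev;
        bool any = false;

        rdev_for_each(rdev, mddev)
            if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
                any = true;
        return any;    /* decides QUEUE_FLAG_DISCARD for the array */
    }
]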
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 611b5f797618..8034fbd6190c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -333,9 +333,10 @@ static void raid1_end_read_request(struct bio *bio, int error) | |||
| 333 | spin_unlock_irqrestore(&conf->device_lock, flags); | 333 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | if (uptodate) | 336 | if (uptodate) { |
| 337 | raid_end_bio_io(r1_bio); | 337 | raid_end_bio_io(r1_bio); |
| 338 | else { | 338 | rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); |
| 339 | } else { | ||
| 339 | /* | 340 | /* |
| 340 | * oops, read error: | 341 | * oops, read error: |
| 341 | */ | 342 | */ |
| @@ -349,9 +350,8 @@ static void raid1_end_read_request(struct bio *bio, int error) | |||
| 349 | (unsigned long long)r1_bio->sector); | 350 | (unsigned long long)r1_bio->sector); |
| 350 | set_bit(R1BIO_ReadError, &r1_bio->state); | 351 | set_bit(R1BIO_ReadError, &r1_bio->state); |
| 351 | reschedule_retry(r1_bio); | 352 | reschedule_retry(r1_bio); |
| 353 | /* don't drop the reference on read_disk yet */ | ||
| 352 | } | 354 | } |
| 353 | |||
| 354 | rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); | ||
| 355 | } | 355 | } |
| 356 | 356 | ||
| 357 | static void close_write(struct r1bio *r1_bio) | 357 | static void close_write(struct r1bio *r1_bio) |
| @@ -781,7 +781,12 @@ static void flush_pending_writes(struct r1conf *conf) | |||
| 781 | while (bio) { /* submit pending writes */ | 781 | while (bio) { /* submit pending writes */ |
| 782 | struct bio *next = bio->bi_next; | 782 | struct bio *next = bio->bi_next; |
| 783 | bio->bi_next = NULL; | 783 | bio->bi_next = NULL; |
| 784 | generic_make_request(bio); | 784 | if (unlikely((bio->bi_rw & REQ_DISCARD) && |
| 785 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | ||
| 786 | /* Just ignore it */ | ||
| 787 | bio_endio(bio, 0); | ||
| 788 | else | ||
| 789 | generic_make_request(bio); | ||
| 785 | bio = next; | 790 | bio = next; |
| 786 | } | 791 | } |
| 787 | } else | 792 | } else |
| @@ -994,6 +999,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 994 | const int rw = bio_data_dir(bio); | 999 | const int rw = bio_data_dir(bio); |
| 995 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1000 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
| 996 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 1001 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); |
| 1002 | const unsigned long do_discard = (bio->bi_rw | ||
| 1003 | & (REQ_DISCARD | REQ_SECURE)); | ||
| 997 | struct md_rdev *blocked_rdev; | 1004 | struct md_rdev *blocked_rdev; |
| 998 | struct blk_plug_cb *cb; | 1005 | struct blk_plug_cb *cb; |
| 999 | struct raid1_plug_cb *plug = NULL; | 1006 | struct raid1_plug_cb *plug = NULL; |
| @@ -1295,7 +1302,7 @@ read_again: | |||
| 1295 | conf->mirrors[i].rdev->data_offset); | 1302 | conf->mirrors[i].rdev->data_offset); |
| 1296 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1303 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
| 1297 | mbio->bi_end_io = raid1_end_write_request; | 1304 | mbio->bi_end_io = raid1_end_write_request; |
| 1298 | mbio->bi_rw = WRITE | do_flush_fua | do_sync; | 1305 | mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard; |
| 1299 | mbio->bi_private = r1_bio; | 1306 | mbio->bi_private = r1_bio; |
| 1300 | 1307 | ||
| 1301 | atomic_inc(&r1_bio->remaining); | 1308 | atomic_inc(&r1_bio->remaining); |
| @@ -1549,6 +1556,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
| 1549 | clear_bit(Unmerged, &rdev->flags); | 1556 | clear_bit(Unmerged, &rdev->flags); |
| 1550 | } | 1557 | } |
| 1551 | md_integrity_add_rdev(rdev, mddev); | 1558 | md_integrity_add_rdev(rdev, mddev); |
| 1559 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
| 1560 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 1552 | print_conf(conf); | 1561 | print_conf(conf); |
| 1553 | return err; | 1562 | return err; |
| 1554 | } | 1563 | } |
| @@ -1867,7 +1876,7 @@ static int process_checks(struct r1bio *r1_bio) | |||
| 1867 | } else | 1876 | } else |
| 1868 | j = 0; | 1877 | j = 0; |
| 1869 | if (j >= 0) | 1878 | if (j >= 0) |
| 1870 | mddev->resync_mismatches += r1_bio->sectors; | 1879 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); |
| 1871 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) | 1880 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) |
| 1872 | && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { | 1881 | && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { |
| 1873 | /* No need to write to this device. */ | 1882 | /* No need to write to this device. */ |
| @@ -2220,6 +2229,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) | |||
| 2220 | unfreeze_array(conf); | 2229 | unfreeze_array(conf); |
| 2221 | } else | 2230 | } else |
| 2222 | md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); | 2231 | md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); |
| 2232 | rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); | ||
| 2223 | 2233 | ||
| 2224 | bio = r1_bio->bios[r1_bio->read_disk]; | 2234 | bio = r1_bio->bios[r1_bio->read_disk]; |
| 2225 | bdevname(bio->bi_bdev, b); | 2235 | bdevname(bio->bi_bdev, b); |
| @@ -2285,8 +2295,9 @@ read_more: | |||
| 2285 | } | 2295 | } |
| 2286 | } | 2296 | } |
| 2287 | 2297 | ||
| 2288 | static void raid1d(struct mddev *mddev) | 2298 | static void raid1d(struct md_thread *thread) |
| 2289 | { | 2299 | { |
| 2300 | struct mddev *mddev = thread->mddev; | ||
| 2290 | struct r1bio *r1_bio; | 2301 | struct r1bio *r1_bio; |
| 2291 | unsigned long flags; | 2302 | unsigned long flags; |
| 2292 | struct r1conf *conf = mddev->private; | 2303 | struct r1conf *conf = mddev->private; |
| @@ -2783,6 +2794,7 @@ static int run(struct mddev *mddev) | |||
| 2783 | int i; | 2794 | int i; |
| 2784 | struct md_rdev *rdev; | 2795 | struct md_rdev *rdev; |
| 2785 | int ret; | 2796 | int ret; |
| 2797 | bool discard_supported = false; | ||
| 2786 | 2798 | ||
| 2787 | if (mddev->level != 1) { | 2799 | if (mddev->level != 1) { |
| 2788 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", | 2800 | printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", |
| @@ -2812,6 +2824,8 @@ static int run(struct mddev *mddev) | |||
| 2812 | continue; | 2824 | continue; |
| 2813 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 2825 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 2814 | rdev->data_offset << 9); | 2826 | rdev->data_offset << 9); |
| 2827 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
| 2828 | discard_supported = true; | ||
| 2815 | } | 2829 | } |
| 2816 | 2830 | ||
| 2817 | mddev->degraded = 0; | 2831 | mddev->degraded = 0; |
| @@ -2846,6 +2860,13 @@ static int run(struct mddev *mddev) | |||
| 2846 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; | 2860 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; |
| 2847 | mddev->queue->backing_dev_info.congested_data = mddev; | 2861 | mddev->queue->backing_dev_info.congested_data = mddev; |
| 2848 | blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec); | 2862 | blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec); |
| 2863 | |||
| 2864 | if (discard_supported) | ||
| 2865 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | ||
| 2866 | mddev->queue); | ||
| 2867 | else | ||
| 2868 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, | ||
| 2869 | mddev->queue); | ||
| 2849 | } | 2870 | } |
| 2850 | 2871 | ||
| 2851 | ret = md_integrity_register(mddev); | 2872 | ret = md_integrity_register(mddev); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0138a727c1f3..906ccbd0f7dc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -911,7 +911,12 @@ static void flush_pending_writes(struct r10conf *conf) | |||
| 911 | while (bio) { /* submit pending writes */ | 911 | while (bio) { /* submit pending writes */ |
| 912 | struct bio *next = bio->bi_next; | 912 | struct bio *next = bio->bi_next; |
| 913 | bio->bi_next = NULL; | 913 | bio->bi_next = NULL; |
| 914 | generic_make_request(bio); | 914 | if (unlikely((bio->bi_rw & REQ_DISCARD) && |
| 915 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | ||
| 916 | /* Just ignore it */ | ||
| 917 | bio_endio(bio, 0); | ||
| 918 | else | ||
| 919 | generic_make_request(bio); | ||
| 915 | bio = next; | 920 | bio = next; |
| 916 | } | 921 | } |
| 917 | } else | 922 | } else |
| @@ -1050,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio, | |||
| 1050 | return rdev->new_data_offset; | 1055 | return rdev->new_data_offset; |
| 1051 | } | 1056 | } |
| 1052 | 1057 | ||
| 1058 | struct raid10_plug_cb { | ||
| 1059 | struct blk_plug_cb cb; | ||
| 1060 | struct bio_list pending; | ||
| 1061 | int pending_cnt; | ||
| 1062 | }; | ||
| 1063 | |||
| 1064 | static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | ||
| 1065 | { | ||
| 1066 | struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb, | ||
| 1067 | cb); | ||
| 1068 | struct mddev *mddev = plug->cb.data; | ||
| 1069 | struct r10conf *conf = mddev->private; | ||
| 1070 | struct bio *bio; | ||
| 1071 | |||
| 1072 | if (from_schedule) { | ||
| 1073 | spin_lock_irq(&conf->device_lock); | ||
| 1074 | bio_list_merge(&conf->pending_bio_list, &plug->pending); | ||
| 1075 | conf->pending_count += plug->pending_cnt; | ||
| 1076 | spin_unlock_irq(&conf->device_lock); | ||
| 1077 | md_wakeup_thread(mddev->thread); | ||
| 1078 | kfree(plug); | ||
| 1079 | return; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | /* we aren't scheduling, so we can do the write-out directly. */ | ||
| 1083 | bio = bio_list_get(&plug->pending); | ||
| 1084 | bitmap_unplug(mddev->bitmap); | ||
| 1085 | wake_up(&conf->wait_barrier); | ||
| 1086 | |||
| 1087 | while (bio) { /* submit pending writes */ | ||
| 1088 | struct bio *next = bio->bi_next; | ||
| 1089 | bio->bi_next = NULL; | ||
| 1090 | generic_make_request(bio); | ||
| 1091 | bio = next; | ||
| 1092 | } | ||
| 1093 | kfree(plug); | ||
| 1094 | } | ||
| 1095 | |||
| 1053 | static void make_request(struct mddev *mddev, struct bio * bio) | 1096 | static void make_request(struct mddev *mddev, struct bio * bio) |
| 1054 | { | 1097 | { |
| 1055 | struct r10conf *conf = mddev->private; | 1098 | struct r10conf *conf = mddev->private; |
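[Note: raid10_plug_cb copies the plugging scheme raid1 already uses: while the submitting task holds a blk_plug, write bios collect on a per-plug list; at unplug time they are either merged back for the md thread (when unplugging from schedule) or issued directly after a bitmap flush. The caller-side pattern, with the device_lock elided for brevity:

    /* Sketch: queueing one mirror write through the plug (no locks shown). */
    struct blk_plug_cb *cb = blk_check_plugged(raid10_unplug, mddev,
                                               sizeof(struct raid10_plug_cb));
    struct raid10_plug_cb *plug =
        cb ? container_of(cb, struct raid10_plug_cb, cb) : NULL;

    if (plug) {                /* plugged: batch on our list */
        bio_list_add(&plug->pending, mbio);
        plug->pending_cnt++;
    } else {                   /* unplugged: hand to raid10d */
        bio_list_add(&conf->pending_bio_list, mbio);
        conf->pending_count++;
        md_wakeup_thread(mddev->thread);
    }
]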
| @@ -1061,8 +1104,12 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 1061 | const int rw = bio_data_dir(bio); | 1104 | const int rw = bio_data_dir(bio); |
| 1062 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1105 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
| 1063 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 1106 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
| 1107 | const unsigned long do_discard = (bio->bi_rw | ||
| 1108 | & (REQ_DISCARD | REQ_SECURE)); | ||
| 1064 | unsigned long flags; | 1109 | unsigned long flags; |
| 1065 | struct md_rdev *blocked_rdev; | 1110 | struct md_rdev *blocked_rdev; |
| 1111 | struct blk_plug_cb *cb; | ||
| 1112 | struct raid10_plug_cb *plug = NULL; | ||
| 1066 | int sectors_handled; | 1113 | int sectors_handled; |
| 1067 | int max_sectors; | 1114 | int max_sectors; |
| 1068 | int sectors; | 1115 | int sectors; |
| @@ -1081,7 +1128,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
| 1081 | || conf->prev.near_copies < conf->prev.raid_disks))) { | 1128 | || conf->prev.near_copies < conf->prev.raid_disks))) { |
| 1082 | struct bio_pair *bp; | 1129 | struct bio_pair *bp; |
| 1083 | /* Sanity check -- queue functions should prevent this happening */ | 1130 | /* Sanity check -- queue functions should prevent this happening */ |
| 1084 | if (bio->bi_vcnt != 1 || | 1131 | if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) || |
| 1085 | bio->bi_idx != 0) | 1132 | bio->bi_idx != 0) |
| 1086 | goto bad_map; | 1133 | goto bad_map; |
| 1087 | /* This is a one page bio that upper layers | 1134 | /* This is a one page bio that upper layers |
| @@ -1410,15 +1457,26 @@ retry_write: | |||
| 1410 | conf->mirrors[d].rdev)); | 1457 | conf->mirrors[d].rdev)); |
| 1411 | mbio->bi_bdev = conf->mirrors[d].rdev->bdev; | 1458 | mbio->bi_bdev = conf->mirrors[d].rdev->bdev; |
| 1412 | mbio->bi_end_io = raid10_end_write_request; | 1459 | mbio->bi_end_io = raid10_end_write_request; |
| 1413 | mbio->bi_rw = WRITE | do_sync | do_fua; | 1460 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; |
| 1414 | mbio->bi_private = r10_bio; | 1461 | mbio->bi_private = r10_bio; |
| 1415 | 1462 | ||
| 1416 | atomic_inc(&r10_bio->remaining); | 1463 | atomic_inc(&r10_bio->remaining); |
| 1464 | |||
| 1465 | cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); | ||
| 1466 | if (cb) | ||
| 1467 | plug = container_of(cb, struct raid10_plug_cb, cb); | ||
| 1468 | else | ||
| 1469 | plug = NULL; | ||
| 1417 | spin_lock_irqsave(&conf->device_lock, flags); | 1470 | spin_lock_irqsave(&conf->device_lock, flags); |
| 1418 | bio_list_add(&conf->pending_bio_list, mbio); | 1471 | if (plug) { |
| 1419 | conf->pending_count++; | 1472 | bio_list_add(&plug->pending, mbio); |
| 1473 | plug->pending_cnt++; | ||
| 1474 | } else { | ||
| 1475 | bio_list_add(&conf->pending_bio_list, mbio); | ||
| 1476 | conf->pending_count++; | ||
| 1477 | } | ||
| 1420 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1478 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 1421 | if (!mddev_check_plugged(mddev)) | 1479 | if (!plug) |
| 1422 | md_wakeup_thread(mddev->thread); | 1480 | md_wakeup_thread(mddev->thread); |
| 1423 | 1481 | ||
| 1424 | if (!r10_bio->devs[i].repl_bio) | 1482 | if (!r10_bio->devs[i].repl_bio) |
| @@ -1439,7 +1497,7 @@ retry_write: | |||
| 1439 | conf->mirrors[d].replacement)); | 1497 | conf->mirrors[d].replacement)); |
| 1440 | mbio->bi_bdev = conf->mirrors[d].replacement->bdev; | 1498 | mbio->bi_bdev = conf->mirrors[d].replacement->bdev; |
| 1441 | mbio->bi_end_io = raid10_end_write_request; | 1499 | mbio->bi_end_io = raid10_end_write_request; |
| 1442 | mbio->bi_rw = WRITE | do_sync | do_fua; | 1500 | mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; |
| 1443 | mbio->bi_private = r10_bio; | 1501 | mbio->bi_private = r10_bio; |
| 1444 | 1502 | ||
| 1445 | atomic_inc(&r10_bio->remaining); | 1503 | atomic_inc(&r10_bio->remaining); |
| @@ -1638,7 +1696,7 @@ static int raid10_spare_active(struct mddev *mddev) | |||
| 1638 | && !test_bit(Faulty, &tmp->rdev->flags) | 1696 | && !test_bit(Faulty, &tmp->rdev->flags) |
| 1639 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { | 1697 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
| 1640 | count++; | 1698 | count++; |
| 1641 | sysfs_notify_dirent(tmp->rdev->sysfs_state); | 1699 | sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); |
| 1642 | } | 1700 | } |
| 1643 | } | 1701 | } |
| 1644 | spin_lock_irqsave(&conf->device_lock, flags); | 1702 | spin_lock_irqsave(&conf->device_lock, flags); |
| @@ -1725,6 +1783,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
| 1725 | clear_bit(Unmerged, &rdev->flags); | 1783 | clear_bit(Unmerged, &rdev->flags); |
| 1726 | } | 1784 | } |
| 1727 | md_integrity_add_rdev(rdev, mddev); | 1785 | md_integrity_add_rdev(rdev, mddev); |
| 1786 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
| 1787 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 1788 | |||
| 1728 | print_conf(conf); | 1789 | print_conf(conf); |
| 1729 | return err; | 1790 | return err; |
| 1730 | } | 1791 | } |
| @@ -1952,7 +2013,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 1952 | break; | 2013 | break; |
| 1953 | if (j == vcnt) | 2014 | if (j == vcnt) |
| 1954 | continue; | 2015 | continue; |
| 1955 | mddev->resync_mismatches += r10_bio->sectors; | 2016 | atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); |
| 1956 | if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) | 2017 | if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) |
| 1957 | /* Don't fix anything. */ | 2018 | /* Don't fix anything. */ |
| 1958 | continue; | 2019 | continue; |
| @@ -2673,8 +2734,9 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
| 2673 | } | 2734 | } |
| 2674 | } | 2735 | } |
| 2675 | 2736 | ||
| 2676 | static void raid10d(struct mddev *mddev) | 2737 | static void raid10d(struct md_thread *thread) |
| 2677 | { | 2738 | { |
| 2739 | struct mddev *mddev = thread->mddev; | ||
| 2678 | struct r10bio *r10_bio; | 2740 | struct r10bio *r10_bio; |
| 2679 | unsigned long flags; | 2741 | unsigned long flags; |
| 2680 | struct r10conf *conf = mddev->private; | 2742 | struct r10conf *conf = mddev->private; |
| @@ -3158,7 +3220,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 3158 | else { | 3220 | else { |
| 3159 | bad_sectors -= (sector - first_bad); | 3221 | bad_sectors -= (sector - first_bad); |
| 3160 | if (max_sync > bad_sectors) | 3222 | if (max_sync > bad_sectors) |
| 3161 | max_sync = max_sync; | 3223 | max_sync = bad_sectors; |
| 3162 | continue; | 3224 | continue; |
| 3163 | } | 3225 | } |
| 3164 | } | 3226 | } |
| @@ -3482,6 +3544,7 @@ static int run(struct mddev *mddev) | |||
| 3482 | sector_t size; | 3544 | sector_t size; |
| 3483 | sector_t min_offset_diff = 0; | 3545 | sector_t min_offset_diff = 0; |
| 3484 | int first = 1; | 3546 | int first = 1; |
| 3547 | bool discard_supported = false; | ||
| 3485 | 3548 | ||
| 3486 | if (mddev->private == NULL) { | 3549 | if (mddev->private == NULL) { |
| 3487 | conf = setup_conf(mddev); | 3550 | conf = setup_conf(mddev); |
| @@ -3498,6 +3561,8 @@ static int run(struct mddev *mddev) | |||
| 3498 | 3561 | ||
| 3499 | chunk_size = mddev->chunk_sectors << 9; | 3562 | chunk_size = mddev->chunk_sectors << 9; |
| 3500 | if (mddev->queue) { | 3563 | if (mddev->queue) { |
| 3564 | blk_queue_max_discard_sectors(mddev->queue, | ||
| 3565 | mddev->chunk_sectors); | ||
| 3501 | blk_queue_io_min(mddev->queue, chunk_size); | 3566 | blk_queue_io_min(mddev->queue, chunk_size); |
| 3502 | if (conf->geo.raid_disks % conf->geo.near_copies) | 3567 | if (conf->geo.raid_disks % conf->geo.near_copies) |
| 3503 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); | 3568 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); |
| @@ -3543,8 +3608,16 @@ static int run(struct mddev *mddev) | |||
| 3543 | rdev->data_offset << 9); | 3608 | rdev->data_offset << 9); |
| 3544 | 3609 | ||
| 3545 | disk->head_position = 0; | 3610 | disk->head_position = 0; |
| 3611 | |||
| 3612 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
| 3613 | discard_supported = true; | ||
| 3546 | } | 3614 | } |
| 3547 | 3615 | ||
| 3616 | if (discard_supported) | ||
| 3617 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 3618 | else | ||
| 3619 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
| 3620 | |||
| 3548 | /* need to check that every block has at least one working mirror */ | 3621 | /* need to check that every block has at least one working mirror */ |
| 3549 | if (!enough(conf, -1)) { | 3622 | if (!enough(conf, -1)) { |
| 3550 | printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", | 3623 | printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0689173fd9f5..c5439dce0295 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -551,6 +551,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
| 551 | rw = WRITE_FUA; | 551 | rw = WRITE_FUA; |
| 552 | else | 552 | else |
| 553 | rw = WRITE; | 553 | rw = WRITE; |
| 554 | if (test_bit(R5_Discard, &sh->dev[i].flags)) | ||
| 555 | rw |= REQ_DISCARD; | ||
| 554 | } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) | 556 | } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) |
| 555 | rw = READ; | 557 | rw = READ; |
| 556 | else if (test_and_clear_bit(R5_WantReplace, | 558 | else if (test_and_clear_bit(R5_WantReplace, |
| @@ -1174,8 +1176,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | |||
| 1174 | set_bit(R5_WantFUA, &dev->flags); | 1176 | set_bit(R5_WantFUA, &dev->flags); |
| 1175 | if (wbi->bi_rw & REQ_SYNC) | 1177 | if (wbi->bi_rw & REQ_SYNC) |
| 1176 | set_bit(R5_SyncIO, &dev->flags); | 1178 | set_bit(R5_SyncIO, &dev->flags); |
| 1177 | tx = async_copy_data(1, wbi, dev->page, | 1179 | if (wbi->bi_rw & REQ_DISCARD) |
| 1178 | dev->sector, tx); | 1180 | set_bit(R5_Discard, &dev->flags); |
| 1181 | else | ||
| 1182 | tx = async_copy_data(1, wbi, dev->page, | ||
| 1183 | dev->sector, tx); | ||
| 1179 | wbi = r5_next_bio(wbi, dev->sector); | 1184 | wbi = r5_next_bio(wbi, dev->sector); |
| 1180 | } | 1185 | } |
| 1181 | } | 1186 | } |
| @@ -1191,7 +1196,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref) | |||
| 1191 | int pd_idx = sh->pd_idx; | 1196 | int pd_idx = sh->pd_idx; |
| 1192 | int qd_idx = sh->qd_idx; | 1197 | int qd_idx = sh->qd_idx; |
| 1193 | int i; | 1198 | int i; |
| 1194 | bool fua = false, sync = false; | 1199 | bool fua = false, sync = false, discard = false; |
| 1195 | 1200 | ||
| 1196 | pr_debug("%s: stripe %llu\n", __func__, | 1201 | pr_debug("%s: stripe %llu\n", __func__, |
| 1197 | (unsigned long long)sh->sector); | 1202 | (unsigned long long)sh->sector); |
| @@ -1199,13 +1204,15 @@ static void ops_complete_reconstruct(void *stripe_head_ref) | |||
| 1199 | for (i = disks; i--; ) { | 1204 | for (i = disks; i--; ) { |
| 1200 | fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); | 1205 | fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); |
| 1201 | sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); | 1206 | sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); |
| 1207 | discard |= test_bit(R5_Discard, &sh->dev[i].flags); | ||
| 1202 | } | 1208 | } |
| 1203 | 1209 | ||
| 1204 | for (i = disks; i--; ) { | 1210 | for (i = disks; i--; ) { |
| 1205 | struct r5dev *dev = &sh->dev[i]; | 1211 | struct r5dev *dev = &sh->dev[i]; |
| 1206 | 1212 | ||
| 1207 | if (dev->written || i == pd_idx || i == qd_idx) { | 1213 | if (dev->written || i == pd_idx || i == qd_idx) { |
| 1208 | set_bit(R5_UPTODATE, &dev->flags); | 1214 | if (!discard) |
| 1215 | set_bit(R5_UPTODATE, &dev->flags); | ||
| 1209 | if (fua) | 1216 | if (fua) |
| 1210 | set_bit(R5_WantFUA, &dev->flags); | 1217 | set_bit(R5_WantFUA, &dev->flags); |
| 1211 | if (sync) | 1218 | if (sync) |
| @@ -1241,6 +1248,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, | |||
| 1241 | pr_debug("%s: stripe %llu\n", __func__, | 1248 | pr_debug("%s: stripe %llu\n", __func__, |
| 1242 | (unsigned long long)sh->sector); | 1249 | (unsigned long long)sh->sector); |
| 1243 | 1250 | ||
| 1251 | for (i = 0; i < sh->disks; i++) { | ||
| 1252 | if (pd_idx == i) | ||
| 1253 | continue; | ||
| 1254 | if (!test_bit(R5_Discard, &sh->dev[i].flags)) | ||
| 1255 | break; | ||
| 1256 | } | ||
| 1257 | if (i >= sh->disks) { | ||
| 1258 | atomic_inc(&sh->count); | ||
| 1259 | set_bit(R5_Discard, &sh->dev[pd_idx].flags); | ||
| 1260 | ops_complete_reconstruct(sh); | ||
| 1261 | return; | ||
| 1262 | } | ||
| 1244 | /* check if prexor is active which means only process blocks | 1263 | /* check if prexor is active which means only process blocks |
| 1245 | * that are part of a read-modify-write (written) | 1264 | * that are part of a read-modify-write (written) |
| 1246 | */ | 1265 | */ |
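[Note: ops_run_reconstruct5 now begins with an all-discard check, and ops_run_reconstruct6 gets the same treatment in the next hunk: if every data block in the stripe carries R5_Discard there is no parity worth computing, so the parity device(s) inherit the flag and completion runs straight away. Distilled, with the stripe reference count elided:

    /* Sketch: bypass the parity XOR for a fully-discarded stripe. */
    bool all_discard = true;
    for (i = 0; i < sh->disks; i++)
        if (i != sh->pd_idx && i != sh->qd_idx &&
            !test_bit(R5_Discard, &sh->dev[i].flags))
            all_discard = false;
    if (all_discard) {
        set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
        /* raid6 additionally flags sh->qd_idx */
        ops_complete_reconstruct(sh);    /* atomic_inc(&sh->count) elided */
    }
]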
| @@ -1285,10 +1304,24 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, | |||
| 1285 | { | 1304 | { |
| 1286 | struct async_submit_ctl submit; | 1305 | struct async_submit_ctl submit; |
| 1287 | struct page **blocks = percpu->scribble; | 1306 | struct page **blocks = percpu->scribble; |
| 1288 | int count; | 1307 | int count, i; |
| 1289 | 1308 | ||
| 1290 | pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); | 1309 | pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); |
| 1291 | 1310 | ||
| 1311 | for (i = 0; i < sh->disks; i++) { | ||
| 1312 | if (sh->pd_idx == i || sh->qd_idx == i) | ||
| 1313 | continue; | ||
| 1314 | if (!test_bit(R5_Discard, &sh->dev[i].flags)) | ||
| 1315 | break; | ||
| 1316 | } | ||
| 1317 | if (i >= sh->disks) { | ||
| 1318 | atomic_inc(&sh->count); | ||
| 1319 | set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); | ||
| 1320 | set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); | ||
| 1321 | ops_complete_reconstruct(sh); | ||
| 1322 | return; | ||
| 1323 | } | ||
| 1324 | |||
| 1292 | count = set_syndrome_sources(blocks, sh); | 1325 | count = set_syndrome_sources(blocks, sh); |
| 1293 | 1326 | ||
| 1294 | atomic_inc(&sh->count); | 1327 | atomic_inc(&sh->count); |
| @@ -2408,11 +2441,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
| 2408 | if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) | 2441 | if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) |
| 2409 | set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); | 2442 | set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); |
| 2410 | } | 2443 | } |
| 2411 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2412 | 2444 | ||
| 2413 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", | 2445 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
| 2414 | (unsigned long long)(*bip)->bi_sector, | 2446 | (unsigned long long)(*bip)->bi_sector, |
| 2415 | (unsigned long long)sh->sector, dd_idx); | 2447 | (unsigned long long)sh->sector, dd_idx); |
| 2448 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2416 | 2449 | ||
| 2417 | if (conf->mddev->bitmap && firstwrite) { | 2450 | if (conf->mddev->bitmap && firstwrite) { |
| 2418 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, | 2451 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, |
| @@ -2479,10 +2512,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
| 2479 | bi = sh->dev[i].towrite; | 2512 | bi = sh->dev[i].towrite; |
| 2480 | sh->dev[i].towrite = NULL; | 2513 | sh->dev[i].towrite = NULL; |
| 2481 | spin_unlock_irq(&sh->stripe_lock); | 2514 | spin_unlock_irq(&sh->stripe_lock); |
| 2482 | if (bi) { | 2515 | if (bi) |
| 2483 | s->to_write--; | ||
| 2484 | bitmap_end = 1; | 2516 | bitmap_end = 1; |
| 2485 | } | ||
| 2486 | 2517 | ||
| 2487 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 2518 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2488 | wake_up(&conf->wait_for_overlap); | 2519 | wake_up(&conf->wait_for_overlap); |
| @@ -2524,11 +2555,12 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
| 2524 | if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && | 2555 | if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && |
| 2525 | (!test_bit(R5_Insync, &sh->dev[i].flags) || | 2556 | (!test_bit(R5_Insync, &sh->dev[i].flags) || |
| 2526 | test_bit(R5_ReadError, &sh->dev[i].flags))) { | 2557 | test_bit(R5_ReadError, &sh->dev[i].flags))) { |
| 2558 | spin_lock_irq(&sh->stripe_lock); | ||
| 2527 | bi = sh->dev[i].toread; | 2559 | bi = sh->dev[i].toread; |
| 2528 | sh->dev[i].toread = NULL; | 2560 | sh->dev[i].toread = NULL; |
| 2561 | spin_unlock_irq(&sh->stripe_lock); | ||
| 2529 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 2562 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2530 | wake_up(&conf->wait_for_overlap); | 2563 | wake_up(&conf->wait_for_overlap); |
| 2531 | if (bi) s->to_read--; | ||
| 2532 | while (bi && bi->bi_sector < | 2564 | while (bi && bi->bi_sector < |
| 2533 | sh->dev[i].sector + STRIPE_SECTORS) { | 2565 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2534 | struct bio *nextbi = | 2566 | struct bio *nextbi = |
| @@ -2741,7 +2773,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
| 2741 | if (sh->dev[i].written) { | 2773 | if (sh->dev[i].written) { |
| 2742 | dev = &sh->dev[i]; | 2774 | dev = &sh->dev[i]; |
| 2743 | if (!test_bit(R5_LOCKED, &dev->flags) && | 2775 | if (!test_bit(R5_LOCKED, &dev->flags) && |
| 2744 | test_bit(R5_UPTODATE, &dev->flags)) { | 2776 | (test_bit(R5_UPTODATE, &dev->flags) || |
| 2777 | test_and_clear_bit(R5_Discard, &dev->flags))) { | ||
| 2745 | /* We can return any write requests */ | 2778 | /* We can return any write requests */ |
| 2746 | struct bio *wbi, *wbi2; | 2779 | struct bio *wbi, *wbi2; |
| 2747 | pr_debug("Return write for disc %d\n", i); | 2780 | pr_debug("Return write for disc %d\n", i); |
| @@ -2775,12 +2808,25 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
| 2775 | int disks) | 2808 | int disks) |
| 2776 | { | 2809 | { |
| 2777 | int rmw = 0, rcw = 0, i; | 2810 | int rmw = 0, rcw = 0, i; |
| 2778 | if (conf->max_degraded == 2) { | 2811 | sector_t recovery_cp = conf->mddev->recovery_cp; |
| 2779 | /* RAID6 requires 'rcw' in current implementation | 2812 | |
| 2780 | * Calculate the real rcw later - for now fake it | 2813 | /* RAID6 requires 'rcw' in current implementation. |
| 2814 | * Otherwise, check whether resync is now happening or should start. | ||
| 2815 | * If yes, then the array is dirty (after unclean shutdown or | ||
| 2816 | * initial creation), so parity in some stripes might be inconsistent. | ||
| 2817 | * In this case, we need to always do reconstruct-write, to ensure | ||
| 2818 | * that in case of drive failure or read-error correction, we | ||
| 2819 | * generate correct data from the parity. | ||
| 2820 | */ | ||
| 2821 | if (conf->max_degraded == 2 || | ||
| 2822 | (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { | ||
| 2823 | /* Calculate the real rcw later - for now make it | ||
| 2781 | * look like rcw is cheaper | 2824 | * look like rcw is cheaper |
| 2782 | */ | 2825 | */ |
| 2783 | rcw = 1; rmw = 2; | 2826 | rcw = 1; rmw = 2; |
| 2827 | pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n", | ||
| 2828 | conf->max_degraded, (unsigned long long)recovery_cp, | ||
| 2829 | (unsigned long long)sh->sector); | ||
| 2784 | } else for (i = disks; i--; ) { | 2830 | } else for (i = disks; i--; ) { |
| 2785 | /* would I have to read this buffer for read_modify_write */ | 2831 | /* would I have to read this buffer for read_modify_write */ |
| 2786 | struct r5dev *dev = &sh->dev[i]; | 2832 | struct r5dev *dev = &sh->dev[i]; |
| @@ -2932,7 +2978,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, | |||
| 2932 | */ | 2978 | */ |
| 2933 | set_bit(STRIPE_INSYNC, &sh->state); | 2979 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2934 | else { | 2980 | else { |
| 2935 | conf->mddev->resync_mismatches += STRIPE_SECTORS; | 2981 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 2936 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 2982 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 2937 | /* don't try to repair!! */ | 2983 | /* don't try to repair!! */ |
| 2938 | set_bit(STRIPE_INSYNC, &sh->state); | 2984 | set_bit(STRIPE_INSYNC, &sh->state); |
| @@ -3084,7 +3130,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, | |||
| 3084 | */ | 3130 | */ |
| 3085 | } | 3131 | } |
| 3086 | } else { | 3132 | } else { |
| 3087 | conf->mddev->resync_mismatches += STRIPE_SECTORS; | 3133 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 3088 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 3134 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 3089 | /* don't try to repair!! */ | 3135 | /* don't try to repair!! */ |
| 3090 | set_bit(STRIPE_INSYNC, &sh->state); | 3136 | set_bit(STRIPE_INSYNC, &sh->state); |
| @@ -3459,10 +3505,12 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 3459 | if (s.written && | 3505 | if (s.written && |
| 3460 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) | 3506 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) |
| 3461 | && !test_bit(R5_LOCKED, &pdev->flags) | 3507 | && !test_bit(R5_LOCKED, &pdev->flags) |
| 3462 | && test_bit(R5_UPTODATE, &pdev->flags)))) && | 3508 | && (test_bit(R5_UPTODATE, &pdev->flags) || |
| 3509 | test_bit(R5_Discard, &pdev->flags))))) && | ||
| 3463 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) | 3510 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) |
| 3464 | && !test_bit(R5_LOCKED, &qdev->flags) | 3511 | && !test_bit(R5_LOCKED, &qdev->flags) |
| 3465 | && test_bit(R5_UPTODATE, &qdev->flags))))) | 3512 | && (test_bit(R5_UPTODATE, &qdev->flags) || |
| 3513 | test_bit(R5_Discard, &qdev->flags)))))) | ||
| 3466 | handle_stripe_clean_event(conf, sh, disks, &s.return_bi); | 3514 | handle_stripe_clean_event(conf, sh, disks, &s.return_bi); |
| 3467 | 3515 | ||
| 3468 | /* Now we might consider reading some blocks, either to check/generate | 3516 | /* Now we might consider reading some blocks, either to check/generate |
| @@ -3489,9 +3537,11 @@ static void handle_stripe(struct stripe_head *sh) | |||
| 3489 | /* All the 'written' buffers and the parity block are ready to | 3537 | /* All the 'written' buffers and the parity block are ready to |
| 3490 | * be written back to disk | 3538 | * be written back to disk |
| 3491 | */ | 3539 | */ |
| 3492 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); | 3540 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && |
| 3541 | !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); | ||
| 3493 | BUG_ON(sh->qd_idx >= 0 && | 3542 | BUG_ON(sh->qd_idx >= 0 && |
| 3494 | !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); | 3543 | !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && |
| 3544 | !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); | ||
| 3495 | for (i = disks; i--; ) { | 3545 | for (i = disks; i--; ) { |
| 3496 | struct r5dev *dev = &sh->dev[i]; | 3546 | struct r5dev *dev = &sh->dev[i]; |
| 3497 | if (test_bit(R5_LOCKED, &dev->flags) && | 3547 | if (test_bit(R5_LOCKED, &dev->flags) && |
| @@ -4072,6 +4122,88 @@ static void release_stripe_plug(struct mddev *mddev, | |||
| 4072 | release_stripe(sh); | 4122 | release_stripe(sh); |
| 4073 | } | 4123 | } |
| 4074 | 4124 | ||
| 4125 | static void make_discard_request(struct mddev *mddev, struct bio *bi) | ||
| 4126 | { | ||
| 4127 | struct r5conf *conf = mddev->private; | ||
| 4128 | sector_t logical_sector, last_sector; | ||
| 4129 | struct stripe_head *sh; | ||
| 4130 | int remaining; | ||
| 4131 | int stripe_sectors; | ||
| 4132 | |||
| 4133 | if (mddev->reshape_position != MaxSector) | ||
| 4134 | /* Skip discard while reshape is happening */ | ||
| 4135 | return; | ||
| 4136 | |||
| 4137 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | ||
| 4138 | last_sector = bi->bi_sector + (bi->bi_size>>9); | ||
| 4139 | |||
| 4140 | bi->bi_next = NULL; | ||
| 4141 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ | ||
| 4142 | |||
| 4143 | stripe_sectors = conf->chunk_sectors * | ||
| 4144 | (conf->raid_disks - conf->max_degraded); | ||
| 4145 | logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, | ||
| 4146 | stripe_sectors); | ||
| 4147 | sector_div(last_sector, stripe_sectors); | ||
| 4148 | |||
| 4149 | logical_sector *= conf->chunk_sectors; | ||
| 4150 | last_sector *= conf->chunk_sectors; | ||
| 4151 | |||
| 4152 | for (; logical_sector < last_sector; | ||
| 4153 | logical_sector += STRIPE_SECTORS) { | ||
| 4154 | DEFINE_WAIT(w); | ||
| 4155 | int d; | ||
| 4156 | again: | ||
| 4157 | sh = get_active_stripe(conf, logical_sector, 0, 0, 0); | ||
| 4158 | prepare_to_wait(&conf->wait_for_overlap, &w, | ||
| 4159 | TASK_UNINTERRUPTIBLE); | ||
| 4160 | spin_lock_irq(&sh->stripe_lock); | ||
| 4161 | for (d = 0; d < conf->raid_disks; d++) { | ||
| 4162 | if (d == sh->pd_idx || d == sh->qd_idx) | ||
| 4163 | continue; | ||
| 4164 | if (sh->dev[d].towrite || sh->dev[d].toread) { | ||
| 4165 | set_bit(R5_Overlap, &sh->dev[d].flags); | ||
| 4166 | spin_unlock_irq(&sh->stripe_lock); | ||
| 4167 | release_stripe(sh); | ||
| 4168 | schedule(); | ||
| 4169 | goto again; | ||
| 4170 | } | ||
| 4171 | } | ||
| 4172 | finish_wait(&conf->wait_for_overlap, &w); | ||
| 4173 | for (d = 0; d < conf->raid_disks; d++) { | ||
| 4174 | if (d == sh->pd_idx || d == sh->qd_idx) | ||
| 4175 | continue; | ||
| 4176 | sh->dev[d].towrite = bi; | ||
| 4177 | set_bit(R5_OVERWRITE, &sh->dev[d].flags); | ||
| 4178 | raid5_inc_bi_active_stripes(bi); | ||
| 4179 | } | ||
| 4180 | spin_unlock_irq(&sh->stripe_lock); | ||
| 4181 | if (conf->mddev->bitmap) { | ||
| 4182 | for (d = 0; | ||
| 4183 | d < conf->raid_disks - conf->max_degraded; | ||
| 4184 | d++) | ||
| 4185 | bitmap_startwrite(mddev->bitmap, | ||
| 4186 | sh->sector, | ||
| 4187 | STRIPE_SECTORS, | ||
| 4188 | 0); | ||
| 4189 | sh->bm_seq = conf->seq_flush + 1; | ||
| 4190 | set_bit(STRIPE_BIT_DELAY, &sh->state); | ||
| 4191 | } | ||
| 4192 | |||
| 4193 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 4194 | clear_bit(STRIPE_DELAYED, &sh->state); | ||
| 4195 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) | ||
| 4196 | atomic_inc(&conf->preread_active_stripes); | ||
| 4197 | release_stripe_plug(mddev, sh); | ||
| 4198 | } | ||
| 4199 | |||
| 4200 | remaining = raid5_dec_bi_active_stripes(bi); | ||
| 4201 | if (remaining == 0) { | ||
| 4202 | md_write_end(mddev); | ||
| 4203 | bio_endio(bi, 0); | ||
| 4204 | } | ||
| 4205 | } | ||
| 4206 | |||
| 4075 | static void make_request(struct mddev *mddev, struct bio * bi) | 4207 | static void make_request(struct mddev *mddev, struct bio * bi) |
| 4076 | { | 4208 | { |
| 4077 | struct r5conf *conf = mddev->private; | 4209 | struct r5conf *conf = mddev->private; |
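[Note: make_discard_request() above trims the bio to whole stripes before touching any stripe_head: the start sector is rounded up and the end rounded down to a multiple of stripe_sectors = chunk_sectors * data_disks, and the survivors are converted to per-device offsets. A worked example under an assumed, illustrative geometry (512 KiB chunks, 4-disk raid5, so 3 data disks):

    /* Worked example of the rounding above (assumed geometry):
     * chunk_sectors = 1024, raid_disks = 4, max_degraded = 1
     * => stripe_sectors = 1024 * 3 = 3072 array sectors per full stripe.
     */
    sector_t start = 100, end = 7000;    /* requested discard, array sectors */

    start = DIV_ROUND_UP_SECTOR_T(start, 3072);    /* = 1, rounded up   */
    sector_div(end, 3072);                         /* = 2, rounded down */
    start *= 1024;    /* per-device offset 1024 */
    end   *= 1024;    /* per-device offset 2048 */
    /* Only the one full stripe (array sectors 3072..6143) is discarded;
     * the ragged edges 100..3071 and 6144..6999 are dropped, which is
     * why raid5 cannot promise discard_zeroes_data. */
]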
| @@ -4094,6 +4226,11 @@ static void make_request(struct mddev *mddev, struct bio * bi) | |||
| 4094 | chunk_aligned_read(mddev,bi)) | 4226 | chunk_aligned_read(mddev,bi)) |
| 4095 | return; | 4227 | return; |
| 4096 | 4228 | ||
| 4229 | if (unlikely(bi->bi_rw & REQ_DISCARD)) { | ||
| 4230 | make_discard_request(mddev, bi); | ||
| 4231 | return; | ||
| 4232 | } | ||
| 4233 | |||
| 4097 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 4234 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 4098 | last_sector = bi->bi_sector + (bi->bi_size>>9); | 4235 | last_sector = bi->bi_sector + (bi->bi_size>>9); |
| 4099 | bi->bi_next = NULL; | 4236 | bi->bi_next = NULL; |
| @@ -4630,8 +4767,9 @@ static int handle_active_stripes(struct r5conf *conf) | |||
| 4630 | * During the scan, completed stripes are saved for us by the interrupt | 4767 | * During the scan, completed stripes are saved for us by the interrupt |
| 4631 | * handler, so that they will not have to wait for our next wakeup. | 4768 | * handler, so that they will not have to wait for our next wakeup. |
| 4632 | */ | 4769 | */ |
| 4633 | static void raid5d(struct mddev *mddev) | 4770 | static void raid5d(struct md_thread *thread) |
| 4634 | { | 4771 | { |
| 4772 | struct mddev *mddev = thread->mddev; | ||
| 4635 | struct r5conf *conf = mddev->private; | 4773 | struct r5conf *conf = mddev->private; |
| 4636 | int handled; | 4774 | int handled; |
| 4637 | struct blk_plug plug; | 4775 | struct blk_plug plug; |
| @@ -5366,6 +5504,7 @@ static int run(struct mddev *mddev) | |||
| 5366 | 5504 | ||
| 5367 | if (mddev->queue) { | 5505 | if (mddev->queue) { |
| 5368 | int chunk_size; | 5506 | int chunk_size; |
| 5507 | bool discard_supported = true; | ||
| 5369 | /* read-ahead size must cover two whole stripes, which | 5508 | /* read-ahead size must cover two whole stripes, which |
| 5370 | * is 2 * (datadisks) * chunksize where 'n' is the | 5509 | * is 2 * (datadisks) * chunksize where 'n' is the |
| 5371 | * number of raid devices | 5510 | * number of raid devices |
| @@ -5385,13 +5524,48 @@ static int run(struct mddev *mddev) | |||
| 5385 | blk_queue_io_min(mddev->queue, chunk_size); | 5524 | blk_queue_io_min(mddev->queue, chunk_size); |
| 5386 | blk_queue_io_opt(mddev->queue, chunk_size * | 5525 | blk_queue_io_opt(mddev->queue, chunk_size * |
| 5387 | (conf->raid_disks - conf->max_degraded)); | 5526 | (conf->raid_disks - conf->max_degraded)); |
| 5527 | /* | ||
| 5528 | * We can only discard a whole stripe. It doesn't make sense to | ||
| 5529 | * discard the data disks but still write the parity disk | ||
| 5530 | */ | ||
| 5531 | stripe = stripe * PAGE_SIZE; | ||
| 5532 | mddev->queue->limits.discard_alignment = stripe; | ||
| 5533 | mddev->queue->limits.discard_granularity = stripe; | ||
| 5534 | /* | ||
| 5535 | * the unaligned part of a discard request will be ignored, so we | ||
| 5536 | * can't guarantee discard_zeroes_data | ||
| 5537 | */ | ||
| 5538 | mddev->queue->limits.discard_zeroes_data = 0; | ||
| 5388 | 5539 | ||
| 5389 | rdev_for_each(rdev, mddev) { | 5540 | rdev_for_each(rdev, mddev) { |
| 5390 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 5541 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 5391 | rdev->data_offset << 9); | 5542 | rdev->data_offset << 9); |
| 5392 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 5543 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 5393 | rdev->new_data_offset << 9); | 5544 | rdev->new_data_offset << 9); |
| 5545 | /* | ||
| 5546 | * discard_zeroes_data is required, otherwise data | ||
| 5547 | * could be lost. Consider a scenario: discard a stripe | ||
| 5548 | * (the stripe could be inconsistent if | ||
| 5549 | * discard_zeroes_data is 0); write one disk of the | ||
| 5550 | * stripe (the stripe could be inconsistent again | ||
| 5551 | * depending on which disks are used to calculate | ||
| 5552 | * parity); the disk then fails; the stripe data of this | ||
| 5553 | * disk is lost. | ||
| 5554 | */ | ||
| 5555 | if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || | ||
| 5556 | !bdev_get_queue(rdev->bdev)-> | ||
| 5557 | limits.discard_zeroes_data) | ||
| 5558 | discard_supported = false; | ||
| 5394 | } | 5559 | } |
| 5560 | |||
| 5561 | if (discard_supported && | ||
| 5562 | mddev->queue->limits.max_discard_sectors >= stripe && | ||
| 5563 | mddev->queue->limits.discard_granularity >= stripe) | ||
| 5564 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | ||
| 5565 | mddev->queue); | ||
| 5566 | else | ||
| 5567 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, | ||
| 5568 | mddev->queue); | ||
| 5395 | } | 5569 | } |
| 5396 | 5570 | ||
| 5397 | return 0; | 5571 | return 0; |
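[Note: in run(), stripe was computed a few lines earlier (for read-ahead sizing) as data-disks times chunk size in pages; multiplying by PAGE_SIZE turns it into the byte length of one full stripe, which becomes both the discard granularity and alignment, and the array only advertises discard when every member both discards and guarantees discard_zeroes_data. A quick worked instance under assumed geometry:

    /* Assumed: 512 KiB chunk (1024 sectors), 4-disk raid5, 4 KiB pages. */
    stripe  = ((1024 << 9) / PAGE_SIZE) * (4 - 1);      /* 384 pages     */
    stripe *= PAGE_SIZE;                                /* 1.5 MiB       */
    mddev->queue->limits.discard_granularity = stripe;  /* whole stripes */
    mddev->queue->limits.discard_alignment   = stripe;
]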
| @@ -5702,7 +5876,8 @@ static int check_reshape(struct mddev *mddev) | |||
| 5702 | if (!check_stripe_cache(mddev)) | 5876 | if (!check_stripe_cache(mddev)) |
| 5703 | return -ENOSPC; | 5877 | return -ENOSPC; |
| 5704 | 5878 | ||
| 5705 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); | 5879 | return resize_stripes(conf, (conf->previous_raid_disks |
| 5880 | + mddev->delta_disks)); | ||
| 5706 | } | 5881 | } |
| 5707 | 5882 | ||
| 5708 | static int raid5_start_reshape(struct mddev *mddev) | 5883 | static int raid5_start_reshape(struct mddev *mddev) |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index a9fc24901eda..18b2c4a8a1fd 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
| @@ -298,6 +298,7 @@ enum r5dev_flags { | |||
| 298 | R5_WantReplace, /* We need to update the replacement, we have read | 298 | R5_WantReplace, /* We need to update the replacement, we have read |
| 299 | * data in, and now is a good time to write it out. | 299 | * data in, and now is a good time to write it out. |
| 300 | */ | 300 | */ |
| 301 | R5_Discard, /* Discard the stripe */ | ||
| 301 | }; | 302 | }; |
| 302 | 303 | ||
| 303 | /* | 304 | /* |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index bb4c2bf04d09..80d1e6d4b0ae 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
| @@ -525,7 +525,7 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
| 525 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, | 525 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, |
| 526 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); | 526 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); |
| 527 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, | 527 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, |
| 528 | host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); | 528 | ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET); |
| 529 | } else { | 529 | } else { |
| 530 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, | 530 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, |
| 531 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); | 531 | ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cb3356c9af80..04668b47a1df 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
| @@ -175,13 +175,13 @@ struct e1000_info; | |||
| 175 | /* | 175 | /* |
| 176 | * in the case of WTHRESH, it appears at least the 82571/2 hardware | 176 | * in the case of WTHRESH, it appears at least the 82571/2 hardware |
| 177 | * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when | 177 | * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when |
| 178 | * WTHRESH=4, and since we want 64 bytes at a time written back, set | 178 | * WTHRESH=4, so a setting of 5 gives the most efficient bus |
| 179 | * it to 5 | 179 | * utilization, but to avoid possible Tx stalls, set it to 1 |
| 180 | */ | 180 | */ |
| 181 | #define E1000_TXDCTL_DMA_BURST_ENABLE \ | 181 | #define E1000_TXDCTL_DMA_BURST_ENABLE \ |
| 182 | (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ | 182 | (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ |
| 183 | E1000_TXDCTL_COUNT_DESC | \ | 183 | E1000_TXDCTL_COUNT_DESC | \ |
| 184 | (5 << 16) | /* wthresh must be +1 more than desired */\ | 184 | (1 << 16) | /* wthresh must be +1 more than desired */\ |
| 185 | (1 << 8) | /* hthresh */ \ | 185 | (1 << 8) | /* hthresh */ \ |
| 186 | 0x1f) /* pthresh */ | 186 | 0x1f) /* pthresh */ |
| 187 | 187 | ||
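For context, TXDCTL packs its three thresholds into one register; the offsets can be read straight off the shifts in the macro above (the field widths are an assumption from the 82571-family layout, not stated in the diff):

    /* Sketch only: pack prefetch/host/writeback thresholds into TXDCTL.
     * txdctl_thresholds(0x1f, 1, 1) reproduces the new macro's fields. */
    static inline u32 txdctl_thresholds(u32 pthresh, u32 hthresh, u32 wthresh)
    {
            return (wthresh << 16) | (hthresh << 8) | pthresh;
    }

Since the hardware acts at WTHRESH-1 ("must be +1 more than desired"), the new value of 1 means descriptors are written back immediately rather than coalesced, trading bus efficiency for freedom from the Tx stalls mentioned above.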
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index de57a2ba6bde..f444eb0b76d8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
| @@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
| 2831 | * set up some performance related parameters to encourage the | 2831 | * set up some performance related parameters to encourage the |
| 2832 | * hardware to use the bus more efficiently in bursts, depends | 2832 | * hardware to use the bus more efficiently in bursts, depends |
| 2833 | * on the tx_int_delay to be enabled, | 2833 | * on the tx_int_delay to be enabled, |
| 2834 | * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time | 2834 | * wthresh = 1 ==> burst write is disabled to avoid Tx stalls |
| 2835 | * hthresh = 1 ==> prefetch when one or more available | 2835 | * hthresh = 1 ==> prefetch when one or more available |
| 2836 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less | 2836 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less |
| 2837 | * BEWARE: this seems to work but should be considered first if | 2837 | * BEWARE: this seems to work but should be considered first if |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index c911d883c27e..f8064df10cc4 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
| 29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
| 30 | #include <linux/pci-aspm.h> | ||
| 30 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
| 31 | #include <linux/etherdevice.h> | 32 | #include <linux/etherdevice.h> |
| 32 | #include <linux/ethtool.h> | 33 | #include <linux/ethtool.h> |
| @@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev, | |||
| 2973 | /* | 2974 | /* |
| 2974 | * set up PCI device basics | 2975 | * set up PCI device basics |
| 2975 | */ | 2976 | */ |
| 2977 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
| 2978 | PCIE_LINK_STATE_CLKPM); | ||
| 2979 | |||
| 2976 | rc = pci_enable_device(pdev); | 2980 | rc = pci_enable_device(pdev); |
| 2977 | if (rc) { | 2981 | if (rc) { |
| 2978 | pr_err("Cannot enable PCI device\n"); | 2982 | pr_err("Cannot enable PCI device\n"); |
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 434d5af8e6fb..c81e278629ff 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c | |||
| @@ -244,8 +244,12 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
| 244 | * - suspend: peripheral ready to suspend | 244 | * - suspend: peripheral ready to suspend |
| 245 | * - response: suggest N millisec polling | 245 | * - response: suggest N millisec polling |
| 246 | * - response complete: suggest N sec polling | 246 | * - response complete: suggest N sec polling |
| 247 | * | ||
| 248 | * Suspend is reported and may be heeded. | ||
| 247 | */ | 249 | */ |
| 248 | case 2: /* Suspend hint */ | 250 | case 2: /* Suspend hint */ |
| 251 | usbnet_device_suggests_idle(dev); | ||
| 252 | continue; | ||
| 249 | case 3: /* Response hint */ | 253 | case 3: /* Response hint */ |
| 250 | case 4: /* Response complete hint */ | 254 | case 4: /* Response complete hint */ |
| 251 | continue; | 255 | continue; |
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index c75e11e1b385..afb117c16d2d 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
| @@ -424,7 +424,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth, | |||
| 424 | 424 | ||
| 425 | netdev_dbg(kaweth->net, | 425 | netdev_dbg(kaweth->net, |
| 426 | "Downloading firmware at %p to kaweth device at %p\n", | 426 | "Downloading firmware at %p to kaweth device at %p\n", |
| 427 | fw->data, kaweth); | 427 | kaweth->firmware_buf, kaweth); |
| 428 | netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); | 428 | netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); |
| 429 | 429 | ||
| 430 | return kaweth_control(kaweth, | 430 | return kaweth_control(kaweth, |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 03c2d8d653df..cc7e72010ac3 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c | |||
| @@ -117,6 +117,7 @@ enum { | |||
| 117 | struct mcs7830_data { | 117 | struct mcs7830_data { |
| 118 | u8 multi_filter[8]; | 118 | u8 multi_filter[8]; |
| 119 | u8 config; | 119 | u8 config; |
| 120 | u8 link_counter; | ||
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; | 123 | static const char driver_name[] = "MOSCHIP usb-ethernet driver"; |
| @@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
| 632 | static void mcs7830_status(struct usbnet *dev, struct urb *urb) | 633 | static void mcs7830_status(struct usbnet *dev, struct urb *urb) |
| 633 | { | 634 | { |
| 634 | u8 *buf = urb->transfer_buffer; | 635 | u8 *buf = urb->transfer_buffer; |
| 635 | bool link; | 636 | bool link, link_changed; |
| 637 | struct mcs7830_data *data = mcs7830_get_data(dev); | ||
| 636 | 638 | ||
| 637 | if (urb->actual_length < 16) | 639 | if (urb->actual_length < 16) |
| 638 | return; | 640 | return; |
| 639 | 641 | ||
| 640 | link = !(buf[1] & 0x20); | 642 | link = !(buf[1] & 0x20); |
| 641 | if (netif_carrier_ok(dev->net) != link) { | 643 | link_changed = netif_carrier_ok(dev->net) != link; |
| 642 | if (link) { | 644 | if (link_changed) { |
| 643 | netif_carrier_on(dev->net); | 645 | data->link_counter++; |
| 644 | usbnet_defer_kevent(dev, EVENT_LINK_RESET); | 646 | /* |
| 645 | } else | 647 | track the link state 20 times to guard against erroneous |
| 646 | netif_carrier_off(dev->net); | 648 | link state changes sometimes reported by the chip |
| 647 | netdev_dbg(dev->net, "Link Status is: %d\n", link); | 649 | */ |
| 648 | } | 650 | if (data->link_counter > 20) { |
| 651 | data->link_counter = 0; | ||
| 652 | if (link) { | ||
| 653 | netif_carrier_on(dev->net); | ||
| 654 | usbnet_defer_kevent(dev, EVENT_LINK_RESET); | ||
| 655 | } else | ||
| 656 | netif_carrier_off(dev->net); | ||
| 657 | netdev_dbg(dev->net, "Link Status is: %d\n", link); | ||
| 658 | } | ||
| 659 | } else | ||
| 660 | data->link_counter = 0; | ||
| 649 | } | 661 | } |
| 650 | 662 | ||
| 651 | static const struct driver_info moschip_info = { | 663 | static const struct driver_info moschip_info = { |
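The status handler above now debounces the chip's link reports: a change must be seen on 20 consecutive status URBs before the carrier state flips, and any reading that matches the current state re-arms the counter. The same scheme, isolated into a hedged sketch:

    /* Returns true once a changed reading has persisted for more than
     * 20 polls; a stable reading resets the count, as does acting. */
    static bool link_change_debounced(u8 *counter, bool changed)
    {
            if (!changed) {
                    *counter = 0;
                    return false;
            }
            if (++(*counter) > 20) {
                    *counter = 0;
                    return true;
            }
            return false;
    }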
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index fc9f578a1e25..f9819d10b1f9 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
| @@ -1588,10 +1588,27 @@ int usbnet_resume (struct usb_interface *intf) | |||
| 1588 | tasklet_schedule (&dev->bh); | 1588 | tasklet_schedule (&dev->bh); |
| 1589 | } | 1589 | } |
| 1590 | } | 1590 | } |
| 1591 | |||
| 1592 | if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) | ||
| 1593 | usb_autopm_get_interface_no_resume(intf); | ||
| 1594 | |||
| 1591 | return 0; | 1595 | return 0; |
| 1592 | } | 1596 | } |
| 1593 | EXPORT_SYMBOL_GPL(usbnet_resume); | 1597 | EXPORT_SYMBOL_GPL(usbnet_resume); |
| 1594 | 1598 | ||
| 1599 | /* | ||
| 1600 | * Either a subdriver implements manage_power, in which case it is assumed | ||
| 1601 | * to always be ready to be suspended, or it reports its readiness to be | ||
| 1602 | * suspended explicitly | ||
| 1603 | */ | ||
| 1604 | void usbnet_device_suggests_idle(struct usbnet *dev) | ||
| 1605 | { | ||
| 1606 | if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) { | ||
| 1607 | dev->intf->needs_remote_wakeup = 1; | ||
| 1608 | usb_autopm_put_interface_async(dev->intf); | ||
| 1609 | } | ||
| 1610 | } | ||
| 1611 | EXPORT_SYMBOL(usbnet_device_suggests_idle); | ||
| 1595 | 1612 | ||
| 1596 | /*-------------------------------------------------------------------------*/ | 1613 | /*-------------------------------------------------------------------------*/ |
| 1597 | 1614 | ||
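usbnet_device_suggests_idle() drops one autopm reference (allowing the USB core to autosuspend) and flags the device so that usbnet_resume() re-takes the reference via EVENT_DEVICE_REPORT_IDLE, keeping the get/put calls balanced; the test_and_set_bit() also makes repeated hints harmless. A subdriver only needs to forward the device's hint, as in this sketch (hypothetical handler; cdc_eem's real caller is in the hunk further up):

    static void my_handle_device_hint(struct usbnet *dev, u8 hint)
    {
            if (hint == 2)  /* e.g. the EEM "suspend" command code */
                    usbnet_device_suggests_idle(dev);
    }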
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8be9bf07bd39..607976c00162 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -106,6 +106,8 @@ struct vxlan_dev { | |||
| 106 | __be32 gaddr; /* multicast group */ | 106 | __be32 gaddr; /* multicast group */ |
| 107 | __be32 saddr; /* source address */ | 107 | __be32 saddr; /* source address */ |
| 108 | unsigned int link; /* link to multicast over */ | 108 | unsigned int link; /* link to multicast over */ |
| 109 | __u16 port_min; /* source port range */ | ||
| 110 | __u16 port_max; | ||
| 109 | __u8 tos; /* TOS override */ | 111 | __u8 tos; /* TOS override */ |
| 110 | __u8 ttl; | 112 | __u8 ttl; |
| 111 | bool learn; | 113 | bool learn; |
| @@ -228,9 +230,9 @@ static u32 eth_hash(const unsigned char *addr) | |||
| 228 | 230 | ||
| 229 | /* only want 6 bytes */ | 231 | /* only want 6 bytes */ |
| 230 | #ifdef __BIG_ENDIAN | 232 | #ifdef __BIG_ENDIAN |
| 231 | value <<= 16; | ||
| 232 | #else | ||
| 233 | value >>= 16; | 233 | value >>= 16; |
| 234 | #else | ||
| 235 | value <<= 16; | ||
| 234 | #endif | 236 | #endif |
| 235 | return hash_64(value, FDB_HASH_BITS); | 237 | return hash_64(value, FDB_HASH_BITS); |
| 236 | } | 238 | } |
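The swapped shifts above are an endianness fix: eth_hash() loads 8 bytes for a 6-byte MAC address, and the two bytes of junk land at opposite ends of the word depending on byte order. A standalone sketch of the corrected extraction (assuming the get_unaligned() load used by the surrounding function):

    /* Big-endian: MAC in the high 48 bits, junk in the low 16 -> shift right.
     * Little-endian: MAC in the low 48 bits, junk in the high 16 -> shift left.
     * Either way hash_64() then sees only MAC bits. */
    static u64 mac_to_u64(const unsigned char *addr)
    {
            u64 value = get_unaligned((u64 *)addr); /* reads 2 bytes past the MAC */

    #ifdef __BIG_ENDIAN
            return value >> 16;
    #else
            return value << 16;
    #endif
    }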
| @@ -535,7 +537,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
| 535 | } | 537 | } |
| 536 | 538 | ||
| 537 | __skb_pull(skb, sizeof(struct vxlanhdr)); | 539 | __skb_pull(skb, sizeof(struct vxlanhdr)); |
| 538 | skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr)); | ||
| 539 | 540 | ||
| 540 | /* Is this VNI defined? */ | 541 | /* Is this VNI defined? */ |
| 541 | vni = ntohl(vxh->vx_vni) >> 8; | 542 | vni = ntohl(vxh->vx_vni) >> 8; |
| @@ -554,7 +555,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
| 554 | /* Re-examine inner Ethernet packet */ | 555 | /* Re-examine inner Ethernet packet */ |
| 555 | oip = ip_hdr(skb); | 556 | oip = ip_hdr(skb); |
| 556 | skb->protocol = eth_type_trans(skb, vxlan->dev); | 557 | skb->protocol = eth_type_trans(skb, vxlan->dev); |
| 557 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | ||
| 558 | 558 | ||
| 559 | /* Ignore packet loops (and multicast echo) */ | 559 | /* Ignore packet loops (and multicast echo) */ |
| 560 | if (compare_ether_addr(eth_hdr(skb)->h_source, | 560 | if (compare_ether_addr(eth_hdr(skb)->h_source, |
| @@ -566,6 +566,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
| 566 | 566 | ||
| 567 | __skb_tunnel_rx(skb, vxlan->dev); | 567 | __skb_tunnel_rx(skb, vxlan->dev); |
| 568 | skb_reset_network_header(skb); | 568 | skb_reset_network_header(skb); |
| 569 | skb->ip_summed = CHECKSUM_NONE; | ||
| 569 | 570 | ||
| 570 | err = IP_ECN_decapsulate(oip, skb); | 571 | err = IP_ECN_decapsulate(oip, skb); |
| 571 | if (unlikely(err)) { | 572 | if (unlikely(err)) { |
| @@ -621,46 +622,89 @@ static inline u8 vxlan_ecn_encap(u8 tos, | |||
| 621 | return INET_ECN_encapsulate(tos, inner); | 622 | return INET_ECN_encapsulate(tos, inner); |
| 622 | } | 623 | } |
| 623 | 624 | ||
| 625 | static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb) | ||
| 626 | { | ||
| 627 | const struct ethhdr *eth = (struct ethhdr *) skb->data; | ||
| 628 | const struct vxlan_fdb *f; | ||
| 629 | |||
| 630 | if (is_multicast_ether_addr(eth->h_dest)) | ||
| 631 | return vxlan->gaddr; | ||
| 632 | |||
| 633 | f = vxlan_find_mac(vxlan, eth->h_dest); | ||
| 634 | if (f) | ||
| 635 | return f->remote_ip; | ||
| 636 | else | ||
| 637 | return vxlan->gaddr; | ||
| 638 | |||
| 639 | } | ||
| 640 | |||
| 641 | static void vxlan_sock_free(struct sk_buff *skb) | ||
| 642 | { | ||
| 643 | sock_put(skb->sk); | ||
| 644 | } | ||
| 645 | |||
| 646 | /* On transmit, associate with the tunnel socket */ | ||
| 647 | static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) | ||
| 648 | { | ||
| 649 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | ||
| 650 | struct sock *sk = vn->sock->sk; | ||
| 651 | |||
| 652 | skb_orphan(skb); | ||
| 653 | sock_hold(sk); | ||
| 654 | skb->sk = sk; | ||
| 655 | skb->destructor = vxlan_sock_free; | ||
| 656 | } | ||
| 657 | |||
| 658 | /* Compute the source port for an outgoing packet: | ||
| 659 | * first choice is the L4 flow hash, since it spreads | ||
| 660 | * better and may be available from hardware; | ||
| 661 | * second choice is a jhash over the Ethernet header | ||
| 662 | */ | ||
| 663 | static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) | ||
| 664 | { | ||
| 665 | unsigned int range = (vxlan->port_max - vxlan->port_min) + 1; | ||
| 666 | u32 hash; | ||
| 667 | |||
| 668 | hash = skb_get_rxhash(skb); | ||
| 669 | if (!hash) | ||
| 670 | hash = jhash(skb->data, 2 * ETH_ALEN, | ||
| 671 | (__force u32) skb->protocol); | ||
| 672 | |||
| 673 | return (((u64) hash * range) >> 32) + vxlan->port_min; | ||
| 674 | } | ||
| 675 | |||
| 624 | /* Transmit local packets over Vxlan | 676 | /* Transmit local packets over Vxlan |
| 625 | * | 677 | * |
| 626 | * Outer IP header inherits ECN and DF from inner header. | 678 | * Outer IP header inherits ECN and DF from inner header. |
| 627 | * Outer UDP destination is the VXLAN assigned port. | 679 | * Outer UDP destination is the VXLAN assigned port. |
| 628 | * source port is based on hash of flow if available | 680 | * source port is based on hash of flow |
| 629 | * otherwise use a random value | ||
| 630 | */ | 681 | */ |
| 631 | static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) | 682 | static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) |
| 632 | { | 683 | { |
| 633 | struct vxlan_dev *vxlan = netdev_priv(dev); | 684 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 634 | struct rtable *rt; | 685 | struct rtable *rt; |
| 635 | const struct ethhdr *eth; | ||
| 636 | const struct iphdr *old_iph; | 686 | const struct iphdr *old_iph; |
| 637 | struct iphdr *iph; | 687 | struct iphdr *iph; |
| 638 | struct vxlanhdr *vxh; | 688 | struct vxlanhdr *vxh; |
| 639 | struct udphdr *uh; | 689 | struct udphdr *uh; |
| 640 | struct flowi4 fl4; | 690 | struct flowi4 fl4; |
| 641 | struct vxlan_fdb *f; | ||
| 642 | unsigned int pkt_len = skb->len; | 691 | unsigned int pkt_len = skb->len; |
| 643 | u32 hash; | ||
| 644 | __be32 dst; | 692 | __be32 dst; |
| 693 | __u16 src_port; | ||
| 645 | __be16 df = 0; | 694 | __be16 df = 0; |
| 646 | __u8 tos, ttl; | 695 | __u8 tos, ttl; |
| 647 | int err; | 696 | int err; |
| 648 | 697 | ||
| 698 | dst = vxlan_find_dst(vxlan, skb); | ||
| 699 | if (!dst) | ||
| 700 | goto drop; | ||
| 701 | |||
| 649 | /* Need space for new headers (invalidates iph ptr) */ | 702 | /* Need space for new headers (invalidates iph ptr) */ |
| 650 | if (skb_cow_head(skb, VXLAN_HEADROOM)) | 703 | if (skb_cow_head(skb, VXLAN_HEADROOM)) |
| 651 | goto drop; | 704 | goto drop; |
| 652 | 705 | ||
| 653 | eth = (void *)skb->data; | ||
| 654 | old_iph = ip_hdr(skb); | 706 | old_iph = ip_hdr(skb); |
| 655 | 707 | ||
| 656 | if (!is_multicast_ether_addr(eth->h_dest) && | ||
| 657 | (f = vxlan_find_mac(vxlan, eth->h_dest))) | ||
| 658 | dst = f->remote_ip; | ||
| 659 | else if (vxlan->gaddr) { | ||
| 660 | dst = vxlan->gaddr; | ||
| 661 | } else | ||
| 662 | goto drop; | ||
| 663 | |||
| 664 | ttl = vxlan->ttl; | 708 | ttl = vxlan->ttl; |
| 665 | if (!ttl && IN_MULTICAST(ntohl(dst))) | 709 | if (!ttl && IN_MULTICAST(ntohl(dst))) |
| 666 | ttl = 1; | 710 | ttl = 1; |
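vxlan_src_port() above maps a 32-bit hash onto the configured range with a widening multiply rather than a modulo, avoiding a division and the bias of hash % range. The core of the mapping as a standalone sketch:

    /* ((u64)hash * range) >> 32 scales hash from [0, 2^32) onto
     * [0, range) nearly uniformly; adding port_min then shifts the
     * result into [port_min, port_max]. */
    static u16 hash_to_port(u32 hash, u16 port_min, u16 port_max)
    {
            u32 range = (u32)(port_max - port_min) + 1;

            return (u16)(((u64)hash * range) >> 32) + port_min;
    }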
| @@ -669,11 +713,15 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 669 | if (tos == 1) | 713 | if (tos == 1) |
| 670 | tos = vxlan_get_dsfield(old_iph, skb); | 714 | tos = vxlan_get_dsfield(old_iph, skb); |
| 671 | 715 | ||
| 672 | hash = skb_get_rxhash(skb); | 716 | src_port = vxlan_src_port(vxlan, skb); |
| 717 | |||
| 718 | memset(&fl4, 0, sizeof(fl4)); | ||
| 719 | fl4.flowi4_oif = vxlan->link; | ||
| 720 | fl4.flowi4_tos = RT_TOS(tos); | ||
| 721 | fl4.daddr = dst; | ||
| 722 | fl4.saddr = vxlan->saddr; | ||
| 673 | 723 | ||
| 674 | rt = ip_route_output_gre(dev_net(dev), &fl4, dst, | 724 | rt = ip_route_output_key(dev_net(dev), &fl4); |
| 675 | vxlan->saddr, vxlan->vni, | ||
| 676 | RT_TOS(tos), vxlan->link); | ||
| 677 | if (IS_ERR(rt)) { | 725 | if (IS_ERR(rt)) { |
| 678 | netdev_dbg(dev, "no route to %pI4\n", &dst); | 726 | netdev_dbg(dev, "no route to %pI4\n", &dst); |
| 679 | dev->stats.tx_carrier_errors++; | 727 | dev->stats.tx_carrier_errors++; |
| @@ -702,7 +750,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 702 | uh = udp_hdr(skb); | 750 | uh = udp_hdr(skb); |
| 703 | 751 | ||
| 704 | uh->dest = htons(vxlan_port); | 752 | uh->dest = htons(vxlan_port); |
| 705 | uh->source = hash ? :random32(); | 753 | uh->source = htons(src_port); |
| 706 | 754 | ||
| 707 | uh->len = htons(skb->len); | 755 | uh->len = htons(skb->len); |
| 708 | uh->check = 0; | 756 | uh->check = 0; |
| @@ -715,10 +763,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 715 | iph->frag_off = df; | 763 | iph->frag_off = df; |
| 716 | iph->protocol = IPPROTO_UDP; | 764 | iph->protocol = IPPROTO_UDP; |
| 717 | iph->tos = vxlan_ecn_encap(tos, old_iph, skb); | 765 | iph->tos = vxlan_ecn_encap(tos, old_iph, skb); |
| 718 | iph->daddr = fl4.daddr; | 766 | iph->daddr = dst; |
| 719 | iph->saddr = fl4.saddr; | 767 | iph->saddr = fl4.saddr; |
| 720 | iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); | 768 | iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); |
| 721 | 769 | ||
| 770 | vxlan_set_owner(dev, skb); | ||
| 771 | |||
| 722 | /* See __IPTUNNEL_XMIT */ | 772 | /* See __IPTUNNEL_XMIT */ |
| 723 | skb->ip_summed = CHECKSUM_NONE; | 773 | skb->ip_summed = CHECKSUM_NONE; |
| 724 | ip_select_ident(iph, &rt->dst, NULL); | 774 | ip_select_ident(iph, &rt->dst, NULL); |
| @@ -928,9 +978,11 @@ static void vxlan_setup(struct net_device *dev) | |||
| 928 | { | 978 | { |
| 929 | struct vxlan_dev *vxlan = netdev_priv(dev); | 979 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 930 | unsigned h; | 980 | unsigned h; |
| 981 | int low, high; | ||
| 931 | 982 | ||
| 932 | eth_hw_addr_random(dev); | 983 | eth_hw_addr_random(dev); |
| 933 | ether_setup(dev); | 984 | ether_setup(dev); |
| 985 | dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; | ||
| 934 | 986 | ||
| 935 | dev->netdev_ops = &vxlan_netdev_ops; | 987 | dev->netdev_ops = &vxlan_netdev_ops; |
| 936 | dev->destructor = vxlan_free; | 988 | dev->destructor = vxlan_free; |
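The new hard_header_len reserves the whole encapsulation up front. Assuming the mainline definition of VXLAN_HEADROOM, the arithmetic is:

    outer IPv4 (20) + outer UDP (8) + VXLAN header (8) + inner Ethernet (14) = 50 bytes

so upper layers allocate ETH_HLEN + VXLAN_HEADROOM of headroom and the skb_cow_head(skb, VXLAN_HEADROOM) in vxlan_xmit() normally finds nothing to reallocate.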
| @@ -947,6 +999,10 @@ static void vxlan_setup(struct net_device *dev) | |||
| 947 | vxlan->age_timer.function = vxlan_cleanup; | 999 | vxlan->age_timer.function = vxlan_cleanup; |
| 948 | vxlan->age_timer.data = (unsigned long) vxlan; | 1000 | vxlan->age_timer.data = (unsigned long) vxlan; |
| 949 | 1001 | ||
| 1002 | inet_get_local_port_range(&low, &high); | ||
| 1003 | vxlan->port_min = low; | ||
| 1004 | vxlan->port_max = high; | ||
| 1005 | |||
| 950 | vxlan->dev = dev; | 1006 | vxlan->dev = dev; |
| 951 | 1007 | ||
| 952 | for (h = 0; h < FDB_HASH_SIZE; ++h) | 1008 | for (h = 0; h < FDB_HASH_SIZE; ++h) |
| @@ -963,6 +1019,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { | |||
| 963 | [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, | 1019 | [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, |
| 964 | [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, | 1020 | [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, |
| 965 | [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, | 1021 | [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, |
| 1022 | [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, | ||
| 966 | }; | 1023 | }; |
| 967 | 1024 | ||
| 968 | static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) | 1025 | static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) |
| @@ -995,6 +1052,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
| 995 | return -EADDRNOTAVAIL; | 1052 | return -EADDRNOTAVAIL; |
| 996 | } | 1053 | } |
| 997 | } | 1054 | } |
| 1055 | |||
| 1056 | if (data[IFLA_VXLAN_PORT_RANGE]) { | ||
| 1057 | const struct ifla_vxlan_port_range *p | ||
| 1058 | = nla_data(data[IFLA_VXLAN_PORT_RANGE]); | ||
| 1059 | |||
| 1060 | if (ntohs(p->high) < ntohs(p->low)) { | ||
| 1061 | pr_debug("port range %u .. %u not valid\n", | ||
| 1062 | ntohs(p->low), ntohs(p->high)); | ||
| 1063 | return -EINVAL; | ||
| 1064 | } | ||
| 1065 | } | ||
| 1066 | |||
| 998 | return 0; | 1067 | return 0; |
| 999 | } | 1068 | } |
| 1000 | 1069 | ||
| @@ -1021,14 +1090,18 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
| 1021 | if (data[IFLA_VXLAN_LOCAL]) | 1090 | if (data[IFLA_VXLAN_LOCAL]) |
| 1022 | vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); | 1091 | vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); |
| 1023 | 1092 | ||
| 1024 | if (data[IFLA_VXLAN_LINK]) { | 1093 | if (data[IFLA_VXLAN_LINK] && |
| 1025 | vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]); | 1094 | (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) { |
| 1095 | struct net_device *lowerdev | ||
| 1096 | = __dev_get_by_index(net, vxlan->link); | ||
| 1026 | 1097 | ||
| 1027 | if (!tb[IFLA_MTU]) { | 1098 | if (!lowerdev) { |
| 1028 | struct net_device *lowerdev; | 1099 | pr_info("ifindex %d does not exist\n", vxlan->link); |
| 1029 | lowerdev = __dev_get_by_index(net, vxlan->link); | 1100 | return -ENODEV; |
| 1030 | dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; | ||
| 1031 | } | 1101 | } |
| 1102 | |||
| 1103 | if (!tb[IFLA_MTU]) | ||
| 1104 | dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; | ||
| 1032 | } | 1105 | } |
| 1033 | 1106 | ||
| 1034 | if (data[IFLA_VXLAN_TOS]) | 1107 | if (data[IFLA_VXLAN_TOS]) |
| @@ -1045,6 +1118,13 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
| 1045 | if (data[IFLA_VXLAN_LIMIT]) | 1118 | if (data[IFLA_VXLAN_LIMIT]) |
| 1046 | vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); | 1119 | vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); |
| 1047 | 1120 | ||
| 1121 | if (data[IFLA_VXLAN_PORT_RANGE]) { | ||
| 1122 | const struct ifla_vxlan_port_range *p | ||
| 1123 | = nla_data(data[IFLA_VXLAN_PORT_RANGE]); | ||
| 1124 | vxlan->port_min = ntohs(p->low); | ||
| 1125 | vxlan->port_max = ntohs(p->high); | ||
| 1126 | } | ||
| 1127 | |||
| 1048 | err = register_netdevice(dev); | 1128 | err = register_netdevice(dev); |
| 1049 | if (!err) | 1129 | if (!err) |
| 1050 | hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); | 1130 | hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); |
| @@ -1073,12 +1153,17 @@ static size_t vxlan_get_size(const struct net_device *dev) | |||
| 1073 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ | 1153 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ |
| 1074 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ | 1154 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ |
| 1075 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ | 1155 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ |
| 1156 | nla_total_size(sizeof(struct ifla_vxlan_port_range)) + | ||
| 1076 | 0; | 1157 | 0; |
| 1077 | } | 1158 | } |
| 1078 | 1159 | ||
| 1079 | static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) | 1160 | static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) |
| 1080 | { | 1161 | { |
| 1081 | const struct vxlan_dev *vxlan = netdev_priv(dev); | 1162 | const struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1163 | struct ifla_vxlan_port_range ports = { | ||
| 1164 | .low = htons(vxlan->port_min), | ||
| 1165 | .high = htons(vxlan->port_max), | ||
| 1166 | }; | ||
| 1082 | 1167 | ||
| 1083 | if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) | 1168 | if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) |
| 1084 | goto nla_put_failure; | 1169 | goto nla_put_failure; |
| @@ -1099,6 +1184,9 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
| 1099 | nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) | 1184 | nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) |
| 1100 | goto nla_put_failure; | 1185 | goto nla_put_failure; |
| 1101 | 1186 | ||
| 1187 | if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) | ||
| 1188 | goto nla_put_failure; | ||
| 1189 | |||
| 1102 | return 0; | 1190 | return 0; |
| 1103 | 1191 | ||
| 1104 | nla_put_failure: | 1192 | nla_put_failure: |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9fd6d9a9942e..9f31cfa56cc0 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
| @@ -1804,7 +1804,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
| 1804 | { | 1804 | { |
| 1805 | int ret; | 1805 | int ret; |
| 1806 | struct ath5k_hw *ah = hw->priv; | 1806 | struct ath5k_hw *ah = hw->priv; |
| 1807 | struct ath5k_vif *avf = (void *)vif->drv_priv; | 1807 | struct ath5k_vif *avf; |
| 1808 | struct sk_buff *skb; | 1808 | struct sk_buff *skb; |
| 1809 | 1809 | ||
| 1810 | if (WARN_ON(!vif)) { | 1810 | if (WARN_ON(!vif)) { |
| @@ -1819,6 +1819,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
| 1819 | goto out; | 1819 | goto out; |
| 1820 | } | 1820 | } |
| 1821 | 1821 | ||
| 1822 | avf = (void *)vif->drv_priv; | ||
| 1822 | ath5k_txbuf_free_skb(ah, avf->bbuf); | 1823 | ath5k_txbuf_free_skb(ah, avf->bbuf); |
| 1823 | avf->bbuf->skb = skb; | 1824 | avf->bbuf->skb = skb; |
| 1824 | ret = ath5k_beacon_setup(ah, avf->bbuf); | 1825 | ret = ath5k_beacon_setup(ah, avf->bbuf); |
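The ath5k change above is a use-before-check fix: the old code computed through vif->drv_priv in the initializer, before the WARN_ON(!vif) guard, which is undefined behaviour on a NULL vif and entitles the compiler to delete the check. The shape of the fix, as a sketch (use() is a hypothetical consumer):

    static int fixed(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
    {
            struct ath5k_vif *avf;          /* declare only */

            if (WARN_ON(!vif))
                    return -EINVAL;

            avf = (void *)vif->drv_priv;    /* dereference after the guard */
            return use(avf);
    }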
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 76f07d8c272d..1b48414dca95 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
| @@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 120 | 120 | ||
| 121 | if (ath_tx_start(hw, skb, &txctl) != 0) { | 121 | if (ath_tx_start(hw, skb, &txctl) != 0) { |
| 122 | ath_dbg(common, XMIT, "CABQ TX failed\n"); | 122 | ath_dbg(common, XMIT, "CABQ TX failed\n"); |
| 123 | dev_kfree_skb_any(skb); | 123 | ieee80211_free_txskb(hw, skb); |
| 124 | } | 124 | } |
| 125 | } | 125 | } |
| 126 | 126 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index f9a6ec5cf470..8e1559aba495 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -1450,9 +1450,14 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) | |||
| 1450 | REG_WRITE(ah, AR_RTC_FORCE_WAKE, | 1450 | REG_WRITE(ah, AR_RTC_FORCE_WAKE, |
| 1451 | AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); | 1451 | AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); |
| 1452 | 1452 | ||
| 1453 | if (!ah->reset_power_on) | ||
| 1454 | type = ATH9K_RESET_POWER_ON; | ||
| 1455 | |||
| 1453 | switch (type) { | 1456 | switch (type) { |
| 1454 | case ATH9K_RESET_POWER_ON: | 1457 | case ATH9K_RESET_POWER_ON: |
| 1455 | ret = ath9k_hw_set_reset_power_on(ah); | 1458 | ret = ath9k_hw_set_reset_power_on(ah); |
| 1459 | if (!ret) | ||
| 1460 | ah->reset_power_on = true; | ||
| 1456 | break; | 1461 | break; |
| 1457 | case ATH9K_RESET_WARM: | 1462 | case ATH9K_RESET_WARM: |
| 1458 | case ATH9K_RESET_COLD: | 1463 | case ATH9K_RESET_COLD: |
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 566a4ce4f156..dbc1b7a4cbfd 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
| @@ -741,6 +741,7 @@ struct ath_hw { | |||
| 741 | u32 rfkill_polarity; | 741 | u32 rfkill_polarity; |
| 742 | u32 ah_flags; | 742 | u32 ah_flags; |
| 743 | 743 | ||
| 744 | bool reset_power_on; | ||
| 744 | bool htc_reset_init; | 745 | bool htc_reset_init; |
| 745 | 746 | ||
| 746 | enum nl80211_iftype opmode; | 747 | enum nl80211_iftype opmode; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 31ab82e3ba85..dd45edfa6bae 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -639,8 +639,7 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
| 639 | ath_err(common, | 639 | ath_err(common, |
| 640 | "Unable to reset hardware; reset status %d (freq %u MHz)\n", | 640 | "Unable to reset hardware; reset status %d (freq %u MHz)\n", |
| 641 | r, curchan->center_freq); | 641 | r, curchan->center_freq); |
| 642 | spin_unlock_bh(&sc->sc_pcu_lock); | 642 | ah->reset_power_on = false; |
| 643 | goto mutex_unlock; | ||
| 644 | } | 643 | } |
| 645 | 644 | ||
| 646 | /* Setup our intr mask. */ | 645 | /* Setup our intr mask. */ |
| @@ -665,11 +664,8 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
| 665 | clear_bit(SC_OP_INVALID, &sc->sc_flags); | 664 | clear_bit(SC_OP_INVALID, &sc->sc_flags); |
| 666 | sc->sc_ah->is_monitoring = false; | 665 | sc->sc_ah->is_monitoring = false; |
| 667 | 666 | ||
| 668 | if (!ath_complete_reset(sc, false)) { | 667 | if (!ath_complete_reset(sc, false)) |
| 669 | r = -EIO; | 668 | ah->reset_power_on = false; |
| 670 | spin_unlock_bh(&sc->sc_pcu_lock); | ||
| 671 | goto mutex_unlock; | ||
| 672 | } | ||
| 673 | 669 | ||
| 674 | if (ah->led_pin >= 0) { | 670 | if (ah->led_pin >= 0) { |
| 675 | ath9k_hw_cfg_output(ah, ah->led_pin, | 671 | ath9k_hw_cfg_output(ah, ah->led_pin, |
| @@ -688,12 +684,11 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
| 688 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) | 684 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) |
| 689 | common->bus_ops->extn_synch_en(common); | 685 | common->bus_ops->extn_synch_en(common); |
| 690 | 686 | ||
| 691 | mutex_unlock: | ||
| 692 | mutex_unlock(&sc->mutex); | 687 | mutex_unlock(&sc->mutex); |
| 693 | 688 | ||
| 694 | ath9k_ps_restore(sc); | 689 | ath9k_ps_restore(sc); |
| 695 | 690 | ||
| 696 | return r; | 691 | return 0; |
| 697 | } | 692 | } |
| 698 | 693 | ||
| 699 | static void ath9k_tx(struct ieee80211_hw *hw, | 694 | static void ath9k_tx(struct ieee80211_hw *hw, |
| @@ -770,7 +765,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, | |||
| 770 | 765 | ||
| 771 | return; | 766 | return; |
| 772 | exit: | 767 | exit: |
| 773 | dev_kfree_skb_any(skb); | 768 | ieee80211_free_txskb(hw, skb); |
| 774 | } | 769 | } |
| 775 | 770 | ||
| 776 | static void ath9k_stop(struct ieee80211_hw *hw) | 771 | static void ath9k_stop(struct ieee80211_hw *hw) |
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 0e630a99b68b..f088f4bf9a26 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
| @@ -324,6 +324,10 @@ static int ath_pci_suspend(struct device *device) | |||
| 324 | static int ath_pci_resume(struct device *device) | 324 | static int ath_pci_resume(struct device *device) |
| 325 | { | 325 | { |
| 326 | struct pci_dev *pdev = to_pci_dev(device); | 326 | struct pci_dev *pdev = to_pci_dev(device); |
| 327 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
| 328 | struct ath_softc *sc = hw->priv; | ||
| 329 | struct ath_hw *ah = sc->sc_ah; | ||
| 330 | struct ath_common *common = ath9k_hw_common(ah); | ||
| 327 | u32 val; | 331 | u32 val; |
| 328 | 332 | ||
| 329 | /* | 333 | /* |
| @@ -335,6 +339,9 @@ static int ath_pci_resume(struct device *device) | |||
| 335 | if ((val & 0x0000ff00) != 0) | 339 | if ((val & 0x0000ff00) != 0) |
| 336 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | 340 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); |
| 337 | 341 | ||
| 342 | ath_pci_aspm_init(common); | ||
| 343 | ah->reset_power_on = false; | ||
| 344 | |||
| 338 | return 0; | 345 | return 0; |
| 339 | } | 346 | } |
| 340 | 347 | ||
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 36618e3a5e60..378bd70256b2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
| 66 | static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | 66 | static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, |
| 67 | struct ath_txq *txq, | 67 | struct ath_txq *txq, |
| 68 | struct ath_atx_tid *tid, | 68 | struct ath_atx_tid *tid, |
| 69 | struct sk_buff *skb, | 69 | struct sk_buff *skb); |
| 70 | bool dequeue); | ||
| 71 | 70 | ||
| 72 | enum { | 71 | enum { |
| 73 | MCS_HT20, | 72 | MCS_HT20, |
| @@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |||
| 176 | fi = get_frame_info(skb); | 175 | fi = get_frame_info(skb); |
| 177 | bf = fi->bf; | 176 | bf = fi->bf; |
| 178 | 177 | ||
| 179 | if (bf && fi->retries) { | 178 | if (!bf) { |
| 179 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); | ||
| 180 | if (!bf) { | ||
| 181 | ieee80211_free_txskb(sc->hw, skb); | ||
| 182 | continue; | ||
| 183 | } | ||
| 184 | } | ||
| 185 | |||
| 186 | if (fi->retries) { | ||
| 180 | list_add_tail(&bf->list, &bf_head); | 187 | list_add_tail(&bf->list, &bf_head); |
| 181 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); | 188 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); |
| 182 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); | 189 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); |
| @@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | |||
| 785 | fi = get_frame_info(skb); | 792 | fi = get_frame_info(skb); |
| 786 | bf = fi->bf; | 793 | bf = fi->bf; |
| 787 | if (!fi->bf) | 794 | if (!fi->bf) |
| 788 | bf = ath_tx_setup_buffer(sc, txq, tid, skb, true); | 795 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
| 789 | 796 | ||
| 790 | if (!bf) | 797 | if (!bf) { |
| 798 | __skb_unlink(skb, &tid->buf_q); | ||
| 799 | ieee80211_free_txskb(sc->hw, skb); | ||
| 791 | continue; | 800 | continue; |
| 801 | } | ||
| 792 | 802 | ||
| 793 | bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; | 803 | bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; |
| 794 | seqno = bf->bf_state.seqno; | 804 | seqno = bf->bf_state.seqno; |
| @@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
| 1731 | return; | 1741 | return; |
| 1732 | } | 1742 | } |
| 1733 | 1743 | ||
| 1734 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); | 1744 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); |
| 1735 | if (!bf) | 1745 | if (!bf) { |
| 1746 | ieee80211_free_txskb(sc->hw, skb); | ||
| 1736 | return; | 1747 | return; |
| 1748 | } | ||
| 1737 | 1749 | ||
| 1738 | bf->bf_state.bf_type = BUF_AMPDU; | 1750 | bf->bf_state.bf_type = BUF_AMPDU; |
| 1739 | INIT_LIST_HEAD(&bf_head); | 1751 | INIT_LIST_HEAD(&bf_head); |
| @@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, | |||
| 1757 | struct ath_buf *bf; | 1769 | struct ath_buf *bf; |
| 1758 | 1770 | ||
| 1759 | bf = fi->bf; | 1771 | bf = fi->bf; |
| 1760 | if (!bf) | ||
| 1761 | bf = ath_tx_setup_buffer(sc, txq, tid, skb, false); | ||
| 1762 | |||
| 1763 | if (!bf) | ||
| 1764 | return; | ||
| 1765 | 1772 | ||
| 1766 | INIT_LIST_HEAD(&bf_head); | 1773 | INIT_LIST_HEAD(&bf_head); |
| 1767 | list_add_tail(&bf->list, &bf_head); | 1774 | list_add_tail(&bf->list, &bf_head); |
| @@ -1839,8 +1846,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) | |||
| 1839 | static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | 1846 | static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, |
| 1840 | struct ath_txq *txq, | 1847 | struct ath_txq *txq, |
| 1841 | struct ath_atx_tid *tid, | 1848 | struct ath_atx_tid *tid, |
| 1842 | struct sk_buff *skb, | 1849 | struct sk_buff *skb) |
| 1843 | bool dequeue) | ||
| 1844 | { | 1850 | { |
| 1845 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 1851 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 1846 | struct ath_frame_info *fi = get_frame_info(skb); | 1852 | struct ath_frame_info *fi = get_frame_info(skb); |
| @@ -1852,7 +1858,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 1852 | bf = ath_tx_get_buffer(sc); | 1858 | bf = ath_tx_get_buffer(sc); |
| 1853 | if (!bf) { | 1859 | if (!bf) { |
| 1854 | ath_dbg(common, XMIT, "TX buffers are full\n"); | 1860 | ath_dbg(common, XMIT, "TX buffers are full\n"); |
| 1855 | goto error; | 1861 | return NULL; |
| 1856 | } | 1862 | } |
| 1857 | 1863 | ||
| 1858 | ATH_TXBUF_RESET(bf); | 1864 | ATH_TXBUF_RESET(bf); |
| @@ -1881,18 +1887,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 1881 | ath_err(ath9k_hw_common(sc->sc_ah), | 1887 | ath_err(ath9k_hw_common(sc->sc_ah), |
| 1882 | "dma_mapping_error() on TX\n"); | 1888 | "dma_mapping_error() on TX\n"); |
| 1883 | ath_tx_return_buffer(sc, bf); | 1889 | ath_tx_return_buffer(sc, bf); |
| 1884 | goto error; | 1890 | return NULL; |
| 1885 | } | 1891 | } |
| 1886 | 1892 | ||
| 1887 | fi->bf = bf; | 1893 | fi->bf = bf; |
| 1888 | 1894 | ||
| 1889 | return bf; | 1895 | return bf; |
| 1890 | |||
| 1891 | error: | ||
| 1892 | if (dequeue) | ||
| 1893 | __skb_unlink(skb, &tid->buf_q); | ||
| 1894 | dev_kfree_skb_any(skb); | ||
| 1895 | return NULL; | ||
| 1896 | } | 1896 | } |
| 1897 | 1897 | ||
| 1898 | /* FIXME: tx power */ | 1898 | /* FIXME: tx power */ |
| @@ -1921,9 +1921,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, | |||
| 1921 | */ | 1921 | */ |
| 1922 | ath_tx_send_ampdu(sc, tid, skb, txctl); | 1922 | ath_tx_send_ampdu(sc, tid, skb, txctl); |
| 1923 | } else { | 1923 | } else { |
| 1924 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false); | 1924 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); |
| 1925 | if (!bf) | 1925 | if (!bf) { |
| 1926 | if (txctl->paprd) | ||
| 1927 | dev_kfree_skb_any(skb); | ||
| 1928 | else | ||
| 1929 | ieee80211_free_txskb(sc->hw, skb); | ||
| 1926 | return; | 1930 | return; |
| 1931 | } | ||
| 1927 | 1932 | ||
| 1928 | bf->bf_state.bfs_paprd = txctl->paprd; | 1933 | bf->bf_state.bfs_paprd = txctl->paprd; |
| 1929 | 1934 | ||
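Taken together, the xmit.c hunks above enforce one ownership rule for dropped frames: anything handed in by mac80211 must be returned with ieee80211_free_txskb() so the stack can reclaim its per-skb TX state, while driver-internal frames (the PAPRD calibration case) may still use dev_kfree_skb_any(). Condensed into a hedged helper (hypothetical name):

    static void ath_drop_tx_skb(struct ieee80211_hw *hw, struct sk_buff *skb,
                                bool internal_frame)
    {
            if (internal_frame)
                    dev_kfree_skb_any(skb);         /* e.g. PAPRD test frames */
            else
                    ieee80211_free_txskb(hw, skb);  /* mac80211-owned frames */
    }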
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 2aa4a59c72c8..2df17f1e49ef 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h | |||
| @@ -303,6 +303,7 @@ struct ar9170 { | |||
| 303 | unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; | 303 | unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; |
| 304 | unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; | 304 | unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; |
| 305 | bool needs_full_reset; | 305 | bool needs_full_reset; |
| 306 | bool force_usb_reset; | ||
| 306 | atomic_t pending_restarts; | 307 | atomic_t pending_restarts; |
| 307 | 308 | ||
| 308 | /* interface mode settings */ | 309 | /* interface mode settings */ |
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 67997b39aba7..25a1e2f4f738 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
| @@ -465,27 +465,26 @@ static void carl9170_restart_work(struct work_struct *work) | |||
| 465 | { | 465 | { |
| 466 | struct ar9170 *ar = container_of(work, struct ar9170, | 466 | struct ar9170 *ar = container_of(work, struct ar9170, |
| 467 | restart_work); | 467 | restart_work); |
| 468 | int err; | 468 | int err = -EIO; |
| 469 | 469 | ||
| 470 | ar->usedkeys = 0; | 470 | ar->usedkeys = 0; |
| 471 | ar->filter_state = 0; | 471 | ar->filter_state = 0; |
| 472 | carl9170_cancel_worker(ar); | 472 | carl9170_cancel_worker(ar); |
| 473 | 473 | ||
| 474 | mutex_lock(&ar->mutex); | 474 | mutex_lock(&ar->mutex); |
| 475 | err = carl9170_usb_restart(ar); | 475 | if (!ar->force_usb_reset) { |
| 476 | if (net_ratelimit()) { | 476 | err = carl9170_usb_restart(ar); |
| 477 | if (err) { | 477 | if (net_ratelimit()) { |
| 478 | dev_err(&ar->udev->dev, "Failed to restart device " | 478 | if (err) |
| 479 | " (%d).\n", err); | 479 | dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err); |
| 480 | } else { | 480 | else |
| 481 | dev_info(&ar->udev->dev, "device restarted " | 481 | dev_info(&ar->udev->dev, "device restarted successfully.\n"); |
| 482 | "successfully.\n"); | ||
| 483 | } | 482 | } |
| 484 | } | 483 | } |
| 485 | |||
| 486 | carl9170_zap_queues(ar); | 484 | carl9170_zap_queues(ar); |
| 487 | mutex_unlock(&ar->mutex); | 485 | mutex_unlock(&ar->mutex); |
| 488 | if (!err) { | 486 | |
| 487 | if (!err && !ar->force_usb_reset) { | ||
| 489 | ar->restart_counter++; | 488 | ar->restart_counter++; |
| 490 | atomic_set(&ar->pending_restarts, 0); | 489 | atomic_set(&ar->pending_restarts, 0); |
| 491 | 490 | ||
| @@ -526,10 +525,10 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r) | |||
| 526 | if (!ar->registered) | 525 | if (!ar->registered) |
| 527 | return; | 526 | return; |
| 528 | 527 | ||
| 529 | if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset) | 528 | if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset) |
| 530 | ieee80211_queue_work(ar->hw, &ar->restart_work); | 529 | ar->force_usb_reset = true; |
| 531 | else | 530 | |
| 532 | carl9170_usb_reset(ar); | 531 | ieee80211_queue_work(ar->hw, &ar->restart_work); |
| 533 | 532 | ||
| 534 | /* | 533 | /* |
| 535 | * At this point, the device instance might have vanished/disabled. | 534 | * At this point, the device instance might have vanished/disabled. |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 2691620393ea..0679458a1bac 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
| @@ -1596,8 +1596,9 @@ done: | |||
| 1596 | } | 1596 | } |
| 1597 | } | 1597 | } |
| 1598 | 1598 | ||
| 1599 | if (mwifiex_bss_start(priv, bss, &req_ssid)) | 1599 | ret = mwifiex_bss_start(priv, bss, &req_ssid); |
| 1600 | return -EFAULT; | 1600 | if (ret) |
| 1601 | return ret; | ||
| 1601 | 1602 | ||
| 1602 | if (mode == NL80211_IFTYPE_ADHOC) { | 1603 | if (mode == NL80211_IFTYPE_ADHOC) { |
| 1603 | /* Inform the BSS information to kernel, otherwise | 1604 | /* Inform the BSS information to kernel, otherwise |
| @@ -1652,9 +1653,19 @@ done: | |||
| 1652 | "info: association to bssid %pM failed\n", | 1653 | "info: association to bssid %pM failed\n", |
| 1653 | priv->cfg_bssid); | 1654 | priv->cfg_bssid); |
| 1654 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 1655 | memset(priv->cfg_bssid, 0, ETH_ALEN); |
| 1656 | |||
| 1657 | if (ret > 0) | ||
| 1658 | cfg80211_connect_result(priv->netdev, priv->cfg_bssid, | ||
| 1659 | NULL, 0, NULL, 0, ret, | ||
| 1660 | GFP_KERNEL); | ||
| 1661 | else | ||
| 1662 | cfg80211_connect_result(priv->netdev, priv->cfg_bssid, | ||
| 1663 | NULL, 0, NULL, 0, | ||
| 1664 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
| 1665 | GFP_KERNEL); | ||
| 1655 | } | 1666 | } |
| 1656 | 1667 | ||
| 1657 | return ret; | 1668 | return 0; |
| 1658 | } | 1669 | } |
| 1659 | 1670 | ||
| 1660 | /* | 1671 | /* |
| @@ -1802,7 +1813,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, | |||
| 1802 | { | 1813 | { |
| 1803 | struct net_device *dev = request->wdev->netdev; | 1814 | struct net_device *dev = request->wdev->netdev; |
| 1804 | struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); | 1815 | struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); |
| 1805 | int i, offset; | 1816 | int i, offset, ret; |
| 1806 | struct ieee80211_channel *chan; | 1817 | struct ieee80211_channel *chan; |
| 1807 | struct ieee_types_header *ie; | 1818 | struct ieee_types_header *ie; |
| 1808 | 1819 | ||
| @@ -1855,8 +1866,12 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, | |||
| 1855 | 1866 | ||
| 1856 | priv->user_scan_cfg->chan_list[i].scan_time = 0; | 1867 | priv->user_scan_cfg->chan_list[i].scan_time = 0; |
| 1857 | } | 1868 | } |
| 1858 | if (mwifiex_scan_networks(priv, priv->user_scan_cfg)) | 1869 | |
| 1859 | return -EFAULT; | 1870 | ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); |
| 1871 | if (ret) { | ||
| 1872 | dev_err(priv->adapter->dev, "scan failed: %d\n", ret); | ||
| 1873 | return ret; | ||
| 1874 | } | ||
| 1860 | 1875 | ||
| 1861 | if (request->ie && request->ie_len) { | 1876 | if (request->ie && request->ie_len) { |
| 1862 | for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { | 1877 | for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { |
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 82e63cee1e97..7b0858af8f5d 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c | |||
| @@ -1180,16 +1180,18 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, | |||
| 1180 | struct mwifiex_adapter *adapter = priv->adapter; | 1180 | struct mwifiex_adapter *adapter = priv->adapter; |
| 1181 | struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; | 1181 | struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result; |
| 1182 | struct mwifiex_bssdescriptor *bss_desc; | 1182 | struct mwifiex_bssdescriptor *bss_desc; |
| 1183 | u16 reason_code; | ||
| 1183 | 1184 | ||
| 1184 | adhoc_result = &resp->params.adhoc_result; | 1185 | adhoc_result = &resp->params.adhoc_result; |
| 1185 | 1186 | ||
| 1186 | bss_desc = priv->attempted_bss_desc; | 1187 | bss_desc = priv->attempted_bss_desc; |
| 1187 | 1188 | ||
| 1188 | /* Join result code 0 --> SUCCESS */ | 1189 | /* Join result code 0 --> SUCCESS */ |
| 1189 | if (le16_to_cpu(resp->result)) { | 1190 | reason_code = le16_to_cpu(resp->result); |
| 1191 | if (reason_code) { | ||
| 1190 | dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); | 1192 | dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n"); |
| 1191 | if (priv->media_connected) | 1193 | if (priv->media_connected) |
| 1192 | mwifiex_reset_connect_state(priv); | 1194 | mwifiex_reset_connect_state(priv, reason_code); |
| 1193 | 1195 | ||
| 1194 | memset(&priv->curr_bss_params.bss_descriptor, | 1196 | memset(&priv->curr_bss_params.bss_descriptor, |
| 1195 | 0x00, sizeof(struct mwifiex_bssdescriptor)); | 1197 | 0x00, sizeof(struct mwifiex_bssdescriptor)); |
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index bfb3fa69805c..c2d0ab146af5 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h | |||
| @@ -847,7 +847,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, | |||
| 847 | struct mwifiex_bssdescriptor *bss_desc); | 847 | struct mwifiex_bssdescriptor *bss_desc); |
| 848 | int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, | 848 | int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, |
| 849 | struct host_cmd_ds_command *resp); | 849 | struct host_cmd_ds_command *resp); |
| 850 | void mwifiex_reset_connect_state(struct mwifiex_private *priv); | 850 | void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); |
| 851 | u8 mwifiex_band_to_radio_type(u8 band); | 851 | u8 mwifiex_band_to_radio_type(u8 band); |
| 852 | int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); | 852 | int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); |
| 853 | int mwifiex_adhoc_start(struct mwifiex_private *priv, | 853 | int mwifiex_adhoc_start(struct mwifiex_private *priv, |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index e36a75988f87..00b658d3b6ec 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
| @@ -1296,7 +1296,7 @@ mwifiex_radio_type_to_band(u8 radio_type) | |||
| 1296 | int mwifiex_scan_networks(struct mwifiex_private *priv, | 1296 | int mwifiex_scan_networks(struct mwifiex_private *priv, |
| 1297 | const struct mwifiex_user_scan_cfg *user_scan_in) | 1297 | const struct mwifiex_user_scan_cfg *user_scan_in) |
| 1298 | { | 1298 | { |
| 1299 | int ret = 0; | 1299 | int ret; |
| 1300 | struct mwifiex_adapter *adapter = priv->adapter; | 1300 | struct mwifiex_adapter *adapter = priv->adapter; |
| 1301 | struct cmd_ctrl_node *cmd_node; | 1301 | struct cmd_ctrl_node *cmd_node; |
| 1302 | union mwifiex_scan_cmd_config_tlv *scan_cfg_out; | 1302 | union mwifiex_scan_cmd_config_tlv *scan_cfg_out; |
| @@ -1309,25 +1309,26 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
| 1309 | unsigned long flags; | 1309 | unsigned long flags; |
| 1310 | 1310 | ||
| 1311 | if (adapter->scan_processing) { | 1311 | if (adapter->scan_processing) { |
| 1312 | dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); | 1312 | dev_err(adapter->dev, "cmd: Scan already in process...\n"); |
| 1313 | return ret; | 1313 | return -EBUSY; |
| 1314 | } | 1314 | } |
| 1315 | 1315 | ||
| 1316 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
| 1317 | adapter->scan_processing = true; | ||
| 1318 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
| 1319 | |||
| 1320 | if (priv->scan_block) { | 1316 | if (priv->scan_block) { |
| 1321 | dev_dbg(adapter->dev, | 1317 | dev_err(adapter->dev, |
| 1322 | "cmd: Scan is blocked during association...\n"); | 1318 | "cmd: Scan is blocked during association...\n"); |
| 1323 | return ret; | 1319 | return -EBUSY; |
| 1324 | } | 1320 | } |
| 1325 | 1321 | ||
| 1322 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
| 1323 | adapter->scan_processing = true; | ||
| 1324 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
| 1325 | |||
| 1326 | scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), | 1326 | scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), |
| 1327 | GFP_KERNEL); | 1327 | GFP_KERNEL); |
| 1328 | if (!scan_cfg_out) { | 1328 | if (!scan_cfg_out) { |
| 1329 | dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); | 1329 | dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); |
| 1330 | return -ENOMEM; | 1330 | ret = -ENOMEM; |
| 1331 | goto done; | ||
| 1331 | } | 1332 | } |
| 1332 | 1333 | ||
| 1333 | buf_size = sizeof(struct mwifiex_chan_scan_param_set) * | 1334 | buf_size = sizeof(struct mwifiex_chan_scan_param_set) * |
| @@ -1336,7 +1337,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
| 1336 | if (!scan_chan_list) { | 1337 | if (!scan_chan_list) { |
| 1337 | dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); | 1338 | dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); |
| 1338 | kfree(scan_cfg_out); | 1339 | kfree(scan_cfg_out); |
| 1339 | return -ENOMEM; | 1340 | ret = -ENOMEM; |
| 1341 | goto done; | ||
| 1340 | } | 1342 | } |
| 1341 | 1343 | ||
| 1342 | mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config, | 1344 | mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config, |
| @@ -1364,14 +1366,16 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
| 1364 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | 1366 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, |
| 1365 | flags); | 1367 | flags); |
| 1366 | } | 1368 | } |
| 1367 | } else { | ||
| 1368 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
| 1369 | adapter->scan_processing = true; | ||
| 1370 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
| 1371 | } | 1369 | } |
| 1372 | 1370 | ||
| 1373 | kfree(scan_cfg_out); | 1371 | kfree(scan_cfg_out); |
| 1374 | kfree(scan_chan_list); | 1372 | kfree(scan_chan_list); |
| 1373 | done: | ||
| 1374 | if (ret) { | ||
| 1375 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
| 1376 | adapter->scan_processing = false; | ||
| 1377 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
| 1378 | } | ||
| 1375 | return ret; | 1379 | return ret; |
| 1376 | } | 1380 | } |
| 1377 | 1381 | ||
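The scan rework above fixes two things at once: the early exits now return real error codes instead of a stale 0, and scan_processing is set only after the -EBUSY checks and cleared again on any failure through the done: label, so a failed scan can no longer wedge future ones. The same shape as a hedged sketch (all names hypothetical):

    static int start_exclusive_op(struct my_adapter *ad)
    {
            void *buf;
            int ret;

            if (ad->op_in_progress)
                    return -EBUSY;
            ad->op_in_progress = true;

            buf = kzalloc(OP_BUF_SIZE, GFP_KERNEL);
            if (!buf) {
                    ret = -ENOMEM;
                    goto done;
            }
            ret = issue_cmd(ad, buf);       /* hypothetical dispatch */
            kfree(buf);
    done:
            if (ret)
                    ad->op_in_progress = false;     /* only success keeps the flag */
            return ret;
    }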
| @@ -1430,8 +1434,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, | |||
| 1430 | ret = mwifiex_is_network_compatible(priv, bss_desc, | 1434 | ret = mwifiex_is_network_compatible(priv, bss_desc, |
| 1431 | priv->bss_mode); | 1435 | priv->bss_mode); |
| 1432 | if (ret) | 1436 | if (ret) |
| 1433 | dev_err(priv->adapter->dev, "cannot find ssid " | 1437 | dev_err(priv->adapter->dev, |
| 1434 | "%s\n", bss_desc->ssid.ssid); | 1438 | "Incompatible network settings\n"); |
| 1435 | break; | 1439 | break; |
| 1436 | default: | 1440 | default: |
| 1437 | ret = 0; | 1441 | ret = 0; |
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index e380171c4c5d..09e6a267f566 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c | |||
| @@ -545,7 +545,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, | |||
| 545 | if (!memcmp(resp->params.deauth.mac_addr, | 545 | if (!memcmp(resp->params.deauth.mac_addr, |
| 546 | &priv->curr_bss_params.bss_descriptor.mac_address, | 546 | &priv->curr_bss_params.bss_descriptor.mac_address, |
| 547 | sizeof(resp->params.deauth.mac_addr))) | 547 | sizeof(resp->params.deauth.mac_addr))) |
| 548 | mwifiex_reset_connect_state(priv); | 548 | mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); |
| 549 | 549 | ||
| 550 | return 0; | 550 | return 0; |
| 551 | } | 551 | } |
| @@ -558,7 +558,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, | |||
| 558 | static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, | 558 | static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, |
| 559 | struct host_cmd_ds_command *resp) | 559 | struct host_cmd_ds_command *resp) |
| 560 | { | 560 | { |
| 561 | mwifiex_reset_connect_state(priv); | 561 | mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); |
| 562 | return 0; | 562 | return 0; |
| 563 | } | 563 | } |
| 564 | 564 | ||
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index aafde30e714a..8132119e1a21 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | * - Sends a disconnect event to upper layers/applications. | 41 | * - Sends a disconnect event to upper layers/applications. |
| 42 | */ | 42 | */ |
| 43 | void | 43 | void |
| 44 | mwifiex_reset_connect_state(struct mwifiex_private *priv) | 44 | mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) |
| 45 | { | 45 | { |
| 46 | struct mwifiex_adapter *adapter = priv->adapter; | 46 | struct mwifiex_adapter *adapter = priv->adapter; |
| 47 | 47 | ||
| @@ -117,10 +117,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) | |||
| 117 | priv->media_connected = false; | 117 | priv->media_connected = false; |
| 118 | dev_dbg(adapter->dev, | 118 | dev_dbg(adapter->dev, |
| 119 | "info: successfully disconnected from %pM: reason code %d\n", | 119 | "info: successfully disconnected from %pM: reason code %d\n", |
| 120 | priv->cfg_bssid, WLAN_REASON_DEAUTH_LEAVING); | 120 | priv->cfg_bssid, reason_code); |
| 121 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { | 121 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { |
| 122 | cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING, | 122 | cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, |
| 123 | NULL, 0, GFP_KERNEL); | 123 | GFP_KERNEL); |
| 124 | } | 124 | } |
| 125 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 125 | memset(priv->cfg_bssid, 0, ETH_ALEN); |
| 126 | 126 | ||
| @@ -186,7 +186,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) | |||
| 186 | struct mwifiex_adapter *adapter = priv->adapter; | 186 | struct mwifiex_adapter *adapter = priv->adapter; |
| 187 | int ret = 0; | 187 | int ret = 0; |
| 188 | u32 eventcause = adapter->event_cause; | 188 | u32 eventcause = adapter->event_cause; |
| 189 | u16 ctrl; | 189 | u16 ctrl, reason_code; |
| 190 | 190 | ||
| 191 | switch (eventcause) { | 191 | switch (eventcause) { |
| 192 | case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: | 192 | case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: |
| @@ -204,22 +204,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) | |||
| 204 | case EVENT_DEAUTHENTICATED: | 204 | case EVENT_DEAUTHENTICATED: |
| 205 | dev_dbg(adapter->dev, "event: Deauthenticated\n"); | 205 | dev_dbg(adapter->dev, "event: Deauthenticated\n"); |
| 206 | adapter->dbg.num_event_deauth++; | 206 | adapter->dbg.num_event_deauth++; |
| 207 | if (priv->media_connected) | 207 | if (priv->media_connected) { |
| 208 | mwifiex_reset_connect_state(priv); | 208 | reason_code = |
| 209 | le16_to_cpu(*(__le16 *)adapter->event_body); | ||
| 210 | mwifiex_reset_connect_state(priv, reason_code); | ||
| 211 | } | ||
| 209 | break; | 212 | break; |
| 210 | 213 | ||
| 211 | case EVENT_DISASSOCIATED: | 214 | case EVENT_DISASSOCIATED: |
| 212 | dev_dbg(adapter->dev, "event: Disassociated\n"); | 215 | dev_dbg(adapter->dev, "event: Disassociated\n"); |
| 213 | adapter->dbg.num_event_disassoc++; | 216 | adapter->dbg.num_event_disassoc++; |
| 214 | if (priv->media_connected) | 217 | if (priv->media_connected) { |
| 215 | mwifiex_reset_connect_state(priv); | 218 | reason_code = |
| 219 | le16_to_cpu(*(__le16 *)adapter->event_body); | ||
| 220 | mwifiex_reset_connect_state(priv, reason_code); | ||
| 221 | } | ||
| 216 | break; | 222 | break; |
| 217 | 223 | ||
| 218 | case EVENT_LINK_LOST: | 224 | case EVENT_LINK_LOST: |
| 219 | dev_dbg(adapter->dev, "event: Link lost\n"); | 225 | dev_dbg(adapter->dev, "event: Link lost\n"); |
| 220 | adapter->dbg.num_event_link_lost++; | 226 | adapter->dbg.num_event_link_lost++; |
| 221 | if (priv->media_connected) | 227 | if (priv->media_connected) { |
| 222 | mwifiex_reset_connect_state(priv); | 228 | reason_code = |
| 229 | le16_to_cpu(*(__le16 *)adapter->event_body); | ||
| 230 | mwifiex_reset_connect_state(priv, reason_code); | ||
| 231 | } | ||
| 223 | break; | 232 | break; |
| 224 | 233 | ||
| 225 | case EVENT_PS_SLEEP: | 234 | case EVENT_PS_SLEEP: |
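
The three disconnect events above now share one decode step: the first two bytes of the firmware event body carry a little-endian reason code, which replaces the hardcoded WLAN_REASON_DEAUTH_LEAVING (3) handed to cfg80211_disconnected(). A hedged userspace sketch of that decode, using explicit byte math in place of the kernel's le16_to_cpu() (the event-body layout is assumed from the patch):

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian u16 from an unaligned byte buffer. */
static unsigned le16_decode(const uint8_t *p)
{
	return (unsigned)p[0] | ((unsigned)p[1] << 8);
}

int main(void)
{
	/* Fake event body: reason 2 = WLAN_REASON_PREV_AUTH_NOT_VALID. */
	uint8_t event_body[8] = { 0x02, 0x00 };
	unsigned reason_code = le16_decode(event_body);

	printf("disconnected: reason code %u\n", reason_code);
	return 0;
}
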
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 540c94f8505a..01dc8891070c 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -2252,9 +2252,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, | |||
| 2252 | */ | 2252 | */ |
| 2253 | if (rt2x00_rt(rt2x00dev, RT3352)) { | 2253 | if (rt2x00_rt(rt2x00dev, RT3352)) { |
| 2254 | rt2800_bbp_write(rt2x00dev, 27, 0x0); | 2254 | rt2800_bbp_write(rt2x00dev, 27, 0x0); |
| 2255 | rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); | 2255 | rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); |
| 2256 | rt2800_bbp_write(rt2x00dev, 27, 0x20); | 2256 | rt2800_bbp_write(rt2x00dev, 27, 0x20); |
| 2257 | rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain); | 2257 | rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); |
| 2258 | } else { | 2258 | } else { |
| 2259 | rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); | 2259 | rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); |
| 2260 | rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); | 2260 | rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4ebfcf3d8a3b..f2d6b78d901d 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) | |||
| 335 | 335 | ||
| 336 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 336 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 337 | unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 337 | unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 338 | unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; | ||
| 338 | unsigned long bytes; | 339 | unsigned long bytes; |
| 340 | |||
| 341 | offset &= ~PAGE_MASK; | ||
| 342 | |||
| 339 | while (size > 0) { | 343 | while (size > 0) { |
| 344 | BUG_ON(offset >= PAGE_SIZE); | ||
| 340 | BUG_ON(copy_off > MAX_BUFFER_OFFSET); | 345 | BUG_ON(copy_off > MAX_BUFFER_OFFSET); |
| 341 | 346 | ||
| 342 | if (start_new_rx_buffer(copy_off, size, 0)) { | 347 | bytes = PAGE_SIZE - offset; |
| 348 | |||
| 349 | if (bytes > size) | ||
| 350 | bytes = size; | ||
| 351 | |||
| 352 | if (start_new_rx_buffer(copy_off, bytes, 0)) { | ||
| 343 | count++; | 353 | count++; |
| 344 | copy_off = 0; | 354 | copy_off = 0; |
| 345 | } | 355 | } |
| 346 | 356 | ||
| 347 | bytes = size; | ||
| 348 | if (copy_off + bytes > MAX_BUFFER_OFFSET) | 357 | if (copy_off + bytes > MAX_BUFFER_OFFSET) |
| 349 | bytes = MAX_BUFFER_OFFSET - copy_off; | 358 | bytes = MAX_BUFFER_OFFSET - copy_off; |
| 350 | 359 | ||
| 351 | copy_off += bytes; | 360 | copy_off += bytes; |
| 361 | |||
| 362 | offset += bytes; | ||
| 352 | size -= bytes; | 363 | size -= bytes; |
| 364 | |||
| 365 | if (offset == PAGE_SIZE) | ||
| 366 | offset = 0; | ||
| 353 | } | 367 | } |
| 354 | } | 368 | } |
| 355 | return count; | 369 | return count; |
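
The reworked counting loop never lets a chunk cross a page boundary: each pass consumes at most PAGE_SIZE - offset bytes and wraps the in-page offset back to zero, which is what makes frags backed by compound (multi-page) pages count correctly. A compilable model follows; need_new_buffer() is a deliberately simplified stand-in for start_new_rx_buffer(), and 4 KiB pages with a one-page receive buffer are assumptions:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_BUFFER_OFFSET	PAGE_SIZE

/* Simplified stand-in for start_new_rx_buffer(): open a fresh buffer
 * when the next chunk would not fit in the current one. */
static int need_new_buffer(unsigned long copy_off, unsigned long bytes)
{
	return copy_off + bytes > MAX_BUFFER_OFFSET;
}

static unsigned int count_frag_slots(unsigned long size, unsigned long offset)
{
	unsigned long copy_off = 0;
	unsigned int count = 1;		/* one buffer already open */

	offset &= PAGE_SIZE - 1;	/* same effect as offset &= ~PAGE_MASK */

	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;  /* stop at page end */

		if (bytes > size)
			bytes = size;

		if (need_new_buffer(copy_off, bytes)) {
			count++;
			copy_off = 0;
		}

		copy_off += bytes;
		offset += bytes;
		size -= bytes;

		if (offset == PAGE_SIZE)	/* crossed into the next page */
			offset = 0;
	}
	return count;
}

int main(void)
{
	/* An 8 KiB frag starting 100 bytes into a page needs 3 buffers. */
	printf("slots: %u\n", count_frag_slots(8192, 100));
	return 0;
}
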
| @@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 403 | unsigned long bytes; | 417 | unsigned long bytes; |
| 404 | 418 | ||
| 405 | /* Data must not cross a page boundary. */ | 419 | /* Data must not cross a page boundary. */ |
| 406 | BUG_ON(size + offset > PAGE_SIZE); | 420 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); |
| 407 | 421 | ||
| 408 | meta = npo->meta + npo->meta_prod - 1; | 422 | meta = npo->meta + npo->meta_prod - 1; |
| 409 | 423 | ||
| 424 | /* Skip unused frames from start of page */ | ||
| 425 | page += offset >> PAGE_SHIFT; | ||
| 426 | offset &= ~PAGE_MASK; | ||
| 427 | |||
| 410 | while (size > 0) { | 428 | while (size > 0) { |
| 429 | BUG_ON(offset >= PAGE_SIZE); | ||
| 411 | BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); | 430 | BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); |
| 412 | 431 | ||
| 413 | if (start_new_rx_buffer(npo->copy_off, size, *head)) { | 432 | bytes = PAGE_SIZE - offset; |
| 433 | |||
| 434 | if (bytes > size) | ||
| 435 | bytes = size; | ||
| 436 | |||
| 437 | if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { | ||
| 414 | /* | 438 | /* |
| 415 | * Netfront requires there to be some data in the head | 439 | * Netfront requires there to be some data in the head |
| 416 | * buffer. | 440 | * buffer. |
| @@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 420 | meta = get_next_rx_buffer(vif, npo); | 444 | meta = get_next_rx_buffer(vif, npo); |
| 421 | } | 445 | } |
| 422 | 446 | ||
| 423 | bytes = size; | ||
| 424 | if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) | 447 | if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) |
| 425 | bytes = MAX_BUFFER_OFFSET - npo->copy_off; | 448 | bytes = MAX_BUFFER_OFFSET - npo->copy_off; |
| 426 | 449 | ||
| @@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 453 | offset += bytes; | 476 | offset += bytes; |
| 454 | size -= bytes; | 477 | size -= bytes; |
| 455 | 478 | ||
| 479 | /* Next frame */ | ||
| 480 | if (offset == PAGE_SIZE && size) { | ||
| 481 | BUG_ON(!PageCompound(page)); | ||
| 482 | page++; | ||
| 483 | offset = 0; | ||
| 484 | } | ||
| 485 | |||
| 456 | /* Leave a gap for the GSO descriptor. */ | 486 | /* Leave a gap for the GSO descriptor. */ |
| 457 | if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) | 487 | if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) |
| 458 | vif->rx.req_cons++; | 488 | vif->rx.req_cons++; |
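
netbk_gop_frag_copy() gets the matching fix: the (page, offset) cursor is normalized up front (skip whole pages, reduce offset modulo PAGE_SIZE), and the page pointer advances whenever a copy reaches a page boundary with data still pending, asserting the page really is compound. A userspace model with pages flattened into one byte array (illustrative only):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define NR_PAGES	4

static unsigned char region[NR_PAGES * PAGE_SIZE];

static void copy_from_compound(unsigned char *dst, unsigned long page,
			       unsigned long offset, unsigned long size)
{
	/* Skip whole unused pages at the start, as the patch does. */
	page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		assert(offset < PAGE_SIZE);
		if (bytes > size)
			bytes = size;

		memcpy(dst, region + page * PAGE_SIZE + offset, bytes);
		dst += bytes;
		offset += bytes;
		size -= bytes;

		if (offset == PAGE_SIZE && size) {	/* next sub-page */
			page++;
			offset = 0;
		}
	}
}

int main(void)
{
	unsigned char out[6000];

	memset(region, 0xab, sizeof(region));
	copy_from_compound(out, 0, 5000, 6000);	/* starts inside page 1 */
	printf("ok: %d\n", out[0] == 0xab && out[5999] == 0xab);
	return 0;
}
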
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 39abb150bdd4..84c56881ba80 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c | |||
| @@ -329,7 +329,8 @@ static int acerhdf_bind(struct thermal_zone_device *thermal, | |||
| 329 | if (cdev != cl_dev) | 329 | if (cdev != cl_dev) |
| 330 | return 0; | 330 | return 0; |
| 331 | 331 | ||
| 332 | if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) { | 332 | if (thermal_zone_bind_cooling_device(thermal, 0, cdev, |
| 333 | THERMAL_NO_LIMIT, THERMAL_NO_LIMIT)) { | ||
| 333 | pr_err("error binding cooling dev\n"); | 334 | pr_err("error binding cooling dev\n"); |
| 334 | return -EINVAL; | 335 | return -EINVAL; |
| 335 | } | 336 | } |
| @@ -661,7 +662,7 @@ static int acerhdf_register_thermal(void) | |||
| 661 | return -EINVAL; | 662 | return -EINVAL; |
| 662 | 663 | ||
| 663 | thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL, | 664 | thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL, |
| 664 | &acerhdf_dev_ops, 0, 0, 0, | 665 | &acerhdf_dev_ops, 0, |
| 665 | (kernelmode) ? interval*1000 : 0); | 666 | (kernelmode) ? interval*1000 : 0); |
| 666 | if (IS_ERR(thz_dev)) | 667 | if (IS_ERR(thz_dev)) |
| 667 | return -EINVAL; | 668 | return -EINVAL; |
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 3a27113deda9..c8097616dd62 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c | |||
| @@ -502,7 +502,7 @@ static int mid_thermal_probe(struct platform_device *pdev) | |||
| 502 | goto err; | 502 | goto err; |
| 503 | } | 503 | } |
| 504 | pinfo->tzd[i] = thermal_zone_device_register(name[i], | 504 | pinfo->tzd[i] = thermal_zone_device_register(name[i], |
| 505 | 0, 0, td_info, &tzd_ops, 0, 0, 0, 0); | 505 | 0, 0, td_info, &tzd_ops, 0, 0); |
| 506 | if (IS_ERR(pinfo->tzd[i])) { | 506 | if (IS_ERR(pinfo->tzd[i])) { |
| 507 | kfree(td_info); | 507 | kfree(td_info); |
| 508 | ret = PTR_ERR(pinfo->tzd[i]); | 508 | ret = PTR_ERR(pinfo->tzd[i]); |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 08cc8a3c15af..2436f1350013 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
| @@ -201,7 +201,7 @@ static int psy_register_thermal(struct power_supply *psy) | |||
| 201 | for (i = 0; i < psy->num_properties; i++) { | 201 | for (i = 0; i < psy->num_properties; i++) { |
| 202 | if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { | 202 | if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { |
| 203 | psy->tzd = thermal_zone_device_register(psy->name, 0, 0, | 203 | psy->tzd = thermal_zone_device_register(psy->name, 0, 0, |
| 204 | psy, &psy_tzd_ops, 0, 0, 0, 0); | 204 | psy, &psy_tzd_ops, 0, 0); |
| 205 | if (IS_ERR(psy->tzd)) | 205 | if (IS_ERR(psy->tzd)) |
| 206 | return PTR_ERR(psy->tzd); | 206 | return PTR_ERR(psy->tzd); |
| 207 | break; | 207 | break; |
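
All three callers above track the same thermal core API change: thermal_zone_bind_cooling_device() now takes explicit upper and lower cooling-state limits (THERMAL_NO_LIMIT for the full range), and thermal_zone_device_register() loses the per-trip arguments. A self-contained sketch of the sentinel-limit idea, with invented types rather than the kernel's:

#include <limits.h>
#include <stdio.h>

#define NO_LIMIT ULONG_MAX	/* stand-in for THERMAL_NO_LIMIT */

struct cooling_dev {
	unsigned long max_state;
	unsigned long lower, upper;	/* resolved on bind */
};

static int bind_cooling(struct cooling_dev *cdev,
			unsigned long lower, unsigned long upper)
{
	cdev->lower = (lower == NO_LIMIT) ? 0 : lower;
	cdev->upper = (upper == NO_LIMIT) ? cdev->max_state : upper;
	return (cdev->lower <= cdev->upper) ? 0 : -1;
}

int main(void)
{
	struct cooling_dev fan = { .max_state = 7 };

	/* Equivalent of passing THERMAL_NO_LIMIT twice, as the patch does. */
	if (bind_cooling(&fan, NO_LIMIT, NO_LIMIT) == 0)
		printf("bound: cooling states %lu..%lu\n",
		       fan.lower, fan.upper);
	return 0;
}
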
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index b7c326f7a6d0..342d7d9c0997 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c | |||
| @@ -165,6 +165,16 @@ bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) | |||
| 165 | bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); | 165 | bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static void | ||
| 169 | bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg) | ||
| 170 | { | ||
| 171 | struct bfa_fru_s *fru = BFA_FRU(bfa); | ||
| 172 | struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); | ||
| 173 | |||
| 174 | bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg); | ||
| 175 | bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg); | ||
| 176 | } | ||
| 177 | |||
| 168 | /* | 178 | /* |
| 169 | * BFA IOC FC related definitions | 179 | * BFA IOC FC related definitions |
| 170 | */ | 180 | */ |
| @@ -274,6 +284,15 @@ bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 274 | case IOCFC_E_IOC_ENABLED: | 284 | case IOCFC_E_IOC_ENABLED: |
| 275 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); | 285 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); |
| 276 | break; | 286 | break; |
| 287 | |||
| 288 | case IOCFC_E_DISABLE: | ||
| 289 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
| 290 | break; | ||
| 291 | |||
| 292 | case IOCFC_E_STOP: | ||
| 293 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
| 294 | break; | ||
| 295 | |||
| 277 | case IOCFC_E_IOC_FAILED: | 296 | case IOCFC_E_IOC_FAILED: |
| 278 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | 297 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); |
| 279 | break; | 298 | break; |
| @@ -298,6 +317,15 @@ bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 298 | case IOCFC_E_DCONF_DONE: | 317 | case IOCFC_E_DCONF_DONE: |
| 299 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); | 318 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); |
| 300 | break; | 319 | break; |
| 320 | |||
| 321 | case IOCFC_E_DISABLE: | ||
| 322 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
| 323 | break; | ||
| 324 | |||
| 325 | case IOCFC_E_STOP: | ||
| 326 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
| 327 | break; | ||
| 328 | |||
| 301 | case IOCFC_E_IOC_FAILED: | 329 | case IOCFC_E_IOC_FAILED: |
| 302 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | 330 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); |
| 303 | break; | 331 | break; |
| @@ -322,6 +350,15 @@ bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 322 | case IOCFC_E_CFG_DONE: | 350 | case IOCFC_E_CFG_DONE: |
| 323 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); | 351 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); |
| 324 | break; | 352 | break; |
| 353 | |||
| 354 | case IOCFC_E_DISABLE: | ||
| 355 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
| 356 | break; | ||
| 357 | |||
| 358 | case IOCFC_E_STOP: | ||
| 359 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
| 360 | break; | ||
| 361 | |||
| 325 | case IOCFC_E_IOC_FAILED: | 362 | case IOCFC_E_IOC_FAILED: |
| 326 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | 363 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); |
| 327 | break; | 364 | break; |
| @@ -433,6 +470,12 @@ bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 433 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, | 470 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, |
| 434 | bfa_iocfc_stop_cb, iocfc->bfa); | 471 | bfa_iocfc_stop_cb, iocfc->bfa); |
| 435 | break; | 472 | break; |
| 473 | |||
| 474 | case IOCFC_E_IOC_ENABLED: | ||
| 475 | case IOCFC_E_DCONF_DONE: | ||
| 476 | case IOCFC_E_CFG_DONE: | ||
| 477 | break; | ||
| 478 | |||
| 436 | default: | 479 | default: |
| 437 | bfa_sm_fault(iocfc->bfa, event); | 480 | bfa_sm_fault(iocfc->bfa, event); |
| 438 | break; | 481 | break; |
| @@ -454,6 +497,15 @@ bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 454 | case IOCFC_E_IOC_ENABLED: | 497 | case IOCFC_E_IOC_ENABLED: |
| 455 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); | 498 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); |
| 456 | break; | 499 | break; |
| 500 | |||
| 501 | case IOCFC_E_DISABLE: | ||
| 502 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
| 503 | break; | ||
| 504 | |||
| 505 | case IOCFC_E_STOP: | ||
| 506 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); | ||
| 507 | break; | ||
| 508 | |||
| 457 | case IOCFC_E_IOC_FAILED: | 509 | case IOCFC_E_IOC_FAILED: |
| 458 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | 510 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); |
| 459 | 511 | ||
| @@ -493,6 +545,13 @@ bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 493 | bfa_iocfc_enable_cb, iocfc->bfa); | 545 | bfa_iocfc_enable_cb, iocfc->bfa); |
| 494 | iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; | 546 | iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; |
| 495 | break; | 547 | break; |
| 548 | case IOCFC_E_DISABLE: | ||
| 549 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
| 550 | break; | ||
| 551 | |||
| 552 | case IOCFC_E_STOP: | ||
| 553 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); | ||
| 554 | break; | ||
| 496 | case IOCFC_E_IOC_FAILED: | 555 | case IOCFC_E_IOC_FAILED: |
| 497 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | 556 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); |
| 498 | if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) | 557 | if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) |
| @@ -524,6 +583,10 @@ bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | |||
| 524 | case IOCFC_E_IOC_DISABLED: | 583 | case IOCFC_E_IOC_DISABLED: |
| 525 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); | 584 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); |
| 526 | break; | 585 | break; |
| 586 | case IOCFC_E_IOC_ENABLED: | ||
| 587 | case IOCFC_E_DCONF_DONE: | ||
| 588 | case IOCFC_E_CFG_DONE: | ||
| 589 | break; | ||
| 527 | default: | 590 | default: |
| 528 | bfa_sm_fault(iocfc->bfa, event); | 591 | bfa_sm_fault(iocfc->bfa, event); |
| 529 | break; | 592 | break; |
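
The pattern in the bfa_iocfc FSM hunks above is defensive in two directions: the early states (initing, dconf_read, init_cfg_wait, enabling, cfg_wait) learn to accept DISABLE/STOP, and the tear-down states (stopping, disabling) explicitly swallow late completion events rather than landing in bfa_sm_fault(). A toy FSM showing both moves (states and events invented for illustration):

#include <stdio.h>

enum state { ST_INITING, ST_STOPPING, ST_STOPPED };
enum event { EV_IOC_ENABLED, EV_CFG_DONE, EV_STOP, EV_STOP_DONE };

static enum state step(enum state s, enum event e)
{
	if (s == ST_INITING) {
		if (e == EV_STOP)		/* new: STOP accepted mid-init */
			return ST_STOPPING;
		return ST_INITING;
	}
	if (s == ST_STOPPING) {
		if (e == EV_IOC_ENABLED || e == EV_CFG_DONE)
			return ST_STOPPING;	/* stale completions: ignore */
		if (e == EV_STOP_DONE)
			return ST_STOPPED;
	}
	return s;
}

int main(void)
{
	enum state s = ST_INITING;

	s = step(s, EV_STOP);		/* unload arrives during init */
	s = step(s, EV_IOC_ENABLED);	/* late IOC-enabled: swallowed */
	s = step(s, EV_STOP_DONE);
	printf("final state=%d (ST_STOPPED=%d)\n", s, ST_STOPPED);
	return 0;
}
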
| @@ -785,19 +848,20 @@ void | |||
| 785 | bfa_isr_enable(struct bfa_s *bfa) | 848 | bfa_isr_enable(struct bfa_s *bfa) |
| 786 | { | 849 | { |
| 787 | u32 umsk; | 850 | u32 umsk; |
| 788 | int pci_func = bfa_ioc_pcifn(&bfa->ioc); | 851 | int port_id = bfa_ioc_portid(&bfa->ioc); |
| 789 | 852 | ||
| 790 | bfa_trc(bfa, pci_func); | 853 | bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc)); |
| 854 | bfa_trc(bfa, port_id); | ||
| 791 | 855 | ||
| 792 | bfa_msix_ctrl_install(bfa); | 856 | bfa_msix_ctrl_install(bfa); |
| 793 | 857 | ||
| 794 | if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { | 858 | if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { |
| 795 | umsk = __HFN_INT_ERR_MASK_CT2; | 859 | umsk = __HFN_INT_ERR_MASK_CT2; |
| 796 | umsk |= pci_func == 0 ? | 860 | umsk |= port_id == 0 ? |
| 797 | __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; | 861 | __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; |
| 798 | } else { | 862 | } else { |
| 799 | umsk = __HFN_INT_ERR_MASK; | 863 | umsk = __HFN_INT_ERR_MASK; |
| 800 | umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; | 864 | umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; |
| 801 | } | 865 | } |
| 802 | 866 | ||
| 803 | writel(umsk, bfa->iocfc.bfa_regs.intr_status); | 867 | writel(umsk, bfa->iocfc.bfa_regs.intr_status); |
| @@ -930,7 +994,8 @@ bfa_iocfc_send_cfg(void *bfa_arg) | |||
| 930 | cfg_info->single_msix_vec = 1; | 994 | cfg_info->single_msix_vec = 1; |
| 931 | cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; | 995 | cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; |
| 932 | cfg_info->num_cqs = cfg->fwcfg.num_cqs; | 996 | cfg_info->num_cqs = cfg->fwcfg.num_cqs; |
| 933 | cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); | 997 | cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa, |
| 998 | cfg->fwcfg.num_ioim_reqs)); | ||
| 934 | cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); | 999 | cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); |
| 935 | 1000 | ||
| 936 | bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); | 1001 | bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); |
| @@ -1192,10 +1257,14 @@ bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg) | |||
| 1192 | static void | 1257 | static void |
| 1193 | bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) | 1258 | bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) |
| 1194 | { | 1259 | { |
| 1260 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
| 1261 | struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; | ||
| 1262 | |||
| 1195 | bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); | 1263 | bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); |
| 1196 | bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); | 1264 | bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); |
| 1197 | bfa_rport_res_recfg(bfa, fwcfg->num_rports); | 1265 | bfa_rport_res_recfg(bfa, fwcfg->num_rports); |
| 1198 | bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); | 1266 | bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs), |
| 1267 | fwcfg->num_ioim_reqs); | ||
| 1199 | bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); | 1268 | bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); |
| 1200 | } | 1269 | } |
| 1201 | 1270 | ||
| @@ -1693,6 +1762,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, | |||
| 1693 | struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); | 1762 | struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); |
| 1694 | struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); | 1763 | struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); |
| 1695 | struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); | 1764 | struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); |
| 1765 | struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa); | ||
| 1696 | 1766 | ||
| 1697 | WARN_ON((cfg == NULL) || (meminfo == NULL)); | 1767 | WARN_ON((cfg == NULL) || (meminfo == NULL)); |
| 1698 | 1768 | ||
| @@ -1717,6 +1787,8 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, | |||
| 1717 | bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); | 1787 | bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); |
| 1718 | bfa_mem_dma_setup(meminfo, phy_dma, | 1788 | bfa_mem_dma_setup(meminfo, phy_dma, |
| 1719 | bfa_phy_meminfo(cfg->drvcfg.min_cfg)); | 1789 | bfa_phy_meminfo(cfg->drvcfg.min_cfg)); |
| 1790 | bfa_mem_dma_setup(meminfo, fru_dma, | ||
| 1791 | bfa_fru_meminfo(cfg->drvcfg.min_cfg)); | ||
| 1720 | } | 1792 | } |
| 1721 | 1793 | ||
| 1722 | /* | 1794 | /* |
| @@ -1789,6 +1861,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 1789 | bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); | 1861 | bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); |
| 1790 | bfa_com_diag_attach(bfa); | 1862 | bfa_com_diag_attach(bfa); |
| 1791 | bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); | 1863 | bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); |
| 1864 | bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg); | ||
| 1792 | } | 1865 | } |
| 1793 | 1866 | ||
| 1794 | /* | 1867 | /* |
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index b5a1595cc0a5..0efdf312b42c 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h | |||
| @@ -159,10 +159,13 @@ enum bfa_status { | |||
| 159 | BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ | 159 | BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ |
| 160 | BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ | 160 | BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ |
| 161 | BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ | 161 | BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ |
| 162 | BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */ | ||
| 163 | BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */ | ||
| 162 | BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ | 164 | BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ |
| 163 | BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ | 165 | BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ |
| 164 | BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ | 166 | BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ |
| 165 | BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ | 167 | BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ |
| 168 | BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ | ||
| 166 | BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot | 169 | BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot |
| 167 | * configuration */ | 170 | * configuration */ |
| 168 | BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ | 171 | BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ |
| @@ -184,6 +187,17 @@ enum bfa_status { | |||
| 184 | BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ | 187 | BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ |
| 185 | BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ | 188 | BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ |
| 186 | BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ | 189 | BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ |
| 190 | BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */ | ||
| 191 | BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported | ||
| 192 | * on mezz cards */ | ||
| 193 | BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */ | ||
| 194 | BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth | ||
| 195 | * configuration */ | ||
| 196 | BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */ | ||
| 197 | BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */ | ||
| 198 | BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */ | ||
| 199 | BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */ | ||
| 200 | BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */ | ||
| 187 | BFA_STATUS_MAX_VAL /* Unknown error code */ | 201 | BFA_STATUS_MAX_VAL /* Unknown error code */ |
| 188 | }; | 202 | }; |
| 189 | #define bfa_status_t enum bfa_status | 203 | #define bfa_status_t enum bfa_status |
| @@ -249,6 +263,10 @@ struct bfa_adapter_attr_s { | |||
| 249 | 263 | ||
| 250 | u8 is_mezz; | 264 | u8 is_mezz; |
| 251 | u8 trunk_capable; | 265 | u8 trunk_capable; |
| 266 | u8 mfg_day; /* manufacturing day */ | ||
| 267 | u8 mfg_month; /* manufacturing month */ | ||
| 268 | u16 mfg_year; /* manufacturing year */ | ||
| 269 | u16 rsvd; | ||
| 252 | }; | 270 | }; |
| 253 | 271 | ||
| 254 | /* | 272 | /* |
| @@ -499,6 +517,17 @@ struct bfa_ioc_aen_data_s { | |||
| 499 | }; | 517 | }; |
| 500 | 518 | ||
| 501 | /* | 519 | /* |
| 520 | * D-port states | ||
| 521 | * | ||
| 522 | */ | ||
| 523 | enum bfa_dport_state { | ||
| 524 | BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */ | ||
| 525 | BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */ | ||
| 526 | BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */ | ||
| 527 | BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */ | ||
| 528 | }; | ||
| 529 | |||
| 530 | /* | ||
| 502 | * ---------------------- mfg definitions ------------ | 531 | * ---------------------- mfg definitions ------------ |
| 503 | */ | 532 | */ |
| 504 | 533 | ||
| @@ -722,7 +751,8 @@ struct bfa_ablk_cfg_pf_s { | |||
| 722 | u8 rsvd[1]; | 751 | u8 rsvd[1]; |
| 723 | u16 num_qpairs; | 752 | u16 num_qpairs; |
| 724 | u16 num_vectors; | 753 | u16 num_vectors; |
| 725 | u32 bw; | 754 | u16 bw_min; |
| 755 | u16 bw_max; | ||
| 726 | }; | 756 | }; |
| 727 | 757 | ||
| 728 | struct bfa_ablk_cfg_port_s { | 758 | struct bfa_ablk_cfg_port_s { |
| @@ -889,11 +919,40 @@ struct sfp_diag_ext_s { | |||
| 889 | u8 ext_status_ctl[2]; | 919 | u8 ext_status_ctl[2]; |
| 890 | }; | 920 | }; |
| 891 | 921 | ||
| 922 | /* | ||
| 923 | * Diagnostic: Data Fields -- Address A2h | ||
| 924 | * General Use Fields: User Writable Table - Features' Control Registers | ||
| 925 | * Total 32 bytes | ||
| 926 | */ | ||
| 927 | struct sfp_usr_eeprom_s { | ||
| 928 | u8 rsvd1[2]; /* 128-129 */ | ||
| 929 | u8 ewrap; /* 130 */ | ||
| 930 | u8 rsvd2[2]; /* */ | ||
| 931 | u8 owrap; /* 133 */ | ||
| 932 | u8 rsvd3[2]; /* */ | ||
| 933 | u8 prbs; /* 136: PRBS 7 generator */ | ||
| 934 | u8 rsvd4[2]; /* */ | ||
| 935 | u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */ | ||
| 936 | u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */ | ||
| 937 | u8 rsvd5[2]; /* */ | ||
| 938 | u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */ | ||
| 939 | u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */ | ||
| 940 | u8 rsvd6[2]; /* */ | ||
| 941 | u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */ | ||
| 942 | u8 rsvd7[3]; /* */ | ||
| 943 | u8 tx_eye_qctl; /* 151: TX eye Quality Control */ | ||
| 944 | u8 tx_eye_qres; /* 152: TX eye Quality Result */ | ||
| 945 | u8 rsvd8[2]; /* */ | ||
| 946 | u8 poh[3]; /* 155-157: Power On Hours */ | ||
| 947 | u8 rsvd9[2]; /* */ | ||
| 948 | }; | ||
| 949 | |||
| 892 | struct sfp_mem_s { | 950 | struct sfp_mem_s { |
| 893 | struct sfp_srlid_base_s srlid_base; | 951 | struct sfp_srlid_base_s srlid_base; |
| 894 | struct sfp_srlid_ext_s srlid_ext; | 952 | struct sfp_srlid_ext_s srlid_ext; |
| 895 | struct sfp_diag_base_s diag_base; | 953 | struct sfp_diag_base_s diag_base; |
| 896 | struct sfp_diag_ext_s diag_ext; | 954 | struct sfp_diag_ext_s diag_ext; |
| 955 | struct sfp_usr_eeprom_s usr_eeprom; | ||
| 897 | }; | 956 | }; |
| 898 | 957 | ||
| 899 | /* | 958 | /* |
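
sfp_usr_eeprom_s leans on reserved pads so every field sits at its documented A2h byte address; since the file compiles under #pragma pack(1), the layout can be sanity-checked with offsetof(). A trimmed, illustrative version (only the first few fields, not the driver's full struct):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct usr_eeprom {		/* byte address 128 maps to offset 0 here */
	uint8_t rsvd1[2];	/* 128-129 */
	uint8_t ewrap;		/* 130 */
	uint8_t rsvd2[2];	/* 131-132 */
	uint8_t owrap;		/* 133 */
	uint8_t rsvd3[2];	/* 134-135 */
	uint8_t prbs;		/* 136 */
};
#pragma pack()

int main(void)
{
	/* Field offset + 128 must equal the documented byte address. */
	printf("ewrap @ %zu, owrap @ %zu, prbs @ %zu\n",
	       offsetof(struct usr_eeprom, ewrap) + 128,
	       offsetof(struct usr_eeprom, owrap) + 128,
	       offsetof(struct usr_eeprom, prbs) + 128);
	return 0;
}
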
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 36756ce0e58f..ec03c8cd8dac 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h | |||
| @@ -258,6 +258,7 @@ struct bfa_fw_port_lksm_stats_s { | |||
| 258 | u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ | 258 | u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ |
| 259 | u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ | 259 | u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ |
| 260 | u32 bbsc_lr; /* LKSM LR tx for credit recovery */ | 260 | u32 bbsc_lr; /* LKSM LR tx for credit recovery */ |
| 261 | u32 rsvd; | ||
| 261 | }; | 262 | }; |
| 262 | 263 | ||
| 263 | struct bfa_fw_port_snsm_stats_s { | 264 | struct bfa_fw_port_snsm_stats_s { |
| @@ -270,6 +271,9 @@ struct bfa_fw_port_snsm_stats_s { | |||
| 270 | u32 sync_lost; /* Sync loss count */ | 271 | u32 sync_lost; /* Sync loss count */ |
| 271 | u32 sig_lost; /* Signal loss count */ | 272 | u32 sig_lost; /* Signal loss count */ |
| 272 | u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ | 273 | u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ |
| 274 | u32 adapt_success; /* SNSM adaptation success */ | ||
| 275 | u32 adapt_fails; /* SNSM adaptation failures */ | ||
| 276 | u32 adapt_ign_fails; /* SNSM adaptation failures ignored */ | ||
| 273 | }; | 277 | }; |
| 274 | 278 | ||
| 275 | struct bfa_fw_port_physm_stats_s { | 279 | struct bfa_fw_port_physm_stats_s { |
| @@ -324,12 +328,46 @@ struct bfa_fw_fcoe_port_stats_s { | |||
| 324 | struct bfa_fw_fip_stats_s fip_stats; | 328 | struct bfa_fw_fip_stats_s fip_stats; |
| 325 | }; | 329 | }; |
| 326 | 330 | ||
| 331 | /** | ||
| 332 | * @brief LPSM statistics | ||
| 333 | */ | ||
| 334 | struct bfa_fw_lpsm_stats_s { | ||
| 335 | u32 cls_rx; /* LPSM cls_rx */ | ||
| 336 | u32 cls_tx; /* LPSM cls_tx */ | ||
| 337 | u32 arbf0_rx; /* LPSM arbf0 rcvd */ | ||
| 338 | u32 arbf0_tx; /* LPSM arbf0 xmit */ | ||
| 339 | u32 init_rx; /* LPSM loop init start */ | ||
| 340 | u32 unexp_hwst; /* LPSM unknown hw state */ | ||
| 341 | u32 unexp_frame; /* LPSM unknown_frame */ | ||
| 342 | u32 unexp_prim; /* LPSM unexpected primitive */ | ||
| 343 | u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */ | ||
| 344 | u32 alpa_unavail; /* LPSM alpa not available */ | ||
| 345 | u32 lip_rx; /* LPSM lip rcvd */ | ||
| 346 | u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */ | ||
| 347 | u32 lip_f8_rx; /* LPSM lip f8 rcvd */ | ||
| 348 | u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */ | ||
| 349 | u32 lip_other_rx; /* LPSM lip other rcvd */ | ||
| 350 | u32 lip_tx; /* LPSM lip xmit */ | ||
| 351 | u32 retry_tov; /* LPSM retry TOV */ | ||
| 352 | u32 lip_tov; /* LPSM LIP wait TOV */ | ||
| 353 | u32 idle_tov; /* LPSM idle wait TOV */ | ||
| 354 | u32 arbf0_tov; /* LPSM arbf0 wait TOV */ | ||
| 355 | u32 stop_loop_tov; /* LPSM stop loop wait TOV */ | ||
| 356 | u32 lixa_tov; /* LPSM lisa wait TOV */ | ||
| 357 | u32 lixx_tov; /* LPSM lilp/lirp wait TOV */ | ||
| 358 | u32 cls_tov; /* LPSM cls wait TOV */ | ||
| 359 | u32 sler; /* LPSM SLER recvd */ | ||
| 360 | u32 failed; /* LPSM failed */ | ||
| 361 | u32 success; /* LPSM online */ | ||
| 362 | }; | ||
| 363 | |||
| 327 | /* | 364 | /* |
| 328 | * IOC firmware FC uport stats | 365 | * IOC firmware FC uport stats |
| 329 | */ | 366 | */ |
| 330 | struct bfa_fw_fc_uport_stats_s { | 367 | struct bfa_fw_fc_uport_stats_s { |
| 331 | struct bfa_fw_port_snsm_stats_s snsm_stats; | 368 | struct bfa_fw_port_snsm_stats_s snsm_stats; |
| 332 | struct bfa_fw_port_lksm_stats_s lksm_stats; | 369 | struct bfa_fw_port_lksm_stats_s lksm_stats; |
| 370 | struct bfa_fw_lpsm_stats_s lpsm_stats; | ||
| 333 | }; | 371 | }; |
| 334 | 372 | ||
| 335 | /* | 373 | /* |
| @@ -357,11 +395,6 @@ struct bfa_fw_fcxchg_stats_s { | |||
| 357 | u32 ua_state_inv; | 395 | u32 ua_state_inv; |
| 358 | }; | 396 | }; |
| 359 | 397 | ||
| 360 | struct bfa_fw_lpsm_stats_s { | ||
| 361 | u32 cls_rx; | ||
| 362 | u32 cls_tx; | ||
| 363 | }; | ||
| 364 | |||
| 365 | /* | 398 | /* |
| 366 | * Trunk statistics | 399 | * Trunk statistics |
| 367 | */ | 400 | */ |
| @@ -454,7 +487,6 @@ struct bfa_fw_stats_s { | |||
| 454 | struct bfa_fw_io_stats_s io_stats; | 487 | struct bfa_fw_io_stats_s io_stats; |
| 455 | struct bfa_fw_port_stats_s port_stats; | 488 | struct bfa_fw_port_stats_s port_stats; |
| 456 | struct bfa_fw_fcxchg_stats_s fcxchg_stats; | 489 | struct bfa_fw_fcxchg_stats_s fcxchg_stats; |
| 457 | struct bfa_fw_lpsm_stats_s lpsm_stats; | ||
| 458 | struct bfa_fw_lps_stats_s lps_stats; | 490 | struct bfa_fw_lps_stats_s lps_stats; |
| 459 | struct bfa_fw_trunk_stats_s trunk_stats; | 491 | struct bfa_fw_trunk_stats_s trunk_stats; |
| 460 | struct bfa_fw_advsm_stats_s advsm_stats; | 492 | struct bfa_fw_advsm_stats_s advsm_stats; |
| @@ -494,13 +526,23 @@ enum bfa_qos_bw_alloc { | |||
| 494 | BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ | 526 | BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ |
| 495 | }; | 527 | }; |
| 496 | #pragma pack(1) | 528 | #pragma pack(1) |
| 529 | |||
| 530 | struct bfa_qos_bw_s { | ||
| 531 | u8 qos_bw_set; | ||
| 532 | u8 high; | ||
| 533 | u8 med; | ||
| 534 | u8 low; | ||
| 535 | }; | ||
| 536 | |||
| 497 | /* | 537 | /* |
| 498 | * QoS attribute returned in QoS Query | 538 | * QoS attribute returned in QoS Query |
| 499 | */ | 539 | */ |
| 500 | struct bfa_qos_attr_s { | 540 | struct bfa_qos_attr_s { |
| 501 | u8 state; /* QoS current state */ | 541 | u8 state; /* QoS current state */ |
| 502 | u8 rsvd[3]; | 542 | u8 rsvd1[3]; |
| 503 | u32 total_bb_cr; /* Total BB Credits */ | 543 | u32 total_bb_cr; /* Total BB Credits */ |
| 544 | struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */ | ||
| 545 | struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */ | ||
| 504 | }; | 546 | }; |
| 505 | 547 | ||
| 506 | /* | 548 | /* |
| @@ -692,7 +734,8 @@ enum bfa_port_states { | |||
| 692 | BFA_PORT_ST_FWMISMATCH = 12, | 734 | BFA_PORT_ST_FWMISMATCH = 12, |
| 693 | BFA_PORT_ST_PREBOOT_DISABLED = 13, | 735 | BFA_PORT_ST_PREBOOT_DISABLED = 13, |
| 694 | BFA_PORT_ST_TOGGLING_QWAIT = 14, | 736 | BFA_PORT_ST_TOGGLING_QWAIT = 14, |
| 695 | BFA_PORT_ST_ACQ_ADDR = 15, | 737 | BFA_PORT_ST_FAA_MISCONFIG = 15, |
| 738 | BFA_PORT_ST_DPORT = 16, | ||
| 696 | BFA_PORT_ST_MAX_STATE, | 739 | BFA_PORT_ST_MAX_STATE, |
| 697 | }; | 740 | }; |
| 698 | 741 | ||
| @@ -714,9 +757,11 @@ enum bfa_port_type { | |||
| 714 | */ | 757 | */ |
| 715 | enum bfa_port_topology { | 758 | enum bfa_port_topology { |
| 716 | BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ | 759 | BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ |
| 717 | BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */ | 760 | BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */ |
| 718 | BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ | 761 | BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ |
| 719 | BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ | 762 | BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */ |
| 763 | BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */ | ||
| 764 | BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */ | ||
| 720 | }; | 765 | }; |
| 721 | 766 | ||
| 722 | /* | 767 | /* |
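
The topology enum renumbering keeps the old numeric values alive as *_OLD_VER aliases, so a configuration written by an older tool still decodes instead of silently meaning something else. A sketch of the translation helper such versioning typically needs (the helper itself is an assumption, not shown in the patch):

#include <stdio.h>

enum topology {
	TOPO_NONE         = 0,
	TOPO_P2P_OLD_VER  = 1,	/* legacy encoding of P2P */
	TOPO_LOOP         = 2,
	TOPO_AUTO_OLD_VER = 3,	/* legacy encoding of AUTO */
	TOPO_AUTO         = 4,
	TOPO_P2P          = 5,
};

static enum topology normalize(enum topology t)
{
	switch (t) {
	case TOPO_P2P_OLD_VER:  return TOPO_P2P;
	case TOPO_AUTO_OLD_VER: return TOPO_AUTO;
	default:                return t;
	}
}

int main(void)
{
	printf("%d -> %d\n", TOPO_P2P_OLD_VER, normalize(TOPO_P2P_OLD_VER));
	return 0;
}
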
| @@ -760,6 +805,7 @@ enum bfa_port_linkstate_rsn { | |||
| 760 | BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, | 805 | BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, |
| 761 | BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, | 806 | BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, |
| 762 | BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, | 807 | BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, |
| 808 | BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12, | ||
| 763 | 809 | ||
| 764 | 810 | ||
| 765 | 811 | ||
| @@ -833,6 +879,19 @@ struct bfa_lunmask_cfg_s { | |||
| 833 | struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; | 879 | struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; |
| 834 | }; | 880 | }; |
| 835 | 881 | ||
| 882 | struct bfa_throttle_cfg_s { | ||
| 883 | u16 is_valid; | ||
| 884 | u16 value; | ||
| 885 | u32 rsvd; | ||
| 886 | }; | ||
| 887 | |||
| 888 | struct bfa_defs_fcpim_throttle_s { | ||
| 889 | u16 max_value; | ||
| 890 | u16 cur_value; | ||
| 891 | u16 cfg_value; | ||
| 892 | u16 rsvd; | ||
| 893 | }; | ||
| 894 | |||
| 836 | /* | 895 | /* |
| 837 | * Physical port configuration | 896 | * Physical port configuration |
| 838 | */ | 897 | */ |
| @@ -851,9 +910,10 @@ struct bfa_port_cfg_s { | |||
| 851 | u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ | 910 | u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ |
| 852 | u8 bb_scn_state; /* Config state of BB_SCN */ | 911 | u8 bb_scn_state; /* Config state of BB_SCN */ |
| 853 | u8 faa_state; /* FAA enabled/disabled */ | 912 | u8 faa_state; /* FAA enabled/disabled */ |
| 854 | u8 rsvd[1]; | 913 | u8 rsvd1; |
| 855 | u16 path_tov; /* device path timeout */ | 914 | u16 path_tov; /* device path timeout */ |
| 856 | u16 q_depth; /* SCSI Queue depth */ | 915 | u16 q_depth; /* SCSI Queue depth */ |
| 916 | struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */ | ||
| 857 | }; | 917 | }; |
| 858 | #pragma pack() | 918 | #pragma pack() |
| 859 | 919 | ||
| @@ -901,7 +961,7 @@ struct bfa_port_attr_s { | |||
| 901 | 961 | ||
| 902 | /* FCoE specific */ | 962 | /* FCoE specific */ |
| 903 | u16 fcoe_vlan; | 963 | u16 fcoe_vlan; |
| 904 | u8 rsvd1[2]; | 964 | u8 rsvd1[6]; |
| 905 | }; | 965 | }; |
| 906 | 966 | ||
| 907 | /* | 967 | /* |
| @@ -971,6 +1031,13 @@ struct bfa_trunk_vc_attr_s { | |||
| 971 | u16 vc_credits[8]; | 1031 | u16 vc_credits[8]; |
| 972 | }; | 1032 | }; |
| 973 | 1033 | ||
| 1034 | struct bfa_fcport_loop_info_s { | ||
| 1035 | u8 myalpa; /* alpa claimed */ | ||
| 1036 | u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */ | ||
| 1037 | u8 resvd[6]; | ||
| 1038 | struct fc_alpabm_s alpabm; /* alpa bitmap */ | ||
| 1039 | }; | ||
| 1040 | |||
| 974 | /* | 1041 | /* |
| 975 | * Link state information | 1042 | * Link state information |
| 976 | */ | 1043 | */ |
| @@ -981,13 +1048,18 @@ struct bfa_port_link_s { | |||
| 981 | u8 speed; /* Link speed (1/2/4/8 G) */ | 1048 | u8 speed; /* Link speed (1/2/4/8 G) */ |
| 982 | u32 linkstate_opt; /* Linkstate optional data (debug) */ | 1049 | u32 linkstate_opt; /* Linkstate optional data (debug) */ |
| 983 | u8 trunked; /* Trunked or not (1 or 0) */ | 1050 | u8 trunked; /* Trunked or not (1 or 0) */ |
| 984 | u8 resvd[3]; | 1051 | u8 resvd[7]; |
| 985 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ | 1052 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ |
| 986 | union { | 1053 | union { |
| 987 | struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ | 1054 | struct bfa_fcport_loop_info_s loop_info; |
| 988 | struct bfa_trunk_vc_attr_s trunk_vc_attr; | 1055 | union { |
| 989 | struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ | 1056 | struct bfa_qos_vc_attr_s qos_vc_attr; |
| 990 | } vc_fcf; | 1057 | /* VC info from ELP */ |
| 1058 | struct bfa_trunk_vc_attr_s trunk_vc_attr; | ||
| 1059 | struct bfa_fcport_fcf_s fcf; | ||
| 1060 | /* FCF information (for FCoE) */ | ||
| 1061 | } vc_fcf; | ||
| 1062 | } attr; | ||
| 991 | }; | 1063 | }; |
| 992 | #pragma pack() | 1064 | #pragma pack() |
| 993 | 1065 | ||
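
The restructured bfa_port_link_s wraps the old vc_fcf union inside a new attr union with bfa_fcport_loop_info_s as a sibling: one attribute blob, interpreted as loop info or as VC/FCF info depending on the negotiated topology. A minimal tagged-union sketch of that layout (types are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

enum topo { TOPO_LOOP, TOPO_P2P };

struct link_attr {
	enum topo topology;	/* the tag */
	union {
		struct { uint8_t myalpa; } loop_info;	/* loop only */
		union {
			struct { uint16_t vc_credits; } qos_vc;
			struct { uint8_t fcf_mac[6]; } fcf;
		} vc_fcf;	/* point-to-point / fabric */
	} attr;
};

int main(void)
{
	struct link_attr la = { .topology = TOPO_LOOP };

	la.attr.loop_info.myalpa = 0x01;
	if (la.topology == TOPO_LOOP)
		printf("claimed ALPA 0x%02x\n",
		       (unsigned)la.attr.loop_info.myalpa);
	return 0;
}
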
| @@ -1112,6 +1184,9 @@ struct bfa_port_fc_stats_s { | |||
| 1112 | u64 tx_frames; /* Tx frames */ | 1184 | u64 tx_frames; /* Tx frames */ |
| 1113 | u64 tx_words; /* Tx words */ | 1185 | u64 tx_words; /* Tx words */ |
| 1114 | u64 tx_lip; /* Tx LIP */ | 1186 | u64 tx_lip; /* Tx LIP */ |
| 1187 | u64 tx_lip_f7f7; /* Tx LIP_F7F7 */ | ||
| 1188 | u64 tx_lip_f8f7; /* Tx LIP_F8F7 */ | ||
| 1189 | u64 tx_arbf0; /* Tx ARB F0 */ | ||
| 1115 | u64 tx_nos; /* Tx NOS */ | 1190 | u64 tx_nos; /* Tx NOS */ |
| 1116 | u64 tx_ols; /* Tx OLS */ | 1191 | u64 tx_ols; /* Tx OLS */ |
| 1117 | u64 tx_lr; /* Tx LR */ | 1192 | u64 tx_lr; /* Tx LR */ |
| @@ -1119,6 +1194,9 @@ struct bfa_port_fc_stats_s { | |||
| 1119 | u64 rx_frames; /* Rx frames */ | 1194 | u64 rx_frames; /* Rx frames */ |
| 1120 | u64 rx_words; /* Rx words */ | 1195 | u64 rx_words; /* Rx words */ |
| 1121 | u64 lip_count; /* Rx LIP */ | 1196 | u64 lip_count; /* Rx LIP */ |
| 1197 | u64 rx_lip_f7f7; /* Rx LIP_F7F7 */ | ||
| 1198 | u64 rx_lip_f8f7; /* Rx LIP_F8F7 */ | ||
| 1199 | u64 rx_arbf0; /* Rx ARB F0 */ | ||
| 1122 | u64 nos_count; /* Rx NOS */ | 1200 | u64 nos_count; /* Rx NOS */ |
| 1123 | u64 ols_count; /* Rx OLS */ | 1201 | u64 ols_count; /* Rx OLS */ |
| 1124 | u64 lr_count; /* Rx LR */ | 1202 | u64 lr_count; /* Rx LR */ |
| @@ -1140,6 +1218,7 @@ struct bfa_port_fc_stats_s { | |||
| 1140 | u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ | 1218 | u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ |
| 1141 | u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ | 1219 | u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ |
| 1142 | u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ | 1220 | u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ |
| 1221 | u64 loop_timeouts; /* Loop timeouts */ | ||
| 1143 | }; | 1222 | }; |
| 1144 | 1223 | ||
| 1145 | /* | 1224 | /* |
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index e0beb4d7e264..bea821b98030 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h | |||
| @@ -24,6 +24,7 @@ typedef u64 wwn_t; | |||
| 24 | 24 | ||
| 25 | #define WWN_NULL (0) | 25 | #define WWN_NULL (0) |
| 26 | #define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ | 26 | #define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ |
| 27 | #define FC_ALPA_MAX 128 | ||
| 27 | 28 | ||
| 28 | #pragma pack(1) | 29 | #pragma pack(1) |
| 29 | 30 | ||
| @@ -1015,6 +1016,10 @@ struct fc_symname_s { | |||
| 1015 | u8 symname[FC_SYMNAME_MAX]; | 1016 | u8 symname[FC_SYMNAME_MAX]; |
| 1016 | }; | 1017 | }; |
| 1017 | 1018 | ||
| 1019 | struct fc_alpabm_s { | ||
| 1020 | u8 alpa_bm[FC_ALPA_MAX / 8]; | ||
| 1021 | }; | ||
| 1022 | |||
| 1018 | /* | 1023 | /* |
| 1019 | * protocol default timeout values | 1024 | * protocol default timeout values |
| 1020 | */ | 1025 | */ |
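
fc_alpabm_s packs FC_ALPA_MAX (128) arbitrated-loop physical addresses into 128/8 = 16 bytes, one bit per ALPA. A sketch of set/test helpers over such a bitmap; the bit order within a byte is an assumption here, not taken from the driver:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FC_ALPA_MAX 128

struct alpabm {
	uint8_t bm[FC_ALPA_MAX / 8];
};

static void alpa_set(struct alpabm *m, unsigned alpa_index)
{
	m->bm[alpa_index / 8] |= 1u << (alpa_index % 8);
}

static int alpa_test(const struct alpabm *m, unsigned alpa_index)
{
	return (m->bm[alpa_index / 8] >> (alpa_index % 8)) & 1;
}

int main(void)
{
	struct alpabm m;

	memset(&m, 0, sizeof(m));
	alpa_set(&m, 10);
	printf("claimed(10)=%d claimed(11)=%d\n",
	       alpa_test(&m, 10), alpa_test(&m, 11));
	return 0;
}
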
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index 273cee90b3b4..dce787f6cca2 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c | |||
| @@ -228,6 +228,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
| 228 | 228 | ||
| 229 | memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); | 229 | memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); |
| 230 | 230 | ||
| 231 | /* For FC AL bb_cr is 0 and altbbcred is 1 */ | ||
| 232 | if (!bb_cr) | ||
| 233 | plogi->csp.altbbcred = 1; | ||
| 234 | |||
| 231 | plogi->els_cmd.els_code = els_code; | 235 | plogi->els_cmd.els_code = els_code; |
| 232 | if (els_code == FC_ELS_PLOGI) | 236 | if (els_code == FC_ELS_PLOGI) |
| 233 | fc_els_req_build(fchs, d_id, s_id, ox_id); | 237 | fc_els_req_build(fchs, d_id, s_id, ox_id); |
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 1633963c66ca..27b560962357 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c | |||
| @@ -158,6 +158,7 @@ enum bfa_tskim_event { | |||
| 158 | BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ | 158 | BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ |
| 159 | BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ | 159 | BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ |
| 160 | BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ | 160 | BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ |
| 161 | BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */ | ||
| 161 | }; | 162 | }; |
| 162 | 163 | ||
| 163 | /* | 164 | /* |
| @@ -3036,7 +3037,7 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim) | |||
| 3036 | static void | 3037 | static void |
| 3037 | bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3038 | bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3038 | { | 3039 | { |
| 3039 | bfa_trc(tskim->bfa, event); | 3040 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3040 | 3041 | ||
| 3041 | switch (event) { | 3042 | switch (event) { |
| 3042 | case BFA_TSKIM_SM_START: | 3043 | case BFA_TSKIM_SM_START: |
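
Each bfa_tskim state handler now traces tskim->tsk_tag << 16 | event instead of the bare event, so a single 32-bit trace word identifies both the task and what happened to it. The packing and unpacking in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_trc(uint16_t tsk_tag, uint16_t event)
{
	return ((uint32_t)tsk_tag << 16) | event;
}

int main(void)
{
	uint32_t w = pack_trc(0x002a, 7);

	printf("word=0x%08x tag=%u event=%u\n",
	       (unsigned)w, (unsigned)(w >> 16), (unsigned)(w & 0xffff));
	return 0;
}
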
| @@ -3074,7 +3075,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
| 3074 | static void | 3075 | static void |
| 3075 | bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3076 | bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3076 | { | 3077 | { |
| 3077 | bfa_trc(tskim->bfa, event); | 3078 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3078 | 3079 | ||
| 3079 | switch (event) { | 3080 | switch (event) { |
| 3080 | case BFA_TSKIM_SM_DONE: | 3081 | case BFA_TSKIM_SM_DONE: |
| @@ -3110,7 +3111,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
| 3110 | static void | 3111 | static void |
| 3111 | bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3112 | bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3112 | { | 3113 | { |
| 3113 | bfa_trc(tskim->bfa, event); | 3114 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3114 | 3115 | ||
| 3115 | switch (event) { | 3116 | switch (event) { |
| 3116 | case BFA_TSKIM_SM_DONE: | 3117 | case BFA_TSKIM_SM_DONE: |
| @@ -3119,6 +3120,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
| 3119 | */ | 3120 | */ |
| 3120 | break; | 3121 | break; |
| 3121 | 3122 | ||
| 3123 | case BFA_TSKIM_SM_UTAG: | ||
| 3122 | case BFA_TSKIM_SM_CLEANUP_DONE: | 3124 | case BFA_TSKIM_SM_CLEANUP_DONE: |
| 3123 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); | 3125 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); |
| 3124 | bfa_tskim_cleanup_ios(tskim); | 3126 | bfa_tskim_cleanup_ios(tskim); |
| @@ -3138,7 +3140,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
| 3138 | static void | 3140 | static void |
| 3139 | bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3141 | bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3140 | { | 3142 | { |
| 3141 | bfa_trc(tskim->bfa, event); | 3143 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3142 | 3144 | ||
| 3143 | switch (event) { | 3145 | switch (event) { |
| 3144 | case BFA_TSKIM_SM_IOS_DONE: | 3146 | case BFA_TSKIM_SM_IOS_DONE: |
| @@ -3170,7 +3172,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
| 3170 | static void | 3172 | static void |
| 3171 | bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3173 | bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3172 | { | 3174 | { |
| 3173 | bfa_trc(tskim->bfa, event); | 3175 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3174 | 3176 | ||
| 3175 | switch (event) { | 3177 | switch (event) { |
| 3176 | case BFA_TSKIM_SM_QRESUME: | 3178 | case BFA_TSKIM_SM_QRESUME: |
| @@ -3207,7 +3209,7 @@ static void | |||
| 3207 | bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | 3209 | bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, |
| 3208 | enum bfa_tskim_event event) | 3210 | enum bfa_tskim_event event) |
| 3209 | { | 3211 | { |
| 3210 | bfa_trc(tskim->bfa, event); | 3212 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3211 | 3213 | ||
| 3212 | switch (event) { | 3214 | switch (event) { |
| 3213 | case BFA_TSKIM_SM_DONE: | 3215 | case BFA_TSKIM_SM_DONE: |
| @@ -3238,7 +3240,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | |||
| 3238 | static void | 3240 | static void |
| 3239 | bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 3241 | bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
| 3240 | { | 3242 | { |
| 3241 | bfa_trc(tskim->bfa, event); | 3243 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
| 3242 | 3244 | ||
| 3243 | switch (event) { | 3245 | switch (event) { |
| 3244 | case BFA_TSKIM_SM_HCB: | 3246 | case BFA_TSKIM_SM_HCB: |
| @@ -3560,6 +3562,8 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
| 3560 | if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { | 3562 | if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { |
| 3561 | bfa_stats(tskim->itnim, tm_cleanup_comps); | 3563 | bfa_stats(tskim->itnim, tm_cleanup_comps); |
| 3562 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); | 3564 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); |
| 3565 | } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) { | ||
| 3566 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG); | ||
| 3563 | } else { | 3567 | } else { |
| 3564 | bfa_stats(tskim->itnim, tm_fw_rsps); | 3568 | bfa_stats(tskim->itnim, tm_fw_rsps); |
| 3565 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); | 3569 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); |
| @@ -3699,6 +3703,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 3699 | struct bfa_mem_dma_s *seg_ptr; | 3703 | struct bfa_mem_dma_s *seg_ptr; |
| 3700 | u16 idx, nsegs, num_io_req; | 3704 | u16 idx, nsegs, num_io_req; |
| 3701 | 3705 | ||
| 3706 | fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs; | ||
| 3702 | fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; | 3707 | fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; |
| 3703 | fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; | 3708 | fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; |
| 3704 | fcp->num_itns = cfg->fwcfg.num_rports; | 3709 | fcp->num_itns = cfg->fwcfg.num_rports; |
| @@ -3721,6 +3726,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 3721 | bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); | 3726 | bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); |
| 3722 | } | 3727 | } |
| 3723 | 3728 | ||
| 3729 | fcp->throttle_update_required = 1; | ||
| 3724 | bfa_fcpim_attach(fcp, bfad, cfg, pcidev); | 3730 | bfa_fcpim_attach(fcp, bfad, cfg, pcidev); |
| 3725 | 3731 | ||
| 3726 | bfa_iotag_attach(fcp); | 3732 | bfa_iotag_attach(fcp); |
| @@ -3759,23 +3765,33 @@ bfa_fcp_iocdisable(struct bfa_s *bfa) | |||
| 3759 | { | 3765 | { |
| 3760 | struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); | 3766 | struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); |
| 3761 | 3767 | ||
| 3762 | /* Enqueue unused ioim resources to free_q */ | ||
| 3763 | list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q); | ||
| 3764 | |||
| 3765 | bfa_fcpim_iocdisable(fcp); | 3768 | bfa_fcpim_iocdisable(fcp); |
| 3766 | } | 3769 | } |
| 3767 | 3770 | ||
| 3768 | void | 3771 | void |
| 3769 | bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) | 3772 | bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw) |
| 3770 | { | 3773 | { |
| 3771 | struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); | 3774 | struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); |
| 3772 | struct list_head *qe; | 3775 | struct list_head *qe; |
| 3773 | int i; | 3776 | int i; |
| 3774 | 3777 | ||
| 3778 | /* Update io throttle value only once during driver load time */ | ||
| 3779 | if (!mod->throttle_update_required) | ||
| 3780 | return; | ||
| 3781 | |||
| 3775 | for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { | 3782 | for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { |
| 3776 | bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); | 3783 | bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); |
| 3777 | list_add_tail(qe, &mod->iotag_unused_q); | 3784 | list_add_tail(qe, &mod->iotag_unused_q); |
| 3778 | } | 3785 | } |
| 3786 | |||
| 3787 | if (mod->num_ioim_reqs != num_ioim_fw) { | ||
| 3788 | bfa_trc(bfa, mod->num_ioim_reqs); | ||
| 3789 | bfa_trc(bfa, num_ioim_fw); | ||
| 3790 | } | ||
| 3791 | |||
| 3792 | mod->max_ioim_reqs = max_ioim_fw; | ||
| 3793 | mod->num_ioim_reqs = num_ioim_fw; | ||
| 3794 | mod->throttle_update_required = 0; | ||
| 3779 | } | 3795 | } |
| 3780 | 3796 | ||
| 3781 | void | 3797 | void |
| @@ -3833,3 +3849,88 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp) | |||
| 3833 | 3849 | ||
| 3834 | bfa_mem_kva_curp(fcp) = (u8 *) iotag; | 3850 | bfa_mem_kva_curp(fcp) = (u8 *) iotag; |
| 3835 | } | 3851 | } |
| 3852 | |||
| 3853 | |||
| 3854 | /** | ||
| 3855 | * To send config req, first try to use throttle value from flash | ||
| 3856 | * If 0, then use driver parameter | ||
| 3857 | * We need to use min(flash_val, drv_val) because | ||
| 3858 | * memory allocation was done based on this cfg'd value | ||
| 3859 | */ | ||
| 3860 | u16 | ||
| 3861 | bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param) | ||
| 3862 | { | ||
| 3863 | u16 tmp; | ||
| 3864 | struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); | ||
| 3865 | |||
| 3866 | /* | ||
| 3867 | * If throttle value from flash is already in effect after driver is | ||
| 3868 | * loaded then until next load, always return current value instead | ||
| 3869 | * of actual flash value | ||
| 3870 | */ | ||
| 3871 | if (!fcp->throttle_update_required) | ||
| 3872 | return (u16)fcp->num_ioim_reqs; | ||
| 3873 | |||
| 3874 | tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0; | ||
| 3875 | if (!tmp || (tmp > drv_cfg_param)) | ||
| 3876 | tmp = drv_cfg_param; | ||
| 3877 | |||
| 3878 | return tmp; | ||
| 3879 | } | ||
| 3880 | |||
| 3881 | bfa_status_t | ||
| 3882 | bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value) | ||
| 3883 | { | ||
| 3884 | if (!bfa_dconf_get_min_cfg(bfa)) { | ||
| 3885 | BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value; | ||
| 3886 | BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1; | ||
| 3887 | return BFA_STATUS_OK; | ||
| 3888 | } | ||
| 3889 | |||
| 3890 | return BFA_STATUS_FAILED; | ||
| 3891 | } | ||
| 3892 | |||
| 3893 | u16 | ||
| 3894 | bfa_fcpim_read_throttle(struct bfa_s *bfa) | ||
| 3895 | { | ||
| 3896 | struct bfa_throttle_cfg_s *throttle_cfg = | ||
| 3897 | &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg); | ||
| 3898 | |||
| 3899 | return ((!bfa_dconf_get_min_cfg(bfa)) ? | ||
| 3900 | ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0); | ||
| 3901 | } | ||
| 3902 | |||
| 3903 | bfa_status_t | ||
| 3904 | bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value) | ||
| 3905 | { | ||
| 3906 | /* in min cfg no commands should run. */ | ||
| 3907 | if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || | ||
| 3908 | (!bfa_dconf_read_data_valid(bfa))) | ||
| 3909 | return BFA_STATUS_FAILED; | ||
| 3910 | |||
| 3911 | bfa_fcpim_write_throttle(bfa, value); | ||
| 3912 | |||
| 3913 | return bfa_dconf_update(bfa); | ||
| 3914 | } | ||
| 3915 | |||
| 3916 | bfa_status_t | ||
| 3917 | bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf) | ||
| 3918 | { | ||
| 3919 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); | ||
| 3920 | struct bfa_defs_fcpim_throttle_s throttle; | ||
| 3921 | |||
| 3922 | if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || | ||
| 3923 | (!bfa_dconf_read_data_valid(bfa))) | ||
| 3924 | return BFA_STATUS_FAILED; | ||
| 3925 | |||
| 3926 | memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s)); | ||
| 3927 | |||
| 3928 | throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs); | ||
| 3929 | throttle.cfg_value = bfa_fcpim_read_throttle(bfa); | ||
| 3930 | if (!throttle.cfg_value) | ||
| 3931 | throttle.cfg_value = throttle.cur_value; | ||
| 3932 | throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs); | ||
| 3933 | memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s)); | ||
| 3934 | |||
| 3935 | return BFA_STATUS_OK; | ||
| 3936 | } | ||
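
The throttle plumbing added above boils down to a small precedence rule: a value already in effect wins until the next driver load; otherwise a non-zero flash value is used, but never above the driver parameter, because IO memory was sized from that parameter. A minimal standalone sketch of the rule, with the driver state passed as plain arguments (names here are illustrative, not driver API):

    #include <stdint.h>

    /*
     * Throttle selection sketch:
     *   - a value already in effect is kept until the next load
     *   - otherwise take the flash value, capped by the driver parameter
     *     (memory was allocated for at most drv_cfg IO requests)
     *   - flash_val == 0 means "not configured in flash"
     */
    static uint16_t pick_throttle(int update_required, uint16_t in_effect,
                                  uint16_t flash_val, uint16_t drv_cfg)
    {
            if (!update_required)
                    return in_effect;
            if (flash_val == 0 || flash_val > drv_cfg)
                    return drv_cfg;
            return flash_val;
    }

For example, pick_throttle(1, 0, 0, 256) and pick_throttle(1, 0, 512, 256) both yield 256, while pick_throttle(1, 0, 128, 256) yields 128, matching the min(flash_val, drv_val) comment.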
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 36f26da80f76..e693af6e5930 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h | |||
| @@ -42,7 +42,7 @@ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, | |||
| 42 | void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); | 42 | void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); |
| 43 | void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); | 43 | void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); |
| 44 | void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); | 44 | void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); |
| 45 | void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); | 45 | void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw); |
| 46 | 46 | ||
| 47 | #define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) | 47 | #define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) |
| 48 | #define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) | 48 | #define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) |
| @@ -51,7 +51,9 @@ void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); | |||
| 51 | #define BFA_ITN_FROM_TAG(_fcp, _tag) \ | 51 | #define BFA_ITN_FROM_TAG(_fcp, _tag) \ |
| 52 | ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) | 52 | ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) |
| 53 | #define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ | 53 | #define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ |
| 54 | bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN) | 54 | bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \ |
| 55 | BFI_IOIM_SNSLEN) | ||
| 56 | |||
| 55 | 57 | ||
| 56 | #define BFA_ITNIM_MIN 32 | 58 | #define BFA_ITNIM_MIN 32 |
| 57 | #define BFA_ITNIM_MAX 1024 | 59 | #define BFA_ITNIM_MAX 1024 |
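
The BFA_SNSINFO_FROM_TAG() change above masks the IO tag before it is used to index the sense-info DMA buffers; once the upper tag bits carry extra state, indexing with the raw tag would address past the allocated slots. A hedged sketch of the idea (the mask width below is an assumption for illustration; the driver's real BFA_IOIM_IOTAG_MASK may differ):

    #include <stdint.h>

    #define IOTAG_MASK 0x7fffu  /* assumed: low bits select the buffer slot */

    /* Strip any generation/state bits from the tag before using it as
     * an array index into the per-IO sense buffers. */
    static inline uint32_t sns_slot_from_tag(uint32_t tag)
    {
            return tag & IOTAG_MASK;
    }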
| @@ -148,6 +150,7 @@ struct bfa_fcp_mod_s { | |||
| 148 | struct list_head iotag_unused_q; /* unused IO resources*/ | 150 | struct list_head iotag_unused_q; /* unused IO resources*/ |
| 149 | struct bfa_iotag_s *iotag_arr; | 151 | struct bfa_iotag_s *iotag_arr; |
| 150 | struct bfa_itn_s *itn_arr; | 152 | struct bfa_itn_s *itn_arr; |
| 153 | int max_ioim_reqs; | ||
| 151 | int num_ioim_reqs; | 154 | int num_ioim_reqs; |
| 152 | int num_fwtio_reqs; | 155 | int num_fwtio_reqs; |
| 153 | int num_itns; | 156 | int num_itns; |
| @@ -155,6 +158,7 @@ struct bfa_fcp_mod_s { | |||
| 155 | struct bfa_fcpim_s fcpim; | 158 | struct bfa_fcpim_s fcpim; |
| 156 | struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; | 159 | struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; |
| 157 | struct bfa_mem_kva_s kva_seg; | 160 | struct bfa_mem_kva_s kva_seg; |
| 161 | int throttle_update_required; | ||
| 158 | }; | 162 | }; |
| 159 | 163 | ||
| 160 | /* | 164 | /* |
| @@ -416,5 +420,10 @@ bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, | |||
| 416 | bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, | 420 | bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, |
| 417 | wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); | 421 | wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); |
| 418 | bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); | 422 | bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); |
| 423 | u16 bfa_fcpim_read_throttle(struct bfa_s *bfa); | ||
| 424 | bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value); | ||
| 425 | bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value); | ||
| 426 | bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf); | ||
| 427 | u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param); | ||
| 419 | 428 | ||
| 420 | #endif /* __BFA_FCPIM_H__ */ | 429 | #endif /* __BFA_FCPIM_H__ */ |
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index fd3e84d32bd2..d428808fb37e 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c | |||
| @@ -303,16 +303,30 @@ static void | |||
| 303 | bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, | 303 | bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, |
| 304 | enum bfa_fcs_fabric_event event) | 304 | enum bfa_fcs_fabric_event event) |
| 305 | { | 305 | { |
| 306 | struct bfa_s *bfa = fabric->fcs->bfa; | ||
| 307 | |||
| 306 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); | 308 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); |
| 307 | bfa_trc(fabric->fcs, event); | 309 | bfa_trc(fabric->fcs, event); |
| 308 | 310 | ||
| 309 | switch (event) { | 311 | switch (event) { |
| 310 | case BFA_FCS_FABRIC_SM_START: | 312 | case BFA_FCS_FABRIC_SM_START: |
| 311 | if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { | 313 | if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) { |
| 314 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | ||
| 315 | break; | ||
| 316 | } | ||
| 317 | if (bfa_fcport_get_topology(bfa) == | ||
| 318 | BFA_PORT_TOPOLOGY_LOOP) { | ||
| 319 | fabric->fab_type = BFA_FCS_FABRIC_LOOP; | ||
| 320 | fabric->bport.pid = bfa_fcport_get_myalpa(bfa); | ||
| 321 | fabric->bport.pid = bfa_hton3b(fabric->bport.pid); | ||
| 322 | bfa_sm_set_state(fabric, | ||
| 323 | bfa_fcs_fabric_sm_online); | ||
| 324 | bfa_fcs_fabric_set_opertype(fabric); | ||
| 325 | bfa_fcs_lport_online(&fabric->bport); | ||
| 326 | } else { | ||
| 312 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); | 327 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); |
| 313 | bfa_fcs_fabric_login(fabric); | 328 | bfa_fcs_fabric_login(fabric); |
| 314 | } else | 329 | } |
| 315 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | ||
| 316 | break; | 330 | break; |
| 317 | 331 | ||
| 318 | case BFA_FCS_FABRIC_SM_LINK_UP: | 332 | case BFA_FCS_FABRIC_SM_LINK_UP: |
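
In the loop branch above, the one-byte ALPA from bfa_fcport_get_myalpa() becomes the base port's 24-bit FC address via bfa_hton3b(). On a little-endian host that amounts to reversing the three low bytes, which the following sketch illustrates (this expansion is an assumption about bfa_hton3b(); only the call itself appears in the diff):

    #include <stdint.h>

    /* Reverse the low three bytes of a 24-bit FC port ID, e.g. an
     * ALPA of 0x01 (pid 0x000001) becomes 0x010000 in-register. */
    static uint32_t hton3b_sketch(uint32_t pid)
    {
            return ((pid & 0x0000ffu) << 16) |
                    (pid & 0x00ff00u) |
                   ((pid & 0xff0000u) >> 16);
    }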
| @@ -337,16 +351,28 @@ static void | |||
| 337 | bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, | 351 | bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, |
| 338 | enum bfa_fcs_fabric_event event) | 352 | enum bfa_fcs_fabric_event event) |
| 339 | { | 353 | { |
| 354 | struct bfa_s *bfa = fabric->fcs->bfa; | ||
| 355 | |||
| 340 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); | 356 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); |
| 341 | bfa_trc(fabric->fcs, event); | 357 | bfa_trc(fabric->fcs, event); |
| 342 | 358 | ||
| 343 | switch (event) { | 359 | switch (event) { |
| 344 | case BFA_FCS_FABRIC_SM_LINK_UP: | 360 | case BFA_FCS_FABRIC_SM_LINK_UP: |
| 345 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); | 361 | if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) { |
| 346 | bfa_fcs_fabric_login(fabric); | 362 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); |
| 363 | bfa_fcs_fabric_login(fabric); | ||
| 364 | break; | ||
| 365 | } | ||
| 366 | fabric->fab_type = BFA_FCS_FABRIC_LOOP; | ||
| 367 | fabric->bport.pid = bfa_fcport_get_myalpa(bfa); | ||
| 368 | fabric->bport.pid = bfa_hton3b(fabric->bport.pid); | ||
| 369 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); | ||
| 370 | bfa_fcs_fabric_set_opertype(fabric); | ||
| 371 | bfa_fcs_lport_online(&fabric->bport); | ||
| 347 | break; | 372 | break; |
| 348 | 373 | ||
| 349 | case BFA_FCS_FABRIC_SM_RETRY_OP: | 374 | case BFA_FCS_FABRIC_SM_RETRY_OP: |
| 375 | case BFA_FCS_FABRIC_SM_LOOPBACK: | ||
| 350 | break; | 376 | break; |
| 351 | 377 | ||
| 352 | case BFA_FCS_FABRIC_SM_DELETE: | 378 | case BFA_FCS_FABRIC_SM_DELETE: |
| @@ -595,14 +621,20 @@ void | |||
| 595 | bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | 621 | bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, |
| 596 | enum bfa_fcs_fabric_event event) | 622 | enum bfa_fcs_fabric_event event) |
| 597 | { | 623 | { |
| 624 | struct bfa_s *bfa = fabric->fcs->bfa; | ||
| 625 | |||
| 598 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); | 626 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); |
| 599 | bfa_trc(fabric->fcs, event); | 627 | bfa_trc(fabric->fcs, event); |
| 600 | 628 | ||
| 601 | switch (event) { | 629 | switch (event) { |
| 602 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 630 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
| 603 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | 631 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); |
| 604 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); | 632 | if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { |
| 605 | bfa_fcs_fabric_notify_offline(fabric); | 633 | bfa_fcs_lport_offline(&fabric->bport); |
| 634 | } else { | ||
| 635 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); | ||
| 636 | bfa_fcs_fabric_notify_offline(fabric); | ||
| 637 | } | ||
| 606 | break; | 638 | break; |
| 607 | 639 | ||
| 608 | case BFA_FCS_FABRIC_SM_DELETE: | 640 | case BFA_FCS_FABRIC_SM_DELETE: |
| @@ -719,20 +751,29 @@ static void | |||
| 719 | bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, | 751 | bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, |
| 720 | enum bfa_fcs_fabric_event event) | 752 | enum bfa_fcs_fabric_event event) |
| 721 | { | 753 | { |
| 754 | struct bfa_s *bfa = fabric->fcs->bfa; | ||
| 755 | |||
| 722 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); | 756 | bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); |
| 723 | bfa_trc(fabric->fcs, event); | 757 | bfa_trc(fabric->fcs, event); |
| 724 | 758 | ||
| 725 | switch (event) { | 759 | switch (event) { |
| 726 | case BFA_FCS_FABRIC_SM_STOPCOMP: | 760 | case BFA_FCS_FABRIC_SM_STOPCOMP: |
| 727 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); | 761 | if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) { |
| 728 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); | 762 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); |
| 763 | } else { | ||
| 764 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); | ||
| 765 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); | ||
| 766 | } | ||
| 729 | break; | 767 | break; |
| 730 | 768 | ||
| 731 | case BFA_FCS_FABRIC_SM_LINK_UP: | 769 | case BFA_FCS_FABRIC_SM_LINK_UP: |
| 732 | break; | 770 | break; |
| 733 | 771 | ||
| 734 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 772 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
| 735 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); | 773 | if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) |
| 774 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); | ||
| 775 | else | ||
| 776 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); | ||
| 736 | break; | 777 | break; |
| 737 | 778 | ||
| 738 | default: | 779 | default: |
| @@ -975,9 +1016,6 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) | |||
| 975 | struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; | 1016 | struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; |
| 976 | u8 alpa = 0, bb_scn = 0; | 1017 | u8 alpa = 0, bb_scn = 0; |
| 977 | 1018 | ||
| 978 | if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) | ||
| 979 | alpa = bfa_fcport_get_myalpa(bfa); | ||
| 980 | |||
| 981 | if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && | 1019 | if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && |
| 982 | (!fabric->fcs->bbscn_flogi_rjt)) | 1020 | (!fabric->fcs->bbscn_flogi_rjt)) |
| 983 | bb_scn = BFA_FCS_PORT_DEF_BB_SCN; | 1021 | bb_scn = BFA_FCS_PORT_DEF_BB_SCN; |
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 6c4377cb287f..a449706c6bc0 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h | |||
| @@ -118,9 +118,9 @@ struct bfa_fcs_lport_fab_s { | |||
| 118 | #define MAX_ALPA_COUNT 127 | 118 | #define MAX_ALPA_COUNT 127 |
| 119 | 119 | ||
| 120 | struct bfa_fcs_lport_loop_s { | 120 | struct bfa_fcs_lport_loop_s { |
| 121 | u8 num_alpa; /* Num of ALPA entries in the map */ | 121 | u8 num_alpa; /* Num of ALPA entries in the map */ |
| 122 | u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional | 122 | u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */ |
| 123 | *Map */ | 123 | u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */ |
| 124 | struct bfa_fcs_lport_s *port; /* parent port */ | 124 | struct bfa_fcs_lport_s *port; /* parent port */ |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| @@ -175,6 +175,7 @@ enum bfa_fcs_fabric_type { | |||
| 175 | BFA_FCS_FABRIC_UNKNOWN = 0, | 175 | BFA_FCS_FABRIC_UNKNOWN = 0, |
| 176 | BFA_FCS_FABRIC_SWITCHED = 1, | 176 | BFA_FCS_FABRIC_SWITCHED = 1, |
| 177 | BFA_FCS_FABRIC_N2N = 2, | 177 | BFA_FCS_FABRIC_N2N = 2, |
| 178 | BFA_FCS_FABRIC_LOOP = 3, | ||
| 178 | }; | 179 | }; |
| 179 | 180 | ||
| 180 | 181 | ||
| @@ -350,9 +351,10 @@ void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, | |||
| 350 | struct bfa_fcxp_s *fcxp_alloced); | 351 | struct bfa_fcxp_s *fcxp_alloced); |
| 351 | void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); | 352 | void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); |
| 352 | void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); | 353 | void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); |
| 353 | void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); | 354 | void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport); |
| 354 | void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, | 355 | void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, |
| 355 | struct fchs_s *rx_frame, u32 len); | 356 | struct fchs_s *rx_frame, u32 len); |
| 357 | void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port); | ||
| 356 | 358 | ||
| 357 | struct bfa_fcs_vport_s { | 359 | struct bfa_fcs_vport_s { |
| 358 | struct list_head qe; /* queue elem */ | 360 | struct list_head qe; /* queue elem */ |
| @@ -453,6 +455,7 @@ struct bfa_fcs_rport_s { | |||
| 453 | struct bfa_rport_stats_s stats; /* rport stats */ | 455 | struct bfa_rport_stats_s stats; /* rport stats */ |
| 454 | enum bfa_rport_function scsi_function; /* Initiator/Target */ | 456 | enum bfa_rport_function scsi_function; /* Initiator/Target */ |
| 455 | struct bfa_fcs_rpf_s rpf; /* Rport features module */ | 457 | struct bfa_fcs_rpf_s rpf; /* Rport features module */ |
| 458 | bfa_boolean_t scn_online; /* SCN online flag */ | ||
| 456 | }; | 459 | }; |
| 457 | 460 | ||
| 458 | static inline struct bfa_rport_s * | 461 | static inline struct bfa_rport_s * |
| @@ -639,9 +642,9 @@ struct bfa_fcs_fdmi_hba_attr_s { | |||
| 639 | u8 model[16]; | 642 | u8 model[16]; |
| 640 | u8 model_desc[256]; | 643 | u8 model_desc[256]; |
| 641 | u8 hw_version[8]; | 644 | u8 hw_version[8]; |
| 642 | u8 driver_version[8]; | 645 | u8 driver_version[BFA_VERSION_LEN]; |
| 643 | u8 option_rom_ver[BFA_VERSION_LEN]; | 646 | u8 option_rom_ver[BFA_VERSION_LEN]; |
| 644 | u8 fw_version[8]; | 647 | u8 fw_version[BFA_VERSION_LEN]; |
| 645 | u8 os_name[256]; | 648 | u8 os_name[256]; |
| 646 | __be32 max_ct_pyld; | 649 | __be32 max_ct_pyld; |
| 647 | }; | 650 | }; |
| @@ -733,7 +736,7 @@ enum rport_event { | |||
| 733 | RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ | 736 | RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ |
| 734 | RPSM_EVENT_FCXP_SENT = 6, /* Frame has been sent */ | 737 | RPSM_EVENT_FCXP_SENT = 6, /* Frame has been sent */ |
| 735 | RPSM_EVENT_DELETE = 7, /* RPORT delete request */ | 738 | RPSM_EVENT_DELETE = 7, /* RPORT delete request */ |
| 736 | RPSM_EVENT_SCN = 8, /* state change notification */ | 739 | RPSM_EVENT_FAB_SCN = 8, /* state change notification */ |
| 737 | RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ | 740 | RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ |
| 738 | RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ | 741 | RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ |
| 739 | RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ | 742 | RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ |
| @@ -744,7 +747,9 @@ enum rport_event { | |||
| 744 | RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ | 747 | RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ |
| 745 | RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ | 748 | RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ |
| 746 | RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ | 749 | RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ |
| 747 | RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */ | 750 | RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */ |
| 751 | RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */ | ||
| 752 | RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */ | ||
| 748 | }; | 753 | }; |
| 749 | 754 | ||
| 750 | /* | 755 | /* |
| @@ -763,7 +768,7 @@ enum bfa_fcs_itnim_event { | |||
| 763 | BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ | 768 | BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ |
| 764 | BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ | 769 | BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ |
| 765 | BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ | 770 | BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ |
| 766 | BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */ | 771 | BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */ |
| 767 | }; | 772 | }; |
| 768 | 773 | ||
| 769 | /* | 774 | /* |
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 3b75f6fb2de1..1224d0462a49 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c | |||
| @@ -23,6 +23,34 @@ | |||
| 23 | 23 | ||
| 24 | BFA_TRC_FILE(FCS, PORT); | 24 | BFA_TRC_FILE(FCS, PORT); |
| 25 | 25 | ||
| 26 | /* | ||
| 27 | * ALPA to LIXA bitmap mapping | ||
| 28 | * | ||
| 29 | * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31 | ||
| 30 | * is for L_bit (login required) and is filled as ALPA 0x00 here. | ||
| 31 | */ | ||
| 32 | static const u8 loop_alpa_map[] = { | ||
| 33 | 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */ | ||
| 34 | 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */ | ||
| 35 | 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */ | ||
| 36 | 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */ | ||
| 37 | |||
| 38 | 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */ | ||
| 39 | 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */ | ||
| 40 | 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */ | ||
| 41 | 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */ | ||
| 42 | |||
| 43 | 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */ | ||
| 44 | 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */ | ||
| 45 | 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */ | ||
| 46 | 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */ | ||
| 47 | |||
| 48 | 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */ | ||
| 49 | 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */ | ||
| 50 | 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */ | ||
| 51 | 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */ | ||
| 52 | }; | ||
| 53 | |||
| 26 | static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, | 54 | static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, |
| 27 | struct fchs_s *rx_fchs, u8 reason_code, | 55 | struct fchs_s *rx_fchs, u8 reason_code, |
| 28 | u8 reason_code_expl); | 56 | u8 reason_code_expl); |
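
The table above is indexed by loop-map bit position: byte i of the position bitmap, scanned most-significant bit first, maps bit j to loop_alpa_map[i * 8 + j], with index 0 reserved for the L_bit. A small sketch of extracting ALPAs from one bitmap byte under that convention (the driver's full walk appears later in bfa_fcport_get_loop_attr()):

    #include <stddef.h>
    #include <stdint.h>

    /* Collect the ALPAs whose bits are set in one bitmap byte.
     * Bit 7 (MSB) of byte `byte_idx` corresponds to table index
     * byte_idx * 8, bit 6 to byte_idx * 8 + 1, and so on. */
    static size_t alpas_from_bitmap_byte(const uint8_t *table, size_t byte_idx,
                                         uint8_t bm, uint8_t *out)
    {
            size_t n = 0;
            int j;

            for (j = 0; j < 8; j++)
                    if (bm & (1u << (7 - j)))
                            out[n++] = table[byte_idx * 8 + j];
            return n;
    }

`out` must have room for eight entries; the caller accumulates the results into the positional map.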
| @@ -51,6 +79,10 @@ static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port); | |||
| 51 | static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); | 79 | static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); |
| 52 | static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); | 80 | static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); |
| 53 | 81 | ||
| 82 | static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port); | ||
| 83 | static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port); | ||
| 84 | static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port); | ||
| 85 | |||
| 54 | static struct { | 86 | static struct { |
| 55 | void (*init) (struct bfa_fcs_lport_s *port); | 87 | void (*init) (struct bfa_fcs_lport_s *port); |
| 56 | void (*online) (struct bfa_fcs_lport_s *port); | 88 | void (*online) (struct bfa_fcs_lport_s *port); |
| @@ -62,7 +94,9 @@ static struct { | |||
| 62 | bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, | 94 | bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, |
| 63 | bfa_fcs_lport_fab_offline}, { | 95 | bfa_fcs_lport_fab_offline}, { |
| 64 | bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, | 96 | bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, |
| 65 | bfa_fcs_lport_n2n_offline}, | 97 | bfa_fcs_lport_n2n_offline}, { |
| 98 | bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online, | ||
| 99 | bfa_fcs_lport_loop_offline}, | ||
| 66 | }; | 100 | }; |
| 67 | 101 | ||
| 68 | /* | 102 | /* |
| @@ -1127,7 +1161,7 @@ static void | |||
| 1127 | bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) | 1161 | bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) |
| 1128 | { | 1162 | { |
| 1129 | bfa_fcs_lport_ns_online(port); | 1163 | bfa_fcs_lport_ns_online(port); |
| 1130 | bfa_fcs_lport_scn_online(port); | 1164 | bfa_fcs_lport_fab_scn_online(port); |
| 1131 | } | 1165 | } |
| 1132 | 1166 | ||
| 1133 | /* | 1167 | /* |
| @@ -1221,6 +1255,98 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port) | |||
| 1221 | n2n_port->reply_oxid = 0; | 1255 | n2n_port->reply_oxid = 0; |
| 1222 | } | 1256 | } |
| 1223 | 1257 | ||
| 1258 | void | ||
| 1259 | bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port) | ||
| 1260 | { | ||
| 1261 | int i = 0, j = 0, bit = 0, alpa_bit = 0; | ||
| 1262 | u8 k = 0; | ||
| 1263 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa); | ||
| 1264 | |||
| 1265 | port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid; | ||
| 1266 | port->pid = fcport->myalpa; | ||
| 1267 | port->pid = bfa_hton3b(port->pid); | ||
| 1268 | |||
| 1269 | for (i = 0; i < (FC_ALPA_MAX / 8); i++) { | ||
| 1270 | for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) { | ||
| 1271 | bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]); | ||
| 1272 | bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j))); | ||
| 1273 | if (bit) { | ||
| 1274 | port->port_topo.ploop.alpa_pos_map[k] = | ||
| 1275 | loop_alpa_map[(i * 8) + alpa_bit]; | ||
| 1276 | bfa_trc(port->fcs->bfa, k); | ||
| 1277 | bfa_trc(port->fcs->bfa, | ||
| 1278 | port->port_topo.ploop.alpa_pos_map[k]); | ||
| 1279 | k++; | ||
| 1280 | } | ||
| 1281 | } | ||
| 1282 | } | ||
| 1283 | port->port_topo.ploop.num_alpa = k; | ||
| 1284 | } | ||
| 1285 | |||
| 1286 | /* | ||
| 1287 | * Called by fcs/port to initialize Loop topology. | ||
| 1288 | */ | ||
| 1289 | static void | ||
| 1290 | bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port) | ||
| 1291 | { | ||
| 1292 | } | ||
| 1293 | |||
| 1294 | /* | ||
| 1295 | * Called by fcs/port to notify transition to online state. | ||
| 1296 | */ | ||
| 1297 | static void | ||
| 1298 | bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port) | ||
| 1299 | { | ||
| 1300 | u8 num_alpa = 0, alpabm_valid = 0; | ||
| 1301 | struct bfa_fcs_rport_s *rport; | ||
| 1302 | u8 *alpa_map = NULL; | ||
| 1303 | int i = 0; | ||
| 1304 | u32 pid; | ||
| 1305 | |||
| 1306 | bfa_fcport_get_loop_attr(port); | ||
| 1307 | |||
| 1308 | num_alpa = port->port_topo.ploop.num_alpa; | ||
| 1309 | alpabm_valid = port->port_topo.ploop.alpabm_valid; | ||
| 1310 | alpa_map = port->port_topo.ploop.alpa_pos_map; | ||
| 1311 | |||
| 1312 | bfa_trc(port->fcs->bfa, port->pid); | ||
| 1313 | bfa_trc(port->fcs->bfa, num_alpa); | ||
| 1314 | if (alpabm_valid == 1) { | ||
| 1315 | for (i = 0; i < num_alpa; i++) { | ||
| 1316 | bfa_trc(port->fcs->bfa, alpa_map[i]); | ||
| 1317 | if (alpa_map[i] != bfa_hton3b(port->pid)) { | ||
| 1318 | pid = alpa_map[i]; | ||
| 1319 | bfa_trc(port->fcs->bfa, pid); | ||
| 1320 | rport = bfa_fcs_lport_get_rport_by_pid(port, | ||
| 1321 | bfa_hton3b(pid)); | ||
| 1322 | if (!rport) | ||
| 1323 | rport = bfa_fcs_rport_create(port, | ||
| 1324 | bfa_hton3b(pid)); | ||
| 1325 | } | ||
| 1326 | } | ||
| 1327 | } else { | ||
| 1328 | for (i = 0; i < MAX_ALPA_COUNT; i++) { | ||
| 1329 | if (alpa_map[i] != port->pid) { | ||
| 1330 | pid = loop_alpa_map[i]; | ||
| 1331 | bfa_trc(port->fcs->bfa, pid); | ||
| 1332 | rport = bfa_fcs_lport_get_rport_by_pid(port, | ||
| 1333 | bfa_hton3b(pid)); | ||
| 1334 | if (!rport) | ||
| 1335 | rport = bfa_fcs_rport_create(port, | ||
| 1336 | bfa_hton3b(pid)); | ||
| 1337 | } | ||
| 1338 | } | ||
| 1339 | } | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | /* | ||
| 1343 | * Called by fcs/port to notify transition to offline state. | ||
| 1344 | */ | ||
| 1345 | static void | ||
| 1346 | bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port) | ||
| 1347 | { | ||
| 1348 | } | ||
| 1349 | |||
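
bfa_fcs_lport_loop_online() above discovers peers in one of two modes: with a valid positional bitmap it creates rports only for the ALPAs actually reported, otherwise it probes every assignable ALPA from the static table. A compact sketch of that control flow (create_rport() stands in for the get-by-PID-then-create pair in the driver, and the table contents are elided):

    #include <stdint.h>
    #include <stdio.h>

    #define ALPA_COUNT 127

    static const uint8_t loop_alpa_map_sketch[ALPA_COUNT]; /* contents elided */

    static void create_rport(uint8_t alpa)
    {
            printf("would create rport for ALPA 0x%02x\n", alpa);
    }

    static void loop_discover(const uint8_t *pos_map, int num_alpa,
                              int bm_valid, uint8_t my_alpa)
    {
            int i;

            if (bm_valid) {
                    for (i = 0; i < num_alpa; i++)   /* only reported peers */
                            if (pos_map[i] != my_alpa)
                                    create_rport(pos_map[i]);
            } else {
                    for (i = 0; i < ALPA_COUNT; i++) /* probe full ALPA space */
                            if (loop_alpa_map_sketch[i] != my_alpa)
                                    create_rport(loop_alpa_map_sketch[i]);
            }
    }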
| 1224 | #define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 | 1350 | #define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 |
| 1225 | 1351 | ||
| 1226 | /* | 1352 | /* |
| @@ -1888,13 +2014,10 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
| 1888 | sizeof(templen)); | 2014 | sizeof(templen)); |
| 1889 | } | 2015 | } |
| 1890 | 2016 | ||
| 1891 | /* | ||
| 1892 | * f/w Version = driver version | ||
| 1893 | */ | ||
| 1894 | attr = (struct fdmi_attr_s *) curr_ptr; | 2017 | attr = (struct fdmi_attr_s *) curr_ptr; |
| 1895 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); | 2018 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); |
| 1896 | templen = (u16) strlen(fcs_hba_attr->driver_version); | 2019 | templen = (u16) strlen(fcs_hba_attr->fw_version); |
| 1897 | memcpy(attr->value, fcs_hba_attr->driver_version, templen); | 2020 | memcpy(attr->value, fcs_hba_attr->fw_version, templen); |
| 1898 | templen = fc_roundup(templen, sizeof(u32)); | 2021 | templen = fc_roundup(templen, sizeof(u32)); |
| 1899 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; | 2022 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
| 1900 | len += templen; | 2023 | len += templen; |
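
Every FDMI attribute emitted in this routine follows the same type/length/value layout: a big-endian 16-bit attribute type, a length, and a value padded up to a 32-bit boundary, which is what the fc_roundup(templen, sizeof(u32)) and pointer-advance lines implement. A hedged sketch of one packing step (the layout is per FC-GS; whether the on-wire length covers the 4-byte header is an assumption here):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Append one attribute entry at dst; returns bytes consumed. */
    static size_t fdmi_pack_attr(uint8_t *dst, uint16_t type,
                                 const void *val, uint16_t vlen)
    {
            uint16_t padded = (uint16_t)((vlen + 3u) & ~3u); /* round to u32 */
            uint16_t be_type = htons(type);
            uint16_t be_len = htons((uint16_t)(4 + padded)); /* assumed: incl. header */

            memcpy(dst, &be_type, 2);
            memcpy(dst + 2, &be_len, 2);
            memset(dst + 4, 0, padded);      /* zero the pad bytes */
            memcpy(dst + 4, val, vlen);
            return 4u + padded;
    }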
| @@ -2296,6 +2419,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
| 2296 | { | 2419 | { |
| 2297 | struct bfa_fcs_lport_s *port = fdmi->ms->port; | 2420 | struct bfa_fcs_lport_s *port = fdmi->ms->port; |
| 2298 | struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; | 2421 | struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; |
| 2422 | struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; | ||
| 2299 | 2423 | ||
| 2300 | memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); | 2424 | memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); |
| 2301 | 2425 | ||
| @@ -2331,7 +2455,9 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
| 2331 | sizeof(driver_info->host_os_patch)); | 2455 | sizeof(driver_info->host_os_patch)); |
| 2332 | } | 2456 | } |
| 2333 | 2457 | ||
| 2334 | hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); | 2458 | /* Retrieve the max frame size from the port attr */ |
| 2459 | bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); | ||
| 2460 | hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; | ||
| 2335 | } | 2461 | } |
| 2336 | 2462 | ||
| 2337 | static void | 2463 | static void |
| @@ -2391,7 +2517,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
| 2391 | /* | 2517 | /* |
| 2392 | * Max PDU Size. | 2518 | * Max PDU Size. |
| 2393 | */ | 2519 | */ |
| 2394 | port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); | 2520 | port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize); |
| 2395 | 2521 | ||
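
A byte-order note on the two changes above: max_frm_size is converted with cpu_to_be32() at the point it is filled in here, and bfa_fcs_fdmi_get_hbaattr() then copies it into max_ct_pyld (also __be32) without converting again, so the value crosses exactly one conversion on its way to the wire. A tiny illustration of why the copy must stay conversion-free (values hypothetical):

    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t cpu_val = 2048;               /* example frame size */
            uint32_t wire_val = htonl(cpu_val);    /* convert exactly once */
            uint32_t copied = wire_val;            /* be32 -> be32: plain copy */

            /* A second htonl() on `copied` would swap the bytes back on
             * little-endian hosts and corrupt the advertised size. */
            return copied == htonl(cpu_val) ? 0 : 1;
    }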
| 2396 | /* | 2522 | /* |
| 2397 | * OS device Name | 2523 | * OS device Name |
| @@ -5199,7 +5325,7 @@ bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port) | |||
| 5199 | } | 5325 | } |
| 5200 | 5326 | ||
| 5201 | void | 5327 | void |
| 5202 | bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port) | 5328 | bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port) |
| 5203 | { | 5329 | { |
| 5204 | struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); | 5330 | struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); |
| 5205 | 5331 | ||
| @@ -5621,6 +5747,15 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) | |||
| 5621 | } | 5747 | } |
| 5622 | 5748 | ||
| 5623 | /* | 5749 | /* |
| 5750 | * Let the new loop map create any missing rports. | ||
| 5751 | */ | ||
| 5752 | void | ||
| 5753 | bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port) | ||
| 5754 | { | ||
| 5755 | bfa_fcs_lport_loop_online(port); | ||
| 5756 | } | ||
| 5757 | |||
| 5758 | /* | ||
| 5624 | * FCS virtual port state machine | 5759 | * FCS virtual port state machine |
| 5625 | */ | 5760 | */ |
| 5626 | 5761 | ||
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index cc43b2a58ce3..58ac643ba9f3 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c | |||
| @@ -106,9 +106,13 @@ static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, | |||
| 106 | enum rport_event event); | 106 | enum rport_event event); |
| 107 | static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, | 107 | static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, |
| 108 | enum rport_event event); | 108 | enum rport_event event); |
| 109 | static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, | 109 | static void bfa_fcs_rport_sm_adisc_online_sending( |
| 110 | enum rport_event event); | 110 | struct bfa_fcs_rport_s *rport, enum rport_event event); |
| 111 | static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, | 111 | static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, |
| 112 | enum rport_event event); | ||
| 113 | static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s | ||
| 114 | *rport, enum rport_event event); | ||
| 115 | static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, | ||
| 112 | enum rport_event event); | 116 | enum rport_event event); |
| 113 | static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, | 117 | static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, |
| 114 | enum rport_event event); | 118 | enum rport_event event); |
| @@ -150,8 +154,10 @@ static struct bfa_sm_table_s rport_sm_table[] = { | |||
| 150 | {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, | 154 | {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, |
| 151 | {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, | 155 | {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, |
| 152 | {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, | 156 | {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, |
| 153 | {BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, | 157 | {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC}, |
| 154 | {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, | 158 | {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC}, |
| 159 | {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC}, | ||
| 160 | {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC}, | ||
| 155 | {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, | 161 | {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, |
| 156 | {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, | 162 | {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, |
| 157 | {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, | 163 | {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, |
| @@ -231,10 +237,19 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, | |||
| 231 | bfa_fcs_rport_send_plogiacc(rport, NULL); | 237 | bfa_fcs_rport_send_plogiacc(rport, NULL); |
| 232 | break; | 238 | break; |
| 233 | 239 | ||
| 240 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 241 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 242 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); | ||
| 243 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 244 | bfa_fcs_rport_timeout, rport, | ||
| 245 | bfa_fcs_rport_del_timeout); | ||
| 246 | break; | ||
| 234 | case RPSM_EVENT_ADDRESS_CHANGE: | 247 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 235 | case RPSM_EVENT_SCN: | 248 | case RPSM_EVENT_FAB_SCN: |
| 236 | /* query the NS */ | 249 | /* query the NS */ |
| 237 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); | 250 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); |
| 251 | WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) == | ||
| 252 | BFA_PORT_TOPOLOGY_LOOP); | ||
| 238 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | 253 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); |
| 239 | rport->ns_retries = 0; | 254 | rport->ns_retries = 0; |
| 240 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 255 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
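
Each of the new RPSM_EVENT_SCN_OFFLINE arms in these sending/retry states repeats one teardown recipe: drop whatever is in flight, enter the offline state, and arm the delete timer so an rport that never reappears is eventually freed. A sketch of that recipe as a hypothetical helper (not a driver function; the cancel step differs per state, hence the callback):

    struct rport_sketch { int state; };
    enum { RP_STATE_OFFLINE = 1 };

    static void set_state(struct rport_sketch *rp, int s) { rp->state = s; }
    static void start_del_timer(struct rport_sketch *rp) { (void)rp; /* arm delete timeout */ }

    static void scn_offline_teardown(struct rport_sketch *rp,
                                     void (*cancel_pending)(struct rport_sketch *))
    {
            cancel_pending(rp);               /* walloc-cancel, discard, or timer-stop */
            set_state(rp, RP_STATE_OFFLINE);  /* park the rport */
            start_del_timer(rp);              /* free it if no SCN-online follows */
    }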
| @@ -280,12 +295,20 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, | |||
| 280 | 295 | ||
| 281 | case RPSM_EVENT_PLOGI_RCVD: | 296 | case RPSM_EVENT_PLOGI_RCVD: |
| 282 | case RPSM_EVENT_PLOGI_COMP: | 297 | case RPSM_EVENT_PLOGI_COMP: |
| 283 | case RPSM_EVENT_SCN: | 298 | case RPSM_EVENT_FAB_SCN: |
| 284 | /* | 299 | /* |
| 285 | * Ignore, SCN is possibly online notification. | 300 | * Ignore, SCN is possibly online notification. |
| 286 | */ | 301 | */ |
| 287 | break; | 302 | break; |
| 288 | 303 | ||
| 304 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 305 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 306 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); | ||
| 307 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 308 | bfa_fcs_rport_timeout, rport, | ||
| 309 | bfa_fcs_rport_del_timeout); | ||
| 310 | break; | ||
| 311 | |||
| 289 | case RPSM_EVENT_ADDRESS_CHANGE: | 312 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 290 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); | 313 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); |
| 291 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | 314 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); |
| @@ -346,9 +369,19 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, | |||
| 346 | bfa_fcs_rport_send_plogiacc(rport, NULL); | 369 | bfa_fcs_rport_send_plogiacc(rport, NULL); |
| 347 | break; | 370 | break; |
| 348 | 371 | ||
| 372 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 373 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 374 | bfa_timer_stop(&rport->timer); | ||
| 375 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 376 | bfa_fcs_rport_timeout, rport, | ||
| 377 | bfa_fcs_rport_del_timeout); | ||
| 378 | break; | ||
| 379 | |||
| 349 | case RPSM_EVENT_ADDRESS_CHANGE: | 380 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 350 | case RPSM_EVENT_SCN: | 381 | case RPSM_EVENT_FAB_SCN: |
| 351 | bfa_timer_stop(&rport->timer); | 382 | bfa_timer_stop(&rport->timer); |
| 383 | WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) == | ||
| 384 | BFA_PORT_TOPOLOGY_LOOP); | ||
| 352 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | 385 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); |
| 353 | rport->ns_retries = 0; | 386 | rport->ns_retries = 0; |
| 354 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 387 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| @@ -422,7 +455,18 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 422 | } | 455 | } |
| 423 | break; | 456 | break; |
| 424 | 457 | ||
| 425 | case RPSM_EVENT_PLOGI_RETRY: | 458 | case RPSM_EVENT_SCN_ONLINE: |
| 459 | break; | ||
| 460 | |||
| 461 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 462 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 463 | bfa_fcxp_discard(rport->fcxp); | ||
| 464 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 465 | bfa_fcs_rport_timeout, rport, | ||
| 466 | bfa_fcs_rport_del_timeout); | ||
| 467 | break; | ||
| 468 | |||
| 469 | case RPSM_EVENT_PLOGI_RETRY: | ||
| 426 | rport->plogi_retries = 0; | 470 | rport->plogi_retries = 0; |
| 427 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); | 471 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); |
| 428 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | 472 | bfa_timer_start(rport->fcs->bfa, &rport->timer, |
| @@ -440,8 +484,10 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 440 | break; | 484 | break; |
| 441 | 485 | ||
| 442 | case RPSM_EVENT_ADDRESS_CHANGE: | 486 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 443 | case RPSM_EVENT_SCN: | 487 | case RPSM_EVENT_FAB_SCN: |
| 444 | bfa_fcxp_discard(rport->fcxp); | 488 | bfa_fcxp_discard(rport->fcxp); |
| 489 | WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) == | ||
| 490 | BFA_PORT_TOPOLOGY_LOOP); | ||
| 445 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | 491 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); |
| 446 | rport->ns_retries = 0; | 492 | rport->ns_retries = 0; |
| 447 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 493 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| @@ -512,7 +558,8 @@ bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport, | |||
| 512 | case RPSM_EVENT_PLOGI_COMP: | 558 | case RPSM_EVENT_PLOGI_COMP: |
| 513 | case RPSM_EVENT_LOGO_IMP: | 559 | case RPSM_EVENT_LOGO_IMP: |
| 514 | case RPSM_EVENT_ADDRESS_CHANGE: | 560 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 515 | case RPSM_EVENT_SCN: | 561 | case RPSM_EVENT_FAB_SCN: |
| 562 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 516 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); | 563 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); |
| 517 | bfa_fcs_rport_fcs_offline_action(rport); | 564 | bfa_fcs_rport_fcs_offline_action(rport); |
| 518 | break; | 565 | break; |
| @@ -561,9 +608,10 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, | |||
| 561 | bfa_fcs_rport_fcs_offline_action(rport); | 608 | bfa_fcs_rport_fcs_offline_action(rport); |
| 562 | break; | 609 | break; |
| 563 | 610 | ||
| 564 | case RPSM_EVENT_SCN: | 611 | case RPSM_EVENT_FAB_SCN: |
| 565 | case RPSM_EVENT_LOGO_IMP: | 612 | case RPSM_EVENT_LOGO_IMP: |
| 566 | case RPSM_EVENT_ADDRESS_CHANGE: | 613 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 614 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 567 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); | 615 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); |
| 568 | bfa_fcs_rport_fcs_offline_action(rport); | 616 | bfa_fcs_rport_fcs_offline_action(rport); |
| 569 | break; | 617 | break; |
| @@ -595,14 +643,15 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 595 | bfa_trc(rport->fcs, event); | 643 | bfa_trc(rport->fcs, event); |
| 596 | 644 | ||
| 597 | switch (event) { | 645 | switch (event) { |
| 598 | case RPSM_EVENT_SCN: | 646 | case RPSM_EVENT_FAB_SCN: |
| 599 | if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { | 647 | if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { |
| 600 | bfa_sm_set_state(rport, | 648 | bfa_sm_set_state(rport, |
| 601 | bfa_fcs_rport_sm_nsquery_sending); | 649 | bfa_fcs_rport_sm_nsquery_sending); |
| 602 | rport->ns_retries = 0; | 650 | rport->ns_retries = 0; |
| 603 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 651 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| 604 | } else { | 652 | } else { |
| 605 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); | 653 | bfa_sm_set_state(rport, |
| 654 | bfa_fcs_rport_sm_adisc_online_sending); | ||
| 606 | bfa_fcs_rport_send_adisc(rport, NULL); | 655 | bfa_fcs_rport_send_adisc(rport, NULL); |
| 607 | } | 656 | } |
| 608 | break; | 657 | break; |
| @@ -610,6 +659,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 610 | case RPSM_EVENT_PLOGI_RCVD: | 659 | case RPSM_EVENT_PLOGI_RCVD: |
| 611 | case RPSM_EVENT_LOGO_IMP: | 660 | case RPSM_EVENT_LOGO_IMP: |
| 612 | case RPSM_EVENT_ADDRESS_CHANGE: | 661 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 662 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 613 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); | 663 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); |
| 614 | bfa_fcs_rport_hal_offline_action(rport); | 664 | bfa_fcs_rport_hal_offline_action(rport); |
| 615 | break; | 665 | break; |
| @@ -625,6 +675,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 625 | bfa_fcs_rport_hal_offline_action(rport); | 675 | bfa_fcs_rport_hal_offline_action(rport); |
| 626 | break; | 676 | break; |
| 627 | 677 | ||
| 678 | case RPSM_EVENT_SCN_ONLINE: | ||
| 628 | case RPSM_EVENT_PLOGI_COMP: | 679 | case RPSM_EVENT_PLOGI_COMP: |
| 629 | break; | 680 | break; |
| 630 | 681 | ||
| @@ -656,7 +707,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, | |||
| 656 | bfa_fcs_rport_hal_offline_action(rport); | 707 | bfa_fcs_rport_hal_offline_action(rport); |
| 657 | break; | 708 | break; |
| 658 | 709 | ||
| 659 | case RPSM_EVENT_SCN: | 710 | case RPSM_EVENT_FAB_SCN: |
| 660 | /* | 711 | /* |
| 661 | * ignore SCN, wait for response to query itself | 712 | * ignore SCN, wait for response to query itself |
| 662 | */ | 713 | */ |
| @@ -696,7 +747,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 696 | 747 | ||
| 697 | switch (event) { | 748 | switch (event) { |
| 698 | case RPSM_EVENT_ACCEPTED: | 749 | case RPSM_EVENT_ACCEPTED: |
| 699 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); | 750 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); |
| 700 | bfa_fcs_rport_send_adisc(rport, NULL); | 751 | bfa_fcs_rport_send_adisc(rport, NULL); |
| 701 | break; | 752 | break; |
| 702 | 753 | ||
| @@ -718,7 +769,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 718 | bfa_fcs_rport_hal_offline_action(rport); | 769 | bfa_fcs_rport_hal_offline_action(rport); |
| 719 | break; | 770 | break; |
| 720 | 771 | ||
| 721 | case RPSM_EVENT_SCN: | 772 | case RPSM_EVENT_FAB_SCN: |
| 722 | break; | 773 | break; |
| 723 | 774 | ||
| 724 | case RPSM_EVENT_LOGO_RCVD: | 775 | case RPSM_EVENT_LOGO_RCVD: |
| @@ -747,7 +798,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 747 | * authenticating with rport. FC-4s are paused. | 798 | * authenticating with rport. FC-4s are paused. |
| 748 | */ | 799 | */ |
| 749 | static void | 800 | static void |
| 750 | bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, | 801 | bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, |
| 751 | enum rport_event event) | 802 | enum rport_event event) |
| 752 | { | 803 | { |
| 753 | bfa_trc(rport->fcs, rport->pwwn); | 804 | bfa_trc(rport->fcs, rport->pwwn); |
| @@ -756,7 +807,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, | |||
| 756 | 807 | ||
| 757 | switch (event) { | 808 | switch (event) { |
| 758 | case RPSM_EVENT_FCXP_SENT: | 809 | case RPSM_EVENT_FCXP_SENT: |
| 759 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); | 810 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online); |
| 760 | break; | 811 | break; |
| 761 | 812 | ||
| 762 | case RPSM_EVENT_DELETE: | 813 | case RPSM_EVENT_DELETE: |
| @@ -779,7 +830,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, | |||
| 779 | bfa_fcs_rport_hal_offline_action(rport); | 830 | bfa_fcs_rport_hal_offline_action(rport); |
| 780 | break; | 831 | break; |
| 781 | 832 | ||
| 782 | case RPSM_EVENT_SCN: | 833 | case RPSM_EVENT_FAB_SCN: |
| 783 | break; | 834 | break; |
| 784 | 835 | ||
| 785 | case RPSM_EVENT_PLOGI_RCVD: | 836 | case RPSM_EVENT_PLOGI_RCVD: |
| @@ -798,7 +849,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, | |||
| 798 | * FC-4s are paused. | 849 | * FC-4s are paused. |
| 799 | */ | 850 | */ |
| 800 | static void | 851 | static void |
| 801 | bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) | 852 | bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, |
| 853 | enum rport_event event) | ||
| 802 | { | 854 | { |
| 803 | bfa_trc(rport->fcs, rport->pwwn); | 855 | bfa_trc(rport->fcs, rport->pwwn); |
| 804 | bfa_trc(rport->fcs, rport->pid); | 856 | bfa_trc(rport->fcs, rport->pid); |
| @@ -831,7 +883,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 831 | bfa_fcs_rport_hal_offline_action(rport); | 883 | bfa_fcs_rport_hal_offline_action(rport); |
| 832 | break; | 884 | break; |
| 833 | 885 | ||
| 834 | case RPSM_EVENT_SCN: | 886 | case RPSM_EVENT_FAB_SCN: |
| 835 | /* | 887 | /* |
| 836 | * already processing RSCN | 888 | * already processing RSCN |
| 837 | */ | 889 | */ |
| @@ -856,7 +908,96 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 856 | } | 908 | } |
| 857 | 909 | ||
| 858 | /* | 910 | /* |
| 859 | * Rport has sent LOGO. Awaiting FC-4 offline completion callback. | 911 | * ADISC is being sent to authenticate with the rport. |
| 912 | * Offline actions have already been performed. | ||
| 913 | */ | ||
| 914 | static void | ||
| 915 | bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport, | ||
| 916 | enum rport_event event) | ||
| 917 | { | ||
| 918 | bfa_trc(rport->fcs, rport->pwwn); | ||
| 919 | bfa_trc(rport->fcs, rport->pid); | ||
| 920 | bfa_trc(rport->fcs, event); | ||
| 921 | |||
| 922 | switch (event) { | ||
| 923 | case RPSM_EVENT_FCXP_SENT: | ||
| 924 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline); | ||
| 925 | break; | ||
| 926 | |||
| 927 | case RPSM_EVENT_DELETE: | ||
| 928 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 929 | case RPSM_EVENT_LOGO_IMP: | ||
| 930 | case RPSM_EVENT_LOGO_RCVD: | ||
| 931 | case RPSM_EVENT_PRLO_RCVD: | ||
| 932 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 933 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, | ||
| 934 | &rport->fcxp_wqe); | ||
| 935 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 936 | bfa_fcs_rport_timeout, rport, | ||
| 937 | bfa_fcs_rport_del_timeout); | ||
| 938 | break; | ||
| 939 | |||
| 940 | case RPSM_EVENT_PLOGI_RCVD: | ||
| 941 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); | ||
| 942 | bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); | ||
| 943 | bfa_fcs_rport_send_plogiacc(rport, NULL); | ||
| 944 | break; | ||
| 945 | |||
| 946 | default: | ||
| 947 | bfa_sm_fault(rport->fcs, event); | ||
| 948 | } | ||
| 949 | } | ||
| 950 | |||
| 951 | /* | ||
| 952 | * ADISC has been sent to the rport; awaiting its response. | ||
| 953 | * Offline actions have already been performed. | ||
| 954 | */ | ||
| 955 | static void | ||
| 956 | bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, | ||
| 957 | enum rport_event event) | ||
| 958 | { | ||
| 959 | bfa_trc(rport->fcs, rport->pwwn); | ||
| 960 | bfa_trc(rport->fcs, rport->pid); | ||
| 961 | bfa_trc(rport->fcs, event); | ||
| 962 | |||
| 963 | switch (event) { | ||
| 964 | case RPSM_EVENT_ACCEPTED: | ||
| 965 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); | ||
| 966 | bfa_fcs_rport_hal_online(rport); | ||
| 967 | break; | ||
| 968 | |||
| 969 | case RPSM_EVENT_PLOGI_RCVD: | ||
| 970 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); | ||
| 971 | bfa_fcxp_discard(rport->fcxp); | ||
| 972 | bfa_fcs_rport_send_plogiacc(rport, NULL); | ||
| 973 | break; | ||
| 974 | |||
| 975 | case RPSM_EVENT_FAILED: | ||
| 976 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 977 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 978 | bfa_fcs_rport_timeout, rport, | ||
| 979 | bfa_fcs_rport_del_timeout); | ||
| 980 | break; | ||
| 981 | |||
| 982 | case RPSM_EVENT_DELETE: | ||
| 983 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 984 | case RPSM_EVENT_LOGO_IMP: | ||
| 985 | case RPSM_EVENT_LOGO_RCVD: | ||
| 986 | case RPSM_EVENT_PRLO_RCVD: | ||
| 987 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); | ||
| 988 | bfa_fcxp_discard(rport->fcxp); | ||
| 989 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 990 | bfa_fcs_rport_timeout, rport, | ||
| 991 | bfa_fcs_rport_del_timeout); | ||
| 992 | break; | ||
| 993 | |||
| 994 | default: | ||
| 995 | bfa_sm_fault(rport->fcs, event); | ||
| 996 | } | ||
| 997 | } | ||
| 998 | |||
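
The two adisc_offline states just added give loop rports a cheaper revalidation path after a LIP: a peer that is still present in the new loop map is probed with ADISC from the offline side rather than a fresh PLOGI, and only an ACCEPTED reply brings the HAL rport back online. A condensed sketch of the decision, mirroring the hcb_offline branching later in this diff:

    /* Post-LIP revalidation choice (condensed; names abbreviated). */
    enum next_step { SEND_PLOGI, SEND_ADISC, WAIT_DELETE_TIMER };

    static enum next_step after_hcb_offline(int topology_is_loop, int scn_online)
    {
            if (!topology_is_loop)
                    return SEND_PLOGI;        /* non-loop: full login */
            if (scn_online)
                    return SEND_ADISC;        /* peer still in the loop map */
            return WAIT_DELETE_TIMER;         /* peer gone; free after timeout */
    }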
| 999 | /* | ||
| 1000 | * Rport has sent LOGO. Awaiting FC-4 offline completion callback. | ||
| 860 | */ | 1001 | */ |
| 861 | static void | 1002 | static void |
| 862 | bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, | 1003 | bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, |
| @@ -881,6 +1022,8 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, | |||
| 881 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); | 1022 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); |
| 882 | break; | 1023 | break; |
| 883 | 1024 | ||
| 1025 | case RPSM_EVENT_SCN_ONLINE: | ||
| 1026 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 884 | case RPSM_EVENT_HCB_ONLINE: | 1027 | case RPSM_EVENT_HCB_ONLINE: |
| 885 | case RPSM_EVENT_LOGO_RCVD: | 1028 | case RPSM_EVENT_LOGO_RCVD: |
| 886 | case RPSM_EVENT_PRLO_RCVD: | 1029 | case RPSM_EVENT_PRLO_RCVD: |
| @@ -945,6 +1088,8 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, | |||
| 945 | bfa_fcs_rport_hal_offline(rport); | 1088 | bfa_fcs_rport_hal_offline(rport); |
| 946 | break; | 1089 | break; |
| 947 | 1090 | ||
| 1091 | case RPSM_EVENT_SCN_ONLINE: | ||
| 1092 | break; | ||
| 948 | case RPSM_EVENT_LOGO_RCVD: | 1093 | case RPSM_EVENT_LOGO_RCVD: |
| 949 | /* | 1094 | /* |
| 950 | * Rport is going offline. Just ack the logo | 1095 | * Rport is going offline. Just ack the logo |
| @@ -956,8 +1101,9 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, | |||
| 956 | bfa_fcs_rport_send_prlo_acc(rport); | 1101 | bfa_fcs_rport_send_prlo_acc(rport); |
| 957 | break; | 1102 | break; |
| 958 | 1103 | ||
| 1104 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 959 | case RPSM_EVENT_HCB_ONLINE: | 1105 | case RPSM_EVENT_HCB_ONLINE: |
| 960 | case RPSM_EVENT_SCN: | 1106 | case RPSM_EVENT_FAB_SCN: |
| 961 | case RPSM_EVENT_LOGO_IMP: | 1107 | case RPSM_EVENT_LOGO_IMP: |
| 962 | case RPSM_EVENT_ADDRESS_CHANGE: | 1108 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 963 | /* | 1109 | /* |
| @@ -1015,6 +1161,19 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, | |||
| 1015 | bfa_fcs_rport_sm_nsdisc_sending); | 1161 | bfa_fcs_rport_sm_nsdisc_sending); |
| 1016 | rport->ns_retries = 0; | 1162 | rport->ns_retries = 0; |
| 1017 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 1163 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| 1164 | } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) == | ||
| 1165 | BFA_PORT_TOPOLOGY_LOOP) { | ||
| 1166 | if (rport->scn_online) { | ||
| 1167 | bfa_sm_set_state(rport, | ||
| 1168 | bfa_fcs_rport_sm_adisc_offline_sending); | ||
| 1169 | bfa_fcs_rport_send_adisc(rport, NULL); | ||
| 1170 | } else { | ||
| 1171 | bfa_sm_set_state(rport, | ||
| 1172 | bfa_fcs_rport_sm_offline); | ||
| 1173 | bfa_timer_start(rport->fcs->bfa, &rport->timer, | ||
| 1174 | bfa_fcs_rport_timeout, rport, | ||
| 1175 | bfa_fcs_rport_del_timeout); | ||
| 1176 | } | ||
| 1018 | } else { | 1177 | } else { |
| 1019 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); | 1178 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); |
| 1020 | rport->plogi_retries = 0; | 1179 | rport->plogi_retries = 0; |
| @@ -1027,7 +1186,9 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, | |||
| 1027 | bfa_fcs_rport_free(rport); | 1186 | bfa_fcs_rport_free(rport); |
| 1028 | break; | 1187 | break; |
| 1029 | 1188 | ||
| 1030 | case RPSM_EVENT_SCN: | 1189 | case RPSM_EVENT_SCN_ONLINE: |
| 1190 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 1191 | case RPSM_EVENT_FAB_SCN: | ||
| 1031 | case RPSM_EVENT_LOGO_RCVD: | 1192 | case RPSM_EVENT_LOGO_RCVD: |
| 1032 | case RPSM_EVENT_PRLO_RCVD: | 1193 | case RPSM_EVENT_PRLO_RCVD: |
| 1033 | case RPSM_EVENT_PLOGI_RCVD: | 1194 | case RPSM_EVENT_PLOGI_RCVD: |
| @@ -1106,6 +1267,8 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, | |||
| 1106 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); | 1267 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); |
| 1107 | break; | 1268 | break; |
| 1108 | 1269 | ||
| 1270 | case RPSM_EVENT_SCN_ONLINE: | ||
| 1271 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 1109 | case RPSM_EVENT_LOGO_RCVD: | 1272 | case RPSM_EVENT_LOGO_RCVD: |
| 1110 | case RPSM_EVENT_PRLO_RCVD: | 1273 | case RPSM_EVENT_PRLO_RCVD: |
| 1111 | /* | 1274 | /* |
| @@ -1146,6 +1309,8 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, | |||
| 1146 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); | 1309 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); |
| 1147 | break; | 1310 | break; |
| 1148 | 1311 | ||
| 1312 | case RPSM_EVENT_SCN_ONLINE: | ||
| 1313 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 1149 | case RPSM_EVENT_ADDRESS_CHANGE: | 1314 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 1150 | break; | 1315 | break; |
| 1151 | 1316 | ||
| @@ -1172,7 +1337,9 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, | |||
| 1172 | bfa_fcs_rport_free(rport); | 1337 | bfa_fcs_rport_free(rport); |
| 1173 | break; | 1338 | break; |
| 1174 | 1339 | ||
| 1175 | case RPSM_EVENT_SCN: | 1340 | case RPSM_EVENT_SCN_ONLINE: |
| 1341 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 1342 | case RPSM_EVENT_FAB_SCN: | ||
| 1176 | case RPSM_EVENT_ADDRESS_CHANGE: | 1343 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 1177 | break; | 1344 | break; |
| 1178 | 1345 | ||
| @@ -1209,10 +1376,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 1209 | bfa_fcs_rport_free(rport); | 1376 | bfa_fcs_rport_free(rport); |
| 1210 | break; | 1377 | break; |
| 1211 | 1378 | ||
| 1212 | case RPSM_EVENT_SCN: | 1379 | case RPSM_EVENT_FAB_SCN: |
| 1213 | case RPSM_EVENT_ADDRESS_CHANGE: | 1380 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 1214 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | ||
| 1215 | bfa_timer_stop(&rport->timer); | 1381 | bfa_timer_stop(&rport->timer); |
| 1382 | WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) == | ||
| 1383 | BFA_PORT_TOPOLOGY_LOOP); | ||
| 1384 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | ||
| 1216 | rport->ns_retries = 0; | 1385 | rport->ns_retries = 0; |
| 1217 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 1386 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| 1218 | break; | 1387 | break; |
| @@ -1232,6 +1401,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 1232 | case RPSM_EVENT_LOGO_RCVD: | 1401 | case RPSM_EVENT_LOGO_RCVD: |
| 1233 | case RPSM_EVENT_PRLO_RCVD: | 1402 | case RPSM_EVENT_PRLO_RCVD: |
| 1234 | case RPSM_EVENT_LOGO_IMP: | 1403 | case RPSM_EVENT_LOGO_IMP: |
| 1404 | case RPSM_EVENT_SCN_OFFLINE: | ||
| 1235 | break; | 1405 | break; |
| 1236 | 1406 | ||
| 1237 | case RPSM_EVENT_PLOGI_COMP: | 1407 | case RPSM_EVENT_PLOGI_COMP: |
| @@ -1240,6 +1410,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) | |||
| 1240 | bfa_fcs_rport_fcs_online_action(rport); | 1410 | bfa_fcs_rport_fcs_online_action(rport); |
| 1241 | break; | 1411 | break; |
| 1242 | 1412 | ||
| 1413 | case RPSM_EVENT_SCN_ONLINE: | ||
| 1414 | bfa_timer_stop(&rport->timer); | ||
| 1415 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); | ||
| 1416 | bfa_fcs_rport_send_plogi(rport, NULL); | ||
| 1417 | break; | ||
| 1418 | |||
| 1243 | case RPSM_EVENT_PLOGI_SEND: | 1419 | case RPSM_EVENT_PLOGI_SEND: |
| 1244 | bfa_timer_stop(&rport->timer); | 1420 | bfa_timer_stop(&rport->timer); |
| 1245 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); | 1421 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); |
| @@ -1280,7 +1456,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, | |||
| 1280 | bfa_fcs_rport_send_plogiacc(rport, NULL); | 1456 | bfa_fcs_rport_send_plogiacc(rport, NULL); |
| 1281 | break; | 1457 | break; |
| 1282 | 1458 | ||
| 1283 | case RPSM_EVENT_SCN: | 1459 | case RPSM_EVENT_FAB_SCN: |
| 1284 | case RPSM_EVENT_LOGO_RCVD: | 1460 | case RPSM_EVENT_LOGO_RCVD: |
| 1285 | case RPSM_EVENT_PRLO_RCVD: | 1461 | case RPSM_EVENT_PRLO_RCVD: |
| 1286 | case RPSM_EVENT_PLOGI_SEND: | 1462 | case RPSM_EVENT_PLOGI_SEND: |
| @@ -1326,7 +1502,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, | |||
| 1326 | bfa_fcs_rport_send_nsdisc(rport, NULL); | 1502 | bfa_fcs_rport_send_nsdisc(rport, NULL); |
| 1327 | break; | 1503 | break; |
| 1328 | 1504 | ||
| 1329 | case RPSM_EVENT_SCN: | 1505 | case RPSM_EVENT_FAB_SCN: |
| 1330 | case RPSM_EVENT_ADDRESS_CHANGE: | 1506 | case RPSM_EVENT_ADDRESS_CHANGE: |
| 1331 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); | 1507 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); |
| 1332 | bfa_timer_stop(&rport->timer); | 1508 | bfa_timer_stop(&rport->timer); |
| @@ -1439,7 +1615,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, | |||
| 1439 | case RPSM_EVENT_PRLO_RCVD: | 1615 | case RPSM_EVENT_PRLO_RCVD: |
| 1440 | bfa_fcs_rport_send_prlo_acc(rport); | 1616 | bfa_fcs_rport_send_prlo_acc(rport); |
| 1441 | break; | 1617 | break; |
| 1442 | case RPSM_EVENT_SCN: | 1618 | case RPSM_EVENT_FAB_SCN: |
| 1443 | /* | 1619 | /* |
| 1444 | * ignore, wait for NS query response | 1620 | * ignore, wait for NS query response |
| 1445 | */ | 1621 | */ |
| @@ -2546,7 +2722,7 @@ void | |||
| 2546 | bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) | 2722 | bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) |
| 2547 | { | 2723 | { |
| 2548 | rport->stats.rscns++; | 2724 | rport->stats.rscns++; |
| 2549 | bfa_sm_send_event(rport, RPSM_EVENT_SCN); | 2725 | bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN); |
| 2550 | } | 2726 | } |
| 2551 | 2727 | ||
| 2552 | /* | 2728 | /* |
| @@ -2621,6 +2797,48 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg, | |||
| 2621 | bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); | 2797 | bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); |
| 2622 | } | 2798 | } |
| 2623 | 2799 | ||
| 2800 | void | ||
| 2801 | bfa_cb_rport_scn_online(struct bfa_s *bfa) | ||
| 2802 | { | ||
| 2803 | struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; | ||
| 2804 | struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); | ||
| 2805 | struct bfa_fcs_rport_s *rp; | ||
| 2806 | struct list_head *qe; | ||
| 2807 | |||
| 2808 | list_for_each(qe, &port->rport_q) { | ||
| 2809 | rp = (struct bfa_fcs_rport_s *) qe; | ||
| 2810 | bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE); | ||
| 2811 | rp->scn_online = BFA_TRUE; | ||
| 2812 | } | ||
| 2813 | |||
| 2814 | if (bfa_fcs_lport_is_online(port)) | ||
| 2815 | bfa_fcs_lport_lip_scn_online(port); | ||
| 2816 | } | ||
| 2817 | |||
| 2818 | void | ||
| 2819 | bfa_cb_rport_scn_no_dev(void *rport) | ||
| 2820 | { | ||
| 2821 | struct bfa_fcs_rport_s *rp = rport; | ||
| 2822 | |||
| 2823 | bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); | ||
| 2824 | rp->scn_online = BFA_FALSE; | ||
| 2825 | } | ||
| 2826 | |||
| 2827 | void | ||
| 2828 | bfa_cb_rport_scn_offline(struct bfa_s *bfa) | ||
| 2829 | { | ||
| 2830 | struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs; | ||
| 2831 | struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs); | ||
| 2832 | struct bfa_fcs_rport_s *rp; | ||
| 2833 | struct list_head *qe; | ||
| 2834 | |||
| 2835 | list_for_each(qe, &port->rport_q) { | ||
| 2836 | rp = (struct bfa_fcs_rport_s *) qe; | ||
| 2837 | bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE); | ||
| 2838 | rp->scn_online = BFA_FALSE; | ||
| 2839 | } | ||
| 2840 | } | ||
| 2841 | |||
| 2624 | /* | 2842 | /* |
| 2625 | * brief | 2843 | * brief |
| 2626 | * This routine is a static BFA callback when there is a QoS priority | 2844 | * This routine is a static BFA callback when there is a QoS priority |
| @@ -2808,6 +3026,9 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, | |||
| 2808 | struct bfa_rport_qos_attr_s qos_attr; | 3026 | struct bfa_rport_qos_attr_s qos_attr; |
| 2809 | struct bfa_fcs_lport_s *port = rport->port; | 3027 | struct bfa_fcs_lport_s *port = rport->port; |
| 2810 | bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; | 3028 | bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; |
| 3029 | struct bfa_port_attr_s port_attr; | ||
| 3030 | |||
| 3031 | bfa_fcport_get_attr(rport->fcs->bfa, &port_attr); | ||
| 2811 | 3032 | ||
| 2812 | memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); | 3033 | memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); |
| 2813 | memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); | 3034 | memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); |
| @@ -2838,7 +3059,8 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, | |||
| 2838 | rport_speed = | 3059 | rport_speed = |
| 2839 | bfa_fcport_get_ratelim_speed(rport->fcs->bfa); | 3060 | bfa_fcport_get_ratelim_speed(rport->fcs->bfa); |
| 2840 | 3061 | ||
| 2841 | if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) | 3062 | if ((bfa_fcs_lport_get_rport_max_speed(port) != |
| 3063 | BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed)) | ||
| 2842 | rport_attr->trl_enforced = BFA_TRUE; | 3064 | rport_attr->trl_enforced = BFA_TRUE; |
| 2843 | } | 3065 | } |
| 2844 | } | 3066 | } |
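The hunks above replace the single RPSM_EVENT_SCN with a fabric SCN (RPSM_EVENT_FAB_SCN) plus loop-specific online/offline events, and the new bfa_cb_rport_scn_online()/bfa_cb_rport_scn_offline() callbacks fan those events out over the base port's rport queue. A minimal standalone sketch of the dispatch pattern follows; the two toy states and names are simplified stand-ins, not the driver's real bfa_sm types.

/*
 * Standalone sketch, not driver code: the bfa state machines store the
 * current state as a function pointer and bfa_sm_send_event() calls it
 * with the event. Only the new split SCN events are modeled here.
 */
#include <stdio.h>

enum rpsm_event {
	RPSM_EVENT_FAB_SCN,	/* fabric state-change notification */
	RPSM_EVENT_SCN_ONLINE,	/* loop: remote port came online */
	RPSM_EVENT_SCN_OFFLINE,	/* loop: remote port went offline */
};

struct rport;
typedef void (*rpsm_state_t)(struct rport *rp, enum rpsm_event ev);

struct rport {
	rpsm_state_t sm;	/* current state handler */
};

static void rport_sm_online(struct rport *rp, enum rpsm_event ev);

static void rport_sm_offline(struct rport *rp, enum rpsm_event ev)
{
	switch (ev) {
	case RPSM_EVENT_SCN_ONLINE:
		rp->sm = rport_sm_online;	/* driver also sends PLOGI here */
		printf("offline -> online\n");
		break;
	default:
		break;	/* everything else is ignored in this state */
	}
}

static void rport_sm_online(struct rport *rp, enum rpsm_event ev)
{
	switch (ev) {
	case RPSM_EVENT_SCN_OFFLINE:
		rp->sm = rport_sm_offline;
		printf("online -> offline\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	struct rport rp = { .sm = rport_sm_offline };

	rp.sm(&rp, RPSM_EVENT_SCN_ONLINE);	/* like bfa_sm_send_event() */
	rp.sm(&rp, RPSM_EVENT_SCN_OFFLINE);
	return 0;
}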
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 75ca8752b9f4..0116c1032e25 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c | |||
| @@ -731,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) | |||
| 731 | /* | 731 | /* |
| 732 | * Unlock the hw semaphore. Should be here only once per boot. | 732 | * Unlock the hw semaphore. Should be here only once per boot. |
| 733 | */ | 733 | */ |
| 734 | readl(iocpf->ioc->ioc_regs.ioc_sem_reg); | 734 | bfa_ioc_ownership_reset(iocpf->ioc); |
| 735 | writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); | ||
| 736 | 735 | ||
| 737 | /* | 736 | /* |
| 738 | * unlock init semaphore. | 737 | * unlock init semaphore. |
| @@ -1751,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) | |||
| 1751 | attr->card_type = be32_to_cpu(attr->card_type); | 1750 | attr->card_type = be32_to_cpu(attr->card_type); |
| 1752 | attr->maxfrsize = be16_to_cpu(attr->maxfrsize); | 1751 | attr->maxfrsize = be16_to_cpu(attr->maxfrsize); |
| 1753 | ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); | 1752 | ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); |
| 1753 | attr->mfg_year = be16_to_cpu(attr->mfg_year); | ||
| 1754 | 1754 | ||
| 1755 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); | 1755 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); |
| 1756 | } | 1756 | } |
| @@ -2497,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | |||
| 2497 | ad_attr->cna_capable = bfa_ioc_is_cna(ioc); | 2497 | ad_attr->cna_capable = bfa_ioc_is_cna(ioc); |
| 2498 | ad_attr->trunk_capable = (ad_attr->nports > 1) && | 2498 | ad_attr->trunk_capable = (ad_attr->nports > 1) && |
| 2499 | !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; | 2499 | !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; |
| 2500 | ad_attr->mfg_day = ioc_attr->mfg_day; | ||
| 2501 | ad_attr->mfg_month = ioc_attr->mfg_month; | ||
| 2502 | ad_attr->mfg_year = ioc_attr->mfg_year; | ||
| 2500 | } | 2503 | } |
| 2501 | 2504 | ||
| 2502 | enum bfa_ioc_type_e | 2505 | enum bfa_ioc_type_e |
| @@ -2923,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) | |||
| 2923 | return; | 2926 | return; |
| 2924 | } | 2927 | } |
| 2925 | 2928 | ||
| 2926 | if (ioc->iocpf.poll_time >= BFA_IOC_TOV) | 2929 | if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) |
| 2927 | bfa_iocpf_timeout(ioc); | 2930 | bfa_iocpf_timeout(ioc); |
| 2928 | else { | 2931 | else { |
| 2929 | ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; | 2932 | ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; |
| @@ -3016,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) | |||
| 3016 | struct bfa_ablk_cfg_inst_s *cfg_inst; | 3019 | struct bfa_ablk_cfg_inst_s *cfg_inst; |
| 3017 | int i, j; | 3020 | int i, j; |
| 3018 | u16 be16; | 3021 | u16 be16; |
| 3019 | u32 be32; | ||
| 3020 | 3022 | ||
| 3021 | for (i = 0; i < BFA_ABLK_MAX; i++) { | 3023 | for (i = 0; i < BFA_ABLK_MAX; i++) { |
| 3022 | cfg_inst = &cfg->inst[i]; | 3024 | cfg_inst = &cfg->inst[i]; |
| @@ -3027,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg) | |||
| 3027 | cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); | 3029 | cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); |
| 3028 | be16 = cfg_inst->pf_cfg[j].num_vectors; | 3030 | be16 = cfg_inst->pf_cfg[j].num_vectors; |
| 3029 | cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); | 3031 | cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); |
| 3030 | be32 = cfg_inst->pf_cfg[j].bw; | 3032 | be16 = cfg_inst->pf_cfg[j].bw_min; |
| 3031 | cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32); | 3033 | cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); |
| 3034 | be16 = cfg_inst->pf_cfg[j].bw_max; | ||
| 3035 | cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); | ||
| 3032 | } | 3036 | } |
| 3033 | } | 3037 | } |
| 3034 | } | 3038 | } |
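The bw field split into bw_min/bw_max also changes the swab: each 16-bit field is now converted with be16_to_cpu() on its own, where the removed code fed a u32 through be16_to_cpu() and truncated it. A small host-side sketch of the same per-field conversion; the 4-byte wire buffer below is invented for illustration.

/*
 * Host-side sketch of per-field be16 conversion, not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable be16_to_cpu(): byte 0 is the most significant on the wire. */
static uint16_t be16_to_host(uint16_t be)
{
	const uint8_t *p = (const uint8_t *)&be;

	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* pretend these bytes arrived via DMA: bw_min=10, bw_max=100 */
	const uint8_t raw[4] = { 0x00, 0x0a, 0x00, 0x64 };
	uint16_t bw_min, bw_max;

	memcpy(&bw_min, &raw[0], sizeof(bw_min));	/* still big-endian */
	memcpy(&bw_max, &raw[2], sizeof(bw_max));

	printf("bw_min=%u bw_max=%u\n",
	       be16_to_host(bw_min), be16_to_host(bw_max));
	return 0;
}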
| @@ -3170,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg, | |||
| 3170 | 3174 | ||
| 3171 | bfa_status_t | 3175 | bfa_status_t |
| 3172 | bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, | 3176 | bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, |
| 3173 | u8 port, enum bfi_pcifn_class personality, int bw, | 3177 | u8 port, enum bfi_pcifn_class personality, |
| 3178 | u16 bw_min, u16 bw_max, | ||
| 3174 | bfa_ablk_cbfn_t cbfn, void *cbarg) | 3179 | bfa_ablk_cbfn_t cbfn, void *cbarg) |
| 3175 | { | 3180 | { |
| 3176 | struct bfi_ablk_h2i_pf_req_s *m; | 3181 | struct bfi_ablk_h2i_pf_req_s *m; |
| @@ -3194,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, | |||
| 3194 | bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, | 3199 | bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, |
| 3195 | bfa_ioc_portid(ablk->ioc)); | 3200 | bfa_ioc_portid(ablk->ioc)); |
| 3196 | m->pers = cpu_to_be16((u16)personality); | 3201 | m->pers = cpu_to_be16((u16)personality); |
| 3197 | m->bw = cpu_to_be32(bw); | 3202 | m->bw_min = cpu_to_be16(bw_min); |
| 3203 | m->bw_max = cpu_to_be16(bw_max); | ||
| 3198 | m->port = port; | 3204 | m->port = port; |
| 3199 | bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); | 3205 | bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); |
| 3200 | 3206 | ||
| @@ -3294,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode, | |||
| 3294 | } | 3300 | } |
| 3295 | 3301 | ||
| 3296 | bfa_status_t | 3302 | bfa_status_t |
| 3297 | bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, | 3303 | bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min, |
| 3298 | bfa_ablk_cbfn_t cbfn, void *cbarg) | 3304 | u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg) |
| 3299 | { | 3305 | { |
| 3300 | struct bfi_ablk_h2i_pf_req_s *m; | 3306 | struct bfi_ablk_h2i_pf_req_s *m; |
| 3301 | 3307 | ||
| @@ -3317,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, | |||
| 3317 | bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, | 3323 | bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, |
| 3318 | bfa_ioc_portid(ablk->ioc)); | 3324 | bfa_ioc_portid(ablk->ioc)); |
| 3319 | m->pcifn = (u8)pcifn; | 3325 | m->pcifn = (u8)pcifn; |
| 3320 | m->bw = cpu_to_be32(bw); | 3326 | m->bw_min = cpu_to_be16(bw_min); |
| 3327 | m->bw_max = cpu_to_be16(bw_max); | ||
| 3321 | bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); | 3328 | bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); |
| 3322 | 3329 | ||
| 3323 | return BFA_STATUS_OK; | 3330 | return BFA_STATUS_OK; |
| @@ -4680,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp) | |||
| 4680 | diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); | 4687 | diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); |
| 4681 | diag->tsensor.temp->ts_junc = rsp->ts_junc; | 4688 | diag->tsensor.temp->ts_junc = rsp->ts_junc; |
| 4682 | diag->tsensor.temp->ts_brd = rsp->ts_brd; | 4689 | diag->tsensor.temp->ts_brd = rsp->ts_brd; |
| 4683 | diag->tsensor.temp->status = BFA_STATUS_OK; | ||
| 4684 | 4690 | ||
| 4685 | if (rsp->ts_brd) { | 4691 | if (rsp->ts_brd) { |
| 4692 | /* tsensor.temp->status is brd_temp status */ | ||
| 4693 | diag->tsensor.temp->status = rsp->status; | ||
| 4686 | if (rsp->status == BFA_STATUS_OK) { | 4694 | if (rsp->status == BFA_STATUS_OK) { |
| 4687 | diag->tsensor.temp->brd_temp = | 4695 | diag->tsensor.temp->brd_temp = |
| 4688 | be16_to_cpu(rsp->brd_temp); | 4696 | be16_to_cpu(rsp->brd_temp); |
| 4689 | } else { | 4697 | } else |
| 4690 | bfa_trc(diag, rsp->status); | ||
| 4691 | diag->tsensor.temp->brd_temp = 0; | 4698 | diag->tsensor.temp->brd_temp = 0; |
| 4692 | diag->tsensor.temp->status = BFA_STATUS_DEVBUSY; | ||
| 4693 | } | ||
| 4694 | } | 4699 | } |
| 4700 | |||
| 4701 | bfa_trc(diag, rsp->status); | ||
| 4695 | bfa_trc(diag, rsp->ts_junc); | 4702 | bfa_trc(diag, rsp->ts_junc); |
| 4696 | bfa_trc(diag, rsp->temp); | 4703 | bfa_trc(diag, rsp->temp); |
| 4697 | bfa_trc(diag, rsp->ts_brd); | 4704 | bfa_trc(diag, rsp->ts_brd); |
| 4698 | bfa_trc(diag, rsp->brd_temp); | 4705 | bfa_trc(diag, rsp->brd_temp); |
| 4706 | |||
| 4707 | /* tsensor status is always good because we always have junction temp */ | ||
| 4708 | diag->tsensor.status = BFA_STATUS_OK; | ||
| 4699 | diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); | 4709 | diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); |
| 4700 | diag->tsensor.lock = 0; | 4710 | diag->tsensor.lock = 0; |
| 4701 | } | 4711 | } |
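With this change the overall query status stays BFA_STATUS_OK whenever the junction sensor responds, and the board sensor's status travels in tsensor.temp->status instead of overriding the whole result. A standalone model of that split, with invented values:

/*
 * Standalone model of the junction/board status split, not driver code.
 */
#include <stdio.h>

enum status { ST_OK, ST_DEVBUSY };

struct ts_result {
	int temp;		/* junction temperature, always valid */
	int brd_temp;		/* board temperature, valid iff brd_status==ST_OK */
	enum status brd_status;	/* board sensor's own status */
};

/* Overall status reflects only the junction sensor, as in the new code. */
static enum status ts_query(struct ts_result *r, int have_brd, int brd_ok)
{
	r->temp = 45;		/* junction sensor always answers */
	if (have_brd) {
		r->brd_status = brd_ok ? ST_OK : ST_DEVBUSY;
		r->brd_temp = brd_ok ? 40 : 0;
	}
	return ST_OK;
}

int main(void)
{
	struct ts_result r = { 0 };
	enum status s = ts_query(&r, 1, 0);	/* board sensor present, busy */

	printf("query=%d junction=%d brd_status=%d brd=%d\n",
	       s, r.temp, r.brd_status, r.brd_temp);
	return 0;
}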
| @@ -4924,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag, | |||
| 4924 | diag->tsensor.temp = result; | 4934 | diag->tsensor.temp = result; |
| 4925 | diag->tsensor.cbfn = cbfn; | 4935 | diag->tsensor.cbfn = cbfn; |
| 4926 | diag->tsensor.cbarg = cbarg; | 4936 | diag->tsensor.cbarg = cbarg; |
| 4937 | diag->tsensor.status = BFA_STATUS_OK; | ||
| 4927 | 4938 | ||
| 4928 | /* Send msg to fw */ | 4939 | /* Send msg to fw */ |
| 4929 | diag_tempsensor_send(diag); | 4940 | diag_tempsensor_send(diag); |
| @@ -5615,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) | |||
| 5615 | } | 5626 | } |
| 5616 | bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); | 5627 | bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); |
| 5617 | bfa_timer_start(dconf->bfa, &dconf->timer, | 5628 | bfa_timer_start(dconf->bfa, &dconf->timer, |
| 5618 | bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); | 5629 | bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV); |
| 5619 | bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), | 5630 | bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), |
| 5620 | BFA_FLASH_PART_DRV, dconf->instance, | 5631 | BFA_FLASH_PART_DRV, dconf->instance, |
| 5621 | dconf->dconf, | 5632 | dconf->dconf, |
| @@ -5655,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, | |||
| 5655 | break; | 5666 | break; |
| 5656 | case BFA_DCONF_SM_TIMEOUT: | 5667 | case BFA_DCONF_SM_TIMEOUT: |
| 5657 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); | 5668 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); |
| 5658 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); | 5669 | bfa_ioc_suspend(&dconf->bfa->ioc); |
| 5659 | break; | 5670 | break; |
| 5660 | case BFA_DCONF_SM_EXIT: | 5671 | case BFA_DCONF_SM_EXIT: |
| 5661 | bfa_timer_stop(&dconf->timer); | 5672 | bfa_timer_stop(&dconf->timer); |
| @@ -5853,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) | |||
| 5853 | struct bfa_s *bfa = arg; | 5864 | struct bfa_s *bfa = arg; |
| 5854 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); | 5865 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); |
| 5855 | 5866 | ||
| 5856 | bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); | ||
| 5857 | if (status == BFA_STATUS_OK) { | 5867 | if (status == BFA_STATUS_OK) { |
| 5858 | bfa_dconf_read_data_valid(bfa) = BFA_TRUE; | 5868 | bfa_dconf_read_data_valid(bfa) = BFA_TRUE; |
| 5859 | if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) | 5869 | if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) |
| @@ -5861,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) | |||
| 5861 | if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) | 5871 | if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) |
| 5862 | dconf->dconf->hdr.version = BFI_DCONF_VERSION; | 5872 | dconf->dconf->hdr.version = BFI_DCONF_VERSION; |
| 5863 | } | 5873 | } |
| 5874 | bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); | ||
| 5864 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); | 5875 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); |
| 5865 | } | 5876 | } |
| 5866 | 5877 | ||
| @@ -5945,3 +5956,448 @@ bfa_dconf_modexit(struct bfa_s *bfa) | |||
| 5945 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); | 5956 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); |
| 5946 | bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); | 5957 | bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); |
| 5947 | } | 5958 | } |
| 5959 | |||
| 5960 | /* | ||
| 5961 | * FRU specific functions | ||
| 5962 | */ | ||
| 5963 | |||
| 5964 | #define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */ | ||
| 5965 | #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000 | ||
| 5966 | #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200 | ||
| 5967 | |||
| 5968 | static void | ||
| 5969 | bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event) | ||
| 5970 | { | ||
| 5971 | struct bfa_fru_s *fru = cbarg; | ||
| 5972 | |||
| 5973 | bfa_trc(fru, event); | ||
| 5974 | |||
| 5975 | switch (event) { | ||
| 5976 | case BFA_IOC_E_DISABLED: | ||
| 5977 | case BFA_IOC_E_FAILED: | ||
| 5978 | if (fru->op_busy) { | ||
| 5979 | fru->status = BFA_STATUS_IOC_FAILURE; | ||
| 5980 | fru->cbfn(fru->cbarg, fru->status); | ||
| 5981 | fru->op_busy = 0; | ||
| 5982 | } | ||
| 5983 | break; | ||
| 5984 | |||
| 5985 | default: | ||
| 5986 | break; | ||
| 5987 | } | ||
| 5988 | } | ||
| 5989 | |||
| 5990 | /* | ||
| 5991 | * Send fru write request. | ||
| 5992 | * | ||
| 5993 | * @param[in] cbarg - callback argument | ||
| 5994 | */ | ||
| 5995 | static void | ||
| 5996 | bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) | ||
| 5997 | { | ||
| 5998 | struct bfa_fru_s *fru = cbarg; | ||
| 5999 | struct bfi_fru_write_req_s *msg = | ||
| 6000 | (struct bfi_fru_write_req_s *) fru->mb.msg; | ||
| 6001 | u32 len; | ||
| 6002 | |||
| 6003 | msg->offset = cpu_to_be32(fru->addr_off + fru->offset); | ||
| 6004 | len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? | ||
| 6005 | fru->residue : BFA_FRU_DMA_BUF_SZ; | ||
| 6006 | msg->length = cpu_to_be32(len); | ||
| 6007 | |||
| 6008 | /* | ||
| 6009 | * indicate if it's the last msg of the whole write operation | ||
| 6010 | */ | ||
| 6011 | msg->last = (len == fru->residue) ? 1 : 0; | ||
| 6012 | |||
| 6013 | bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); | ||
| 6014 | bfa_alen_set(&msg->alen, len, fru->dbuf_pa); | ||
| 6015 | |||
| 6016 | memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); | ||
| 6017 | bfa_ioc_mbox_queue(fru->ioc, &fru->mb); | ||
| 6018 | |||
| 6019 | fru->residue -= len; | ||
| 6020 | fru->offset += len; | ||
| 6021 | } | ||
| 6022 | |||
| 6023 | /* | ||
| 6024 | * Send fru read request. | ||
| 6025 | * | ||
| 6026 | * @param[in] cbarg - callback argument | ||
| 6027 | */ | ||
| 6028 | static void | ||
| 6029 | bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) | ||
| 6030 | { | ||
| 6031 | struct bfa_fru_s *fru = cbarg; | ||
| 6032 | struct bfi_fru_read_req_s *msg = | ||
| 6033 | (struct bfi_fru_read_req_s *) fru->mb.msg; | ||
| 6034 | u32 len; | ||
| 6035 | |||
| 6036 | msg->offset = cpu_to_be32(fru->addr_off + fru->offset); | ||
| 6037 | len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? | ||
| 6038 | fru->residue : BFA_FRU_DMA_BUF_SZ; | ||
| 6039 | msg->length = cpu_to_be32(len); | ||
| 6040 | bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); | ||
| 6041 | bfa_alen_set(&msg->alen, len, fru->dbuf_pa); | ||
| 6042 | bfa_ioc_mbox_queue(fru->ioc, &fru->mb); | ||
| 6043 | } | ||
| 6044 | |||
| 6045 | /* | ||
| 6046 | * FRU memory info API. | ||
| 6047 | * | ||
| 6048 | * @param[in] mincfg - minimal cfg variable | ||
| 6049 | */ | ||
| 6050 | u32 | ||
| 6051 | bfa_fru_meminfo(bfa_boolean_t mincfg) | ||
| 6052 | { | ||
| 6053 | /* min driver doesn't need fru */ | ||
| 6054 | if (mincfg) | ||
| 6055 | return 0; | ||
| 6056 | |||
| 6057 | return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); | ||
| 6058 | } | ||
| 6059 | |||
| 6060 | /* | ||
| 6061 | * FRU attach API. | ||
| 6062 | * | ||
| 6063 | * @param[in] fru - fru structure | ||
| 6064 | * @param[in] ioc - ioc structure | ||
| 6065 | * @param[in] dev - device structure | ||
| 6066 | * @param[in] trcmod - trace module | ||
| 6067 | * @param[in] mincfg - minimal cfg variable | ||
| 6068 | */ | ||
| 6069 | void | ||
| 6070 | bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev, | ||
| 6071 | struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg) | ||
| 6072 | { | ||
| 6073 | fru->ioc = ioc; | ||
| 6074 | fru->trcmod = trcmod; | ||
| 6075 | fru->cbfn = NULL; | ||
| 6076 | fru->cbarg = NULL; | ||
| 6077 | fru->op_busy = 0; | ||
| 6078 | |||
| 6079 | bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); | ||
| 6080 | bfa_q_qe_init(&fru->ioc_notify); | ||
| 6081 | bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); | ||
| 6082 | list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); | ||
| 6083 | |||
| 6084 | /* min driver doesn't need fru */ | ||
| 6085 | if (mincfg) { | ||
| 6086 | fru->dbuf_kva = NULL; | ||
| 6087 | fru->dbuf_pa = 0; | ||
| 6088 | } | ||
| 6089 | } | ||
| 6090 | |||
| 6091 | /* | ||
| 6092 | * Claim memory for fru | ||
| 6093 | * | ||
| 6094 | * @param[in] fru - fru structure | ||
| 6095 | * @param[in] dm_kva - pointer to virtual memory address | ||
| 6096 | * @param[in] dm_pa - physical memory address | ||
| 6097 | * @param[in] mincfg - minimal cfg variable | ||
| 6098 | */ | ||
| 6099 | void | ||
| 6100 | bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa, | ||
| 6101 | bfa_boolean_t mincfg) | ||
| 6102 | { | ||
| 6103 | if (mincfg) | ||
| 6104 | return; | ||
| 6105 | |||
| 6106 | fru->dbuf_kva = dm_kva; | ||
| 6107 | fru->dbuf_pa = dm_pa; | ||
| 6108 | memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); | ||
| 6109 | dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); | ||
| 6110 | dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); | ||
| 6111 | } | ||
| 6112 | |||
| 6113 | /* | ||
| 6114 | * Update fru vpd image. | ||
| 6115 | * | ||
| 6116 | * @param[in] fru - fru structure | ||
| 6117 | * @param[in] buf - update data buffer | ||
| 6118 | * @param[in] len - data buffer length | ||
| 6119 | * @param[in] offset - offset relative to starting address | ||
| 6120 | * @param[in] cbfn - callback function | ||
| 6121 | * @param[in] cbarg - callback argument | ||
| 6122 | * | ||
| 6123 | * Return status. | ||
| 6124 | */ | ||
| 6125 | bfa_status_t | ||
| 6126 | bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, | ||
| 6127 | bfa_cb_fru_t cbfn, void *cbarg) | ||
| 6128 | { | ||
| 6129 | bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ); | ||
| 6130 | bfa_trc(fru, len); | ||
| 6131 | bfa_trc(fru, offset); | ||
| 6132 | |||
| 6133 | if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) | ||
| 6134 | return BFA_STATUS_FRU_NOT_PRESENT; | ||
| 6135 | |||
| 6136 | if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) | ||
| 6137 | return BFA_STATUS_CMD_NOTSUPP; | ||
| 6138 | |||
| 6139 | if (!bfa_ioc_is_operational(fru->ioc)) | ||
| 6140 | return BFA_STATUS_IOC_NON_OP; | ||
| 6141 | |||
| 6142 | if (fru->op_busy) { | ||
| 6143 | bfa_trc(fru, fru->op_busy); | ||
| 6144 | return BFA_STATUS_DEVBUSY; | ||
| 6145 | } | ||
| 6146 | |||
| 6147 | fru->op_busy = 1; | ||
| 6148 | |||
| 6149 | fru->cbfn = cbfn; | ||
| 6150 | fru->cbarg = cbarg; | ||
| 6151 | fru->residue = len; | ||
| 6152 | fru->offset = 0; | ||
| 6153 | fru->addr_off = offset; | ||
| 6154 | fru->ubuf = buf; | ||
| 6155 | |||
| 6156 | bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); | ||
| 6157 | |||
| 6158 | return BFA_STATUS_OK; | ||
| 6159 | } | ||
| 6160 | |||
| 6161 | /* | ||
| 6162 | * Read fru vpd image. | ||
| 6163 | * | ||
| 6164 | * @param[in] fru - fru structure | ||
| 6165 | * @param[in] buf - read data buffer | ||
| 6166 | * @param[in] len - data buffer length | ||
| 6167 | * @param[in] offset - offset relative to starting address | ||
| 6168 | * @param[in] cbfn - callback function | ||
| 6169 | * @param[in] cbarg - callback argument | ||
| 6170 | * | ||
| 6171 | * Return status. | ||
| 6172 | */ | ||
| 6173 | bfa_status_t | ||
| 6174 | bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, | ||
| 6175 | bfa_cb_fru_t cbfn, void *cbarg) | ||
| 6176 | { | ||
| 6177 | bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ); | ||
| 6178 | bfa_trc(fru, len); | ||
| 6179 | bfa_trc(fru, offset); | ||
| 6180 | |||
| 6181 | if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) | ||
| 6182 | return BFA_STATUS_FRU_NOT_PRESENT; | ||
| 6183 | |||
| 6184 | if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) | ||
| 6185 | return BFA_STATUS_CMD_NOTSUPP; | ||
| 6186 | |||
| 6187 | if (!bfa_ioc_is_operational(fru->ioc)) | ||
| 6188 | return BFA_STATUS_IOC_NON_OP; | ||
| 6189 | |||
| 6190 | if (fru->op_busy) { | ||
| 6191 | bfa_trc(fru, fru->op_busy); | ||
| 6192 | return BFA_STATUS_DEVBUSY; | ||
| 6193 | } | ||
| 6194 | |||
| 6195 | fru->op_busy = 1; | ||
| 6196 | |||
| 6197 | fru->cbfn = cbfn; | ||
| 6198 | fru->cbarg = cbarg; | ||
| 6199 | fru->residue = len; | ||
| 6200 | fru->offset = 0; | ||
| 6201 | fru->addr_off = offset; | ||
| 6202 | fru->ubuf = buf; | ||
| 6203 | bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ); | ||
| 6204 | |||
| 6205 | return BFA_STATUS_OK; | ||
| 6206 | } | ||
| 6207 | |||
| 6208 | /* | ||
| 6209 | * Get maximum size fru vpd image. | ||
| 6210 | * | ||
| 6211 | * @param[in] fru - fru structure | ||
| 6212 | * @param[out] size - maximum size of fru vpd data | ||
| 6213 | * | ||
| 6214 | * Return status. | ||
| 6215 | */ | ||
| 6216 | bfa_status_t | ||
| 6217 | bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size) | ||
| 6218 | { | ||
| 6219 | if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) | ||
| 6220 | return BFA_STATUS_FRU_NOT_PRESENT; | ||
| 6221 | |||
| 6222 | if (!bfa_ioc_is_operational(fru->ioc)) | ||
| 6223 | return BFA_STATUS_IOC_NON_OP; | ||
| 6224 | |||
| 6225 | if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK) | ||
| 6226 | *max_size = BFA_FRU_CHINOOK_MAX_SIZE; | ||
| 6227 | else | ||
| 6228 | return BFA_STATUS_CMD_NOTSUPP; | ||
| 6229 | return BFA_STATUS_OK; | ||
| 6230 | } | ||
| 6231 | /* | ||
| 6232 | * tfru write. | ||
| 6233 | * | ||
| 6234 | * @param[in] fru - fru structure | ||
| 6235 | * @param[in] buf - update data buffer | ||
| 6236 | * @param[in] len - data buffer length | ||
| 6237 | * @param[in] offset - offset relative to starting address | ||
| 6238 | * @param[in] cbfn - callback function | ||
| 6239 | * @param[in] cbarg - callback argument | ||
| 6240 | * | ||
| 6241 | * Return status. | ||
| 6242 | */ | ||
| 6243 | bfa_status_t | ||
| 6244 | bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, | ||
| 6245 | bfa_cb_fru_t cbfn, void *cbarg) | ||
| 6246 | { | ||
| 6247 | bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ); | ||
| 6248 | bfa_trc(fru, len); | ||
| 6249 | bfa_trc(fru, offset); | ||
| 6250 | bfa_trc(fru, *((u8 *) buf)); | ||
| 6251 | |||
| 6252 | if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) | ||
| 6253 | return BFA_STATUS_FRU_NOT_PRESENT; | ||
| 6254 | |||
| 6255 | if (!bfa_ioc_is_operational(fru->ioc)) | ||
| 6256 | return BFA_STATUS_IOC_NON_OP; | ||
| 6257 | |||
| 6258 | if (fru->op_busy) { | ||
| 6259 | bfa_trc(fru, fru->op_busy); | ||
| 6260 | return BFA_STATUS_DEVBUSY; | ||
| 6261 | } | ||
| 6262 | |||
| 6263 | fru->op_busy = 1; | ||
| 6264 | |||
| 6265 | fru->cbfn = cbfn; | ||
| 6266 | fru->cbarg = cbarg; | ||
| 6267 | fru->residue = len; | ||
| 6268 | fru->offset = 0; | ||
| 6269 | fru->addr_off = offset; | ||
| 6270 | fru->ubuf = buf; | ||
| 6271 | |||
| 6272 | bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ); | ||
| 6273 | |||
| 6274 | return BFA_STATUS_OK; | ||
| 6275 | } | ||
| 6276 | |||
| 6277 | /* | ||
| 6278 | * tfru read. | ||
| 6279 | * | ||
| 6280 | * @param[in] fru - fru structure | ||
| 6281 | * @param[in] buf - read data buffer | ||
| 6282 | * @param[in] len - data buffer length | ||
| 6283 | * @param[in] offset - offset relative to starting address | ||
| 6284 | * @param[in] cbfn - callback function | ||
| 6285 | * @param[in] cbarg - callback argument | ||
| 6286 | * | ||
| 6287 | * Return status. | ||
| 6288 | */ | ||
| 6289 | bfa_status_t | ||
| 6290 | bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, | ||
| 6291 | bfa_cb_fru_t cbfn, void *cbarg) | ||
| 6292 | { | ||
| 6293 | bfa_trc(fru, BFI_TFRU_H2I_READ_REQ); | ||
| 6294 | bfa_trc(fru, len); | ||
| 6295 | bfa_trc(fru, offset); | ||
| 6296 | |||
| 6297 | if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) | ||
| 6298 | return BFA_STATUS_FRU_NOT_PRESENT; | ||
| 6299 | |||
| 6300 | if (!bfa_ioc_is_operational(fru->ioc)) | ||
| 6301 | return BFA_STATUS_IOC_NON_OP; | ||
| 6302 | |||
| 6303 | if (fru->op_busy) { | ||
| 6304 | bfa_trc(fru, fru->op_busy); | ||
| 6305 | return BFA_STATUS_DEVBUSY; | ||
| 6306 | } | ||
| 6307 | |||
| 6308 | fru->op_busy = 1; | ||
| 6309 | |||
| 6310 | fru->cbfn = cbfn; | ||
| 6311 | fru->cbarg = cbarg; | ||
| 6312 | fru->residue = len; | ||
| 6313 | fru->offset = 0; | ||
| 6314 | fru->addr_off = offset; | ||
| 6315 | fru->ubuf = buf; | ||
| 6316 | bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ); | ||
| 6317 | |||
| 6318 | return BFA_STATUS_OK; | ||
| 6319 | } | ||
| 6320 | |||
| 6321 | /* | ||
| 6322 | * Process fru response messages upon receiving interrupts. | ||
| 6323 | * | ||
| 6324 | * @param[in] fruarg - fru structure | ||
| 6325 | * @param[in] msg - message structure | ||
| 6326 | */ | ||
| 6327 | void | ||
| 6328 | bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg) | ||
| 6329 | { | ||
| 6330 | struct bfa_fru_s *fru = fruarg; | ||
| 6331 | struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg; | ||
| 6332 | u32 status; | ||
| 6333 | |||
| 6334 | bfa_trc(fru, msg->mh.msg_id); | ||
| 6335 | |||
| 6336 | if (!fru->op_busy) { | ||
| 6337 | /* | ||
| 6338 | * receiving response after ioc failure | ||
| 6339 | */ | ||
| 6340 | bfa_trc(fru, 0x9999); | ||
| 6341 | return; | ||
| 6342 | } | ||
| 6343 | |||
| 6344 | switch (msg->mh.msg_id) { | ||
| 6345 | case BFI_FRUVPD_I2H_WRITE_RSP: | ||
| 6346 | case BFI_TFRU_I2H_WRITE_RSP: | ||
| 6347 | status = be32_to_cpu(rsp->status); | ||
| 6348 | bfa_trc(fru, status); | ||
| 6349 | |||
| 6350 | if (status != BFA_STATUS_OK || fru->residue == 0) { | ||
| 6351 | fru->status = status; | ||
| 6352 | fru->op_busy = 0; | ||
| 6353 | if (fru->cbfn) | ||
| 6354 | fru->cbfn(fru->cbarg, fru->status); | ||
| 6355 | } else { | ||
| 6356 | bfa_trc(fru, fru->offset); | ||
| 6357 | if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) | ||
| 6358 | bfa_fru_write_send(fru, | ||
| 6359 | BFI_FRUVPD_H2I_WRITE_REQ); | ||
| 6360 | else | ||
| 6361 | bfa_fru_write_send(fru, | ||
| 6362 | BFI_TFRU_H2I_WRITE_REQ); | ||
| 6363 | } | ||
| 6364 | break; | ||
| 6365 | case BFI_FRUVPD_I2H_READ_RSP: | ||
| 6366 | case BFI_TFRU_I2H_READ_RSP: | ||
| 6367 | status = be32_to_cpu(rsp->status); | ||
| 6368 | bfa_trc(fru, status); | ||
| 6369 | |||
| 6370 | if (status != BFA_STATUS_OK) { | ||
| 6371 | fru->status = status; | ||
| 6372 | fru->op_busy = 0; | ||
| 6373 | if (fru->cbfn) | ||
| 6374 | fru->cbfn(fru->cbarg, fru->status); | ||
| 6375 | } else { | ||
| 6376 | u32 len = be32_to_cpu(rsp->length); | ||
| 6377 | |||
| 6378 | bfa_trc(fru, fru->offset); | ||
| 6379 | bfa_trc(fru, len); | ||
| 6380 | |||
| 6381 | memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); | ||
| 6382 | fru->residue -= len; | ||
| 6383 | fru->offset += len; | ||
| 6384 | |||
| 6385 | if (fru->residue == 0) { | ||
| 6386 | fru->status = status; | ||
| 6387 | fru->op_busy = 0; | ||
| 6388 | if (fru->cbfn) | ||
| 6389 | fru->cbfn(fru->cbarg, fru->status); | ||
| 6390 | } else { | ||
| 6391 | if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) | ||
| 6392 | bfa_fru_read_send(fru, | ||
| 6393 | BFI_FRUVPD_H2I_READ_REQ); | ||
| 6394 | else | ||
| 6395 | bfa_fru_read_send(fru, | ||
| 6396 | BFI_TFRU_H2I_READ_REQ); | ||
| 6397 | } | ||
| 6398 | } | ||
| 6399 | break; | ||
| 6400 | default: | ||
| 6401 | WARN_ON(1); | ||
| 6402 | } | ||
| 6403 | } | ||
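The new FRU read/write path streams the user buffer through an 8K DMA buffer: each mailbox message carries offset and length, residue/offset advance per chunk, the final write message sets a "last" flag, and the response interrupt re-arms the next chunk. A standalone sketch of that chunking loop, with a tiny buffer size and printf() standing in for the mailbox:

/*
 * Standalone sketch of the FRU chunking loop, not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRU_DMA_BUF_SZ 8	/* 8 bytes here; 8K in the driver */

static uint8_t dbuf[FRU_DMA_BUF_SZ];	/* stands in for the DMA buffer */

struct fru_xfer {
	const uint8_t *ubuf;	/* user-supplied buffer */
	uint32_t residue;	/* bytes still to send */
	uint32_t offset;	/* bytes sent so far */
};

/* Send one chunk; returns nonzero when it carried the "last" flag. */
static int fru_write_send(struct fru_xfer *x)
{
	uint32_t len = (x->residue < FRU_DMA_BUF_SZ) ?
			x->residue : FRU_DMA_BUF_SZ;
	int last = (len == x->residue);

	memcpy(dbuf, x->ubuf + x->offset, len);
	printf("msg: offset=%u len=%u last=%d\n", x->offset, len, last);
	x->residue -= len;
	x->offset += len;
	return last;
}

int main(void)
{
	uint8_t vpd[20];
	struct fru_xfer x = { .ubuf = vpd, .residue = sizeof(vpd) };

	memset(vpd, 0xab, sizeof(vpd));

	/* the driver re-arms this from the WRITE_RSP interrupt instead */
	while (!fru_write_send(&x))
		;
	return 0;
}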
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index b2856f96567c..23a90e7b7107 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h | |||
| @@ -702,6 +702,55 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy, | |||
| 702 | void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); | 702 | void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); |
| 703 | 703 | ||
| 704 | /* | 704 | /* |
| 705 | * FRU module specific | ||
| 706 | */ | ||
| 707 | typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status); | ||
| 708 | |||
| 709 | struct bfa_fru_s { | ||
| 710 | struct bfa_ioc_s *ioc; /* back pointer to ioc */ | ||
| 711 | struct bfa_trc_mod_s *trcmod; /* trace module */ | ||
| 712 | u8 op_busy; /* operation busy flag */ | ||
| 713 | u8 rsv[3]; | ||
| 714 | u32 residue; /* residual length */ | ||
| 715 | u32 offset; /* offset */ | ||
| 716 | bfa_status_t status; /* status */ | ||
| 717 | u8 *dbuf_kva; /* dma buf virtual address */ | ||
| 718 | u64 dbuf_pa; /* dma buf physical address */ | ||
| 719 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ | ||
| 720 | bfa_cb_fru_t cbfn; /* user callback function */ | ||
| 721 | void *cbarg; /* user callback arg */ | ||
| 722 | u8 *ubuf; /* user supplied buffer */ | ||
| 723 | struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */ | ||
| 724 | u32 addr_off; /* fru address offset */ | ||
| 725 | struct bfa_mbox_cmd_s mb; /* mailbox */ | ||
| 726 | struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ | ||
| 727 | struct bfa_mem_dma_s fru_dma; | ||
| 728 | }; | ||
| 729 | |||
| 730 | #define BFA_FRU(__bfa) (&(__bfa)->modules.fru) | ||
| 731 | #define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma)) | ||
| 732 | |||
| 733 | bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, | ||
| 734 | void *buf, u32 len, u32 offset, | ||
| 735 | bfa_cb_fru_t cbfn, void *cbarg); | ||
| 736 | bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru, | ||
| 737 | void *buf, u32 len, u32 offset, | ||
| 738 | bfa_cb_fru_t cbfn, void *cbarg); | ||
| 739 | bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size); | ||
| 740 | bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru, | ||
| 741 | void *buf, u32 len, u32 offset, | ||
| 742 | bfa_cb_fru_t cbfn, void *cbarg); | ||
| 743 | bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru, | ||
| 744 | void *buf, u32 len, u32 offset, | ||
| 745 | bfa_cb_fru_t cbfn, void *cbarg); | ||
| 746 | u32 bfa_fru_meminfo(bfa_boolean_t mincfg); | ||
| 747 | void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, | ||
| 748 | void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); | ||
| 749 | void bfa_fru_memclaim(struct bfa_fru_s *fru, | ||
| 750 | u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); | ||
| 751 | void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg); | ||
| 752 | |||
| 753 | /* | ||
| 705 | * Driver Config( dconf) specific | 754 | * Driver Config( dconf) specific |
| 706 | */ | 755 | */ |
| 707 | #define BFI_DCONF_SIGNATURE 0xabcdabcd | 756 | #define BFI_DCONF_SIGNATURE 0xabcdabcd |
| @@ -716,6 +765,7 @@ struct bfa_dconf_hdr_s { | |||
| 716 | struct bfa_dconf_s { | 765 | struct bfa_dconf_s { |
| 717 | struct bfa_dconf_hdr_s hdr; | 766 | struct bfa_dconf_hdr_s hdr; |
| 718 | struct bfa_lunmask_cfg_s lun_mask; | 767 | struct bfa_lunmask_cfg_s lun_mask; |
| 768 | struct bfa_throttle_cfg_s throttle_cfg; | ||
| 719 | }; | 769 | }; |
| 720 | #pragma pack() | 770 | #pragma pack() |
| 721 | 771 | ||
| @@ -738,6 +788,8 @@ struct bfa_dconf_mod_s { | |||
| 738 | #define bfa_dconf_read_data_valid(__bfa) \ | 788 | #define bfa_dconf_read_data_valid(__bfa) \ |
| 739 | (BFA_DCONF_MOD(__bfa)->read_data_valid) | 789 | (BFA_DCONF_MOD(__bfa)->read_data_valid) |
| 740 | #define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ | 790 | #define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ |
| 791 | #define bfa_dconf_get_min_cfg(__bfa) \ | ||
| 792 | (BFA_DCONF_MOD(__bfa)->min_cfg) | ||
| 741 | 793 | ||
| 742 | void bfa_dconf_modinit(struct bfa_s *bfa); | 794 | void bfa_dconf_modinit(struct bfa_s *bfa); |
| 743 | void bfa_dconf_modexit(struct bfa_s *bfa); | 795 | void bfa_dconf_modexit(struct bfa_s *bfa); |
| @@ -761,7 +813,8 @@ bfa_status_t bfa_dconf_update(struct bfa_s *bfa); | |||
| 761 | #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) | 813 | #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) |
| 762 | #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) | 814 | #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) |
| 763 | #define bfa_ioc_speed_sup(__ioc) \ | 815 | #define bfa_ioc_speed_sup(__ioc) \ |
| 764 | BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) | 816 | ((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS : \ |
| 817 | BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)) | ||
| 765 | #define bfa_ioc_get_nports(__ioc) \ | 818 | #define bfa_ioc_get_nports(__ioc) \ |
| 766 | BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) | 819 | BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) |
| 767 | 820 | ||
| @@ -885,12 +938,12 @@ bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, | |||
| 885 | enum bfa_mode_s mode, int max_pf, int max_vf, | 938 | enum bfa_mode_s mode, int max_pf, int max_vf, |
| 886 | bfa_ablk_cbfn_t cbfn, void *cbarg); | 939 | bfa_ablk_cbfn_t cbfn, void *cbarg); |
| 887 | bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, | 940 | bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, |
| 888 | u8 port, enum bfi_pcifn_class personality, int bw, | 941 | u8 port, enum bfi_pcifn_class personality, |
| 889 | bfa_ablk_cbfn_t cbfn, void *cbarg); | 942 | u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); |
| 890 | bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, | 943 | bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, |
| 891 | bfa_ablk_cbfn_t cbfn, void *cbarg); | 944 | bfa_ablk_cbfn_t cbfn, void *cbarg); |
| 892 | bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, | 945 | bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, |
| 893 | bfa_ablk_cbfn_t cbfn, void *cbarg); | 946 | u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg); |
| 894 | bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, | 947 | bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, |
| 895 | bfa_ablk_cbfn_t cbfn, void *cbarg); | 948 | bfa_ablk_cbfn_t cbfn, void *cbarg); |
| 896 | bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, | 949 | bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, |
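Among the header changes, bfa_ioc_speed_sup() now short-circuits CNA adapters to a fixed 10G instead of decoding the adapter-property field. A toy model of the macro's new shape; the property encoding below is an assumption for illustration, not the real BFI_ADAPTER_GETP() layout.

/*
 * Toy model only; the property encoding is invented.
 */
#include <stdio.h>

#define SPEED_10G 10

static int adapter_prop_speed(unsigned int prop)
{
	return (int)(prop & 0xff);	/* assume: low byte carries speed */
}

static int ioc_speed_sup(int is_cna, unsigned int adapter_prop)
{
	/* CNA is always reported as 10G, as in the reworked macro */
	return is_cna ? SPEED_10G : adapter_prop_speed(adapter_prop);
}

int main(void)
{
	printf("cna: %dG, hba: %dG\n",
	       ioc_speed_sup(1, 0x08), ioc_speed_sup(0, 0x08));
	return 0;
}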
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 2eb0c6a2938d..de4e726a1263 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c | |||
| @@ -57,13 +57,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) | |||
| 57 | u32 usecnt; | 57 | u32 usecnt; |
| 58 | struct bfi_ioc_image_hdr_s fwhdr; | 58 | struct bfi_ioc_image_hdr_s fwhdr; |
| 59 | 59 | ||
| 60 | /* | ||
| 61 | * If bios boot (flash based) -- do not increment usage count | ||
| 62 | */ | ||
| 63 | if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < | ||
| 64 | BFA_IOC_FWIMG_MINSZ) | ||
| 65 | return BFA_TRUE; | ||
| 66 | |||
| 67 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); | 60 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); |
| 68 | usecnt = readl(ioc->ioc_regs.ioc_usage_reg); | 61 | usecnt = readl(ioc->ioc_regs.ioc_usage_reg); |
| 69 | 62 | ||
| @@ -115,13 +108,6 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) | |||
| 115 | u32 usecnt; | 108 | u32 usecnt; |
| 116 | 109 | ||
| 117 | /* | 110 | /* |
| 118 | * If bios boot (flash based) -- do not decrement usage count | ||
| 119 | */ | ||
| 120 | if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < | ||
| 121 | BFA_IOC_FWIMG_MINSZ) | ||
| 122 | return; | ||
| 123 | |||
| 124 | /* | ||
| 125 | * decrement usage count | 111 | * decrement usage count |
| 126 | */ | 112 | */ |
| 127 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); | 113 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); |
| @@ -400,13 +386,12 @@ static void | |||
| 400 | bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) | 386 | bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) |
| 401 | { | 387 | { |
| 402 | 388 | ||
| 403 | if (bfa_ioc_is_cna(ioc)) { | 389 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); |
| 404 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); | 390 | writel(0, ioc->ioc_regs.ioc_usage_reg); |
| 405 | writel(0, ioc->ioc_regs.ioc_usage_reg); | 391 | readl(ioc->ioc_regs.ioc_usage_sem_reg); |
| 406 | readl(ioc->ioc_regs.ioc_usage_sem_reg); | 392 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
| 407 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); | ||
| 408 | } | ||
| 409 | 393 | ||
| 394 | writel(0, ioc->ioc_regs.ioc_fail_sync); | ||
| 410 | /* | 395 | /* |
| 411 | * Read the hw sem reg to make sure that it is locked | 396 | * Read the hw sem reg to make sure that it is locked |
| 412 | * before we clear it. If it is not locked, writing 1 | 397 | * before we clear it. If it is not locked, writing 1 |
| @@ -759,25 +744,6 @@ bfa_ioc_ct2_mem_init(void __iomem *rb) | |||
| 759 | void | 744 | void |
| 760 | bfa_ioc_ct2_mac_reset(void __iomem *rb) | 745 | bfa_ioc_ct2_mac_reset(void __iomem *rb) |
| 761 | { | 746 | { |
| 762 | u32 r32; | ||
| 763 | |||
| 764 | bfa_ioc_ct2_sclk_init(rb); | ||
| 765 | bfa_ioc_ct2_lclk_init(rb); | ||
| 766 | |||
| 767 | /* | ||
| 768 | * release soft reset on s_clk & l_clk | ||
| 769 | */ | ||
| 770 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
| 771 | writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, | ||
| 772 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
| 773 | |||
| 774 | /* | ||
| 775 | * release soft reset on s_clk & l_clk | ||
| 776 | */ | ||
| 777 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
| 778 | writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, | ||
| 779 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
| 780 | |||
| 781 | /* put port0, port1 MAC & AHB in reset */ | 747 | /* put port0, port1 MAC & AHB in reset */ |
| 782 | writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), | 748 | writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), |
| 783 | rb + CT2_CSI_MAC_CONTROL_REG(0)); | 749 | rb + CT2_CSI_MAC_CONTROL_REG(0)); |
| @@ -785,8 +751,21 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb) | |||
| 785 | rb + CT2_CSI_MAC_CONTROL_REG(1)); | 751 | rb + CT2_CSI_MAC_CONTROL_REG(1)); |
| 786 | } | 752 | } |
| 787 | 753 | ||
| 754 | static void | ||
| 755 | bfa_ioc_ct2_enable_flash(void __iomem *rb) | ||
| 756 | { | ||
| 757 | u32 r32; | ||
| 758 | |||
| 759 | r32 = readl((rb + PSS_GPIO_OUT_REG)); | ||
| 760 | writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); | ||
| 761 | r32 = readl((rb + PSS_GPIO_OE_REG)); | ||
| 762 | writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); | ||
| 763 | } | ||
| 764 | |||
| 788 | #define CT2_NFC_MAX_DELAY 1000 | 765 | #define CT2_NFC_MAX_DELAY 1000 |
| 789 | #define CT2_NFC_VER_VALID 0x143 | 766 | #define CT2_NFC_PAUSE_MAX_DELAY 4000 |
| 767 | #define CT2_NFC_VER_VALID 0x147 | ||
| 768 | #define CT2_NFC_STATE_RUNNING 0x20000001 | ||
| 790 | #define BFA_IOC_PLL_POLL 1000000 | 769 | #define BFA_IOC_PLL_POLL 1000000 |
| 791 | 770 | ||
| 792 | static bfa_boolean_t | 771 | static bfa_boolean_t |
| @@ -802,6 +781,20 @@ bfa_ioc_ct2_nfc_halted(void __iomem *rb) | |||
| 802 | } | 781 | } |
| 803 | 782 | ||
| 804 | static void | 783 | static void |
| 784 | bfa_ioc_ct2_nfc_halt(void __iomem *rb) | ||
| 785 | { | ||
| 786 | int i; | ||
| 787 | |||
| 788 | writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); | ||
| 789 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { | ||
| 790 | if (bfa_ioc_ct2_nfc_halted(rb)) | ||
| 791 | break; | ||
| 792 | udelay(1000); | ||
| 793 | } | ||
| 794 | WARN_ON(!bfa_ioc_ct2_nfc_halted(rb)); | ||
| 795 | } | ||
| 796 | |||
| 797 | static void | ||
| 805 | bfa_ioc_ct2_nfc_resume(void __iomem *rb) | 798 | bfa_ioc_ct2_nfc_resume(void __iomem *rb) |
| 806 | { | 799 | { |
| 807 | u32 r32; | 800 | u32 r32; |
| @@ -817,105 +810,142 @@ bfa_ioc_ct2_nfc_resume(void __iomem *rb) | |||
| 817 | WARN_ON(1); | 810 | WARN_ON(1); |
| 818 | } | 811 | } |
| 819 | 812 | ||
| 820 | bfa_status_t | 813 | static void |
| 821 | bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) | 814 | bfa_ioc_ct2_clk_reset(void __iomem *rb) |
| 822 | { | 815 | { |
| 823 | u32 wgn, r32, nfc_ver, i; | 816 | u32 r32; |
| 824 | 817 | ||
| 825 | wgn = readl(rb + CT2_WGN_STATUS); | 818 | bfa_ioc_ct2_sclk_init(rb); |
| 826 | nfc_ver = readl(rb + CT2_RSC_GPR15_REG); | 819 | bfa_ioc_ct2_lclk_init(rb); |
| 827 | 820 | ||
| 828 | if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && | 821 | /* |
| 829 | (nfc_ver >= CT2_NFC_VER_VALID)) { | 822 | * release soft reset on s_clk & l_clk |
| 830 | if (bfa_ioc_ct2_nfc_halted(rb)) | 823 | */ |
| 831 | bfa_ioc_ct2_nfc_resume(rb); | 824 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); |
| 825 | writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, | ||
| 826 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
| 832 | 827 | ||
| 833 | writel(__RESET_AND_START_SCLK_LCLK_PLLS, | 828 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); |
| 834 | rb + CT2_CSI_FW_CTL_SET_REG); | 829 | writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, |
| 830 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
| 835 | 831 | ||
| 836 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { | 832 | } |
| 837 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | ||
| 838 | if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) | ||
| 839 | break; | ||
| 840 | } | ||
| 841 | 833 | ||
| 842 | WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); | 834 | static void |
| 835 | bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb) | ||
| 836 | { | ||
| 837 | u32 r32, i; | ||
| 843 | 838 | ||
| 844 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { | 839 | r32 = readl((rb + PSS_CTL_REG)); |
| 845 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | 840 | r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); |
| 846 | if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) | 841 | writel(r32, (rb + PSS_CTL_REG)); |
| 847 | break; | 842 | |
| 848 | } | 843 | writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); |
| 849 | 844 | ||
| 850 | WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); | 845 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { |
| 846 | r32 = readl(rb + CT2_NFC_FLASH_STS_REG); | ||
| 847 | |||
| 848 | if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) | ||
| 849 | break; | ||
| 850 | } | ||
| 851 | WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); | ||
| 852 | |||
| 853 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { | ||
| 854 | r32 = readl(rb + CT2_NFC_FLASH_STS_REG); | ||
| 855 | |||
| 856 | if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)) | ||
| 857 | break; | ||
| 858 | } | ||
| 859 | WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS)); | ||
| 860 | |||
| 861 | r32 = readl(rb + CT2_CSI_FW_CTL_REG); | ||
| 862 | WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); | ||
| 863 | } | ||
| 864 | |||
| 865 | static void | ||
| 866 | bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb) | ||
| 867 | { | ||
| 868 | u32 r32; | ||
| 869 | int i; | ||
| 870 | |||
| 871 | if (bfa_ioc_ct2_nfc_halted(rb)) | ||
| 872 | bfa_ioc_ct2_nfc_resume(rb); | ||
| 873 | for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) { | ||
| 874 | r32 = readl(rb + CT2_NFC_STS_REG); | ||
| 875 | if (r32 == CT2_NFC_STATE_RUNNING) | ||
| 876 | return; | ||
| 851 | udelay(1000); | 877 | udelay(1000); |
| 878 | } | ||
| 852 | 879 | ||
| 853 | r32 = readl(rb + CT2_CSI_FW_CTL_REG); | 880 | r32 = readl(rb + CT2_NFC_STS_REG); |
| 854 | WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); | 881 | WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING)); |
| 855 | } else { | 882 | } |
| 856 | writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); | ||
| 857 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { | ||
| 858 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); | ||
| 859 | if (r32 & __NFC_CONTROLLER_HALTED) | ||
| 860 | break; | ||
| 861 | udelay(1000); | ||
| 862 | } | ||
| 863 | 883 | ||
| 864 | bfa_ioc_ct2_mac_reset(rb); | 884 | bfa_status_t |
| 865 | bfa_ioc_ct2_sclk_init(rb); | 885 | bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) |
| 866 | bfa_ioc_ct2_lclk_init(rb); | 886 | { |
| 887 | u32 wgn, r32, nfc_ver; | ||
| 867 | 888 | ||
| 868 | /* | 889 | wgn = readl(rb + CT2_WGN_STATUS); |
| 869 | * release soft reset on s_clk & l_clk | ||
| 870 | */ | ||
| 871 | r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); | ||
| 872 | writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, | ||
| 873 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
| 874 | 890 | ||
| 891 | if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { | ||
| 875 | /* | 892 | /* |
| 876 | * release soft reset on s_clk & l_clk | 893 | * If flash is corrupted, enable flash explicitly |
| 877 | */ | 894 | */ |
| 878 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | 895 | bfa_ioc_ct2_clk_reset(rb); |
| 879 | writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, | 896 | bfa_ioc_ct2_enable_flash(rb); |
| 880 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
| 881 | } | ||
| 882 | 897 | ||
| 883 | /* | 898 | bfa_ioc_ct2_mac_reset(rb); |
| 884 | * Announce flash device presence, if flash was corrupted. | 899 | |
| 885 | */ | 900 | bfa_ioc_ct2_clk_reset(rb); |
| 886 | if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { | 901 | bfa_ioc_ct2_enable_flash(rb); |
| 887 | r32 = readl(rb + PSS_GPIO_OUT_REG); | 902 | |
| 888 | writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); | 903 | } else { |
| 889 | r32 = readl(rb + PSS_GPIO_OE_REG); | 904 | nfc_ver = readl(rb + CT2_RSC_GPR15_REG); |
| 890 | writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); | 905 | |
| 906 | if ((nfc_ver >= CT2_NFC_VER_VALID) && | ||
| 907 | (wgn == (__A2T_AHB_LOAD | __WGN_READY))) { | ||
| 908 | |||
| 909 | bfa_ioc_ct2_wait_till_nfc_running(rb); | ||
| 910 | |||
| 911 | bfa_ioc_ct2_nfc_clk_reset(rb); | ||
| 912 | } else { | ||
| 913 | bfa_ioc_ct2_nfc_halt(rb); | ||
| 914 | |||
| 915 | bfa_ioc_ct2_clk_reset(rb); | ||
| 916 | bfa_ioc_ct2_mac_reset(rb); | ||
| 917 | bfa_ioc_ct2_clk_reset(rb); | ||
| 918 | |||
| 919 | } | ||
| 891 | } | 920 | } |
| 892 | 921 | ||
| 893 | /* | 922 | /* |
| 894 | * Mask the interrupts and clear any | 923 | * Mask the interrupts and clear any |
| 895 | * pending interrupts. | 924 | * pending interrupts left by BIOS/EFI |
| 896 | */ | 925 | */ |
| 926 | |||
| 897 | writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); | 927 | writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); |
| 898 | writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); | 928 | writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); |
| 899 | 929 | ||
| 900 | /* For first time initialization, no need to clear interrupts */ | 930 | /* For first time initialization, no need to clear interrupts */ |
| 901 | r32 = readl(rb + HOST_SEM5_REG); | 931 | r32 = readl(rb + HOST_SEM5_REG); |
| 902 | if (r32 & 0x1) { | 932 | if (r32 & 0x1) { |
| 903 | r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); | 933 | r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); |
| 904 | if (r32 == 1) { | 934 | if (r32 == 1) { |
| 905 | writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); | 935 | writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); |
| 906 | readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | 936 | readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); |
| 907 | } | 937 | } |
| 908 | r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); | 938 | r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); |
| 909 | if (r32 == 1) { | 939 | if (r32 == 1) { |
| 910 | writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); | 940 | writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); |
| 911 | readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); | 941 | readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); |
| 912 | } | 942 | } |
| 913 | } | 943 | } |
| 914 | 944 | ||
| 915 | bfa_ioc_ct2_mem_init(rb); | 945 | bfa_ioc_ct2_mem_init(rb); |
| 916 | 946 | ||
| 917 | writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); | 947 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); |
| 918 | writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); | 948 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); |
| 919 | 949 | ||
| 920 | return BFA_STATUS_OK; | 950 | return BFA_STATUS_OK; |
| 921 | } | 951 | } |
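The reworked PLL bring-up leans on one bounded-poll idiom throughout (bfa_ioc_ct2_nfc_halt(), bfa_ioc_ct2_wait_till_nfc_running(), the flash-PLL waits): read a status register, delay, give up after a fixed iteration count, and warn if the condition never arrived. A standalone sketch with the register read mocked out:

/*
 * Standalone sketch; the status read is mocked and udelay() is a no-op.
 */
#include <stdio.h>

#define NFC_MAX_DELAY 1000	/* poll cap, ~1ms apart in the driver */

static int polls;

static int nfc_halted(void)	/* stands in for the readl() status check */
{
	return ++polls > 3;	/* pretend the NFC halts after a few reads */
}

static void udelay(unsigned int usecs) { (void)usecs; }

int main(void)
{
	int i;

	for (i = 0; i < NFC_MAX_DELAY; i++) {
		if (nfc_halted())
			break;
		udelay(1000);
	}
	if (i == NFC_MAX_DELAY)
		fprintf(stderr, "WARN: NFC never halted\n"); /* WARN_ON() analogue */
	else
		printf("halted after %d polls\n", i + 1);
	return 0;
}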
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 189fff71e3c2..a14c784ff3fc 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h | |||
| @@ -45,6 +45,7 @@ struct bfa_modules_s { | |||
| 45 | struct bfa_diag_s diag_mod; /* diagnostics module */ | 45 | struct bfa_diag_s diag_mod; /* diagnostics module */ |
| 46 | struct bfa_phy_s phy; /* phy module */ | 46 | struct bfa_phy_s phy; /* phy module */ |
| 47 | struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ | 47 | struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ |
| 48 | struct bfa_fru_s fru; /* fru module */ | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | /* | 51 | /* |
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index 95e4ad8759ac..8ea7697deb9b 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c | |||
| @@ -250,6 +250,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, | |||
| 250 | return BFA_STATUS_IOC_FAILURE; | 250 | return BFA_STATUS_IOC_FAILURE; |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | /* if port is d-port enabled, return error */ | ||
| 254 | if (port->dport_enabled) { | ||
| 255 | bfa_trc(port, BFA_STATUS_DPORT_ERR); | ||
| 256 | return BFA_STATUS_DPORT_ERR; | ||
| 257 | } | ||
| 258 | |||
| 253 | if (port->endis_pending) { | 259 | if (port->endis_pending) { |
| 254 | bfa_trc(port, BFA_STATUS_DEVBUSY); | 260 | bfa_trc(port, BFA_STATUS_DEVBUSY); |
| 255 | return BFA_STATUS_DEVBUSY; | 261 | return BFA_STATUS_DEVBUSY; |
| @@ -300,6 +306,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, | |||
| 300 | return BFA_STATUS_IOC_FAILURE; | 306 | return BFA_STATUS_IOC_FAILURE; |
| 301 | } | 307 | } |
| 302 | 308 | ||
| 309 | /* if port is d-port enabled, return error */ | ||
| 310 | if (port->dport_enabled) { | ||
| 311 | bfa_trc(port, BFA_STATUS_DPORT_ERR); | ||
| 312 | return BFA_STATUS_DPORT_ERR; | ||
| 313 | } | ||
| 314 | |||
| 303 | if (port->endis_pending) { | 315 | if (port->endis_pending) { |
| 304 | bfa_trc(port, BFA_STATUS_DEVBUSY); | 316 | bfa_trc(port, BFA_STATUS_DEVBUSY); |
| 305 | return BFA_STATUS_DEVBUSY; | 317 | return BFA_STATUS_DEVBUSY; |
| @@ -431,6 +443,10 @@ bfa_port_notify(void *arg, enum bfa_ioc_event_e event) | |||
| 431 | port->endis_cbfn = NULL; | 443 | port->endis_cbfn = NULL; |
| 432 | port->endis_pending = BFA_FALSE; | 444 | port->endis_pending = BFA_FALSE; |
| 433 | } | 445 | } |
| 446 | |||
| 447 | /* clear D-port mode */ | ||
| 448 | if (port->dport_enabled) | ||
| 449 | bfa_port_set_dportenabled(port, BFA_FALSE); | ||
| 434 | break; | 450 | break; |
| 435 | default: | 451 | default: |
| 436 | break; | 452 | break; |
| @@ -467,6 +483,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, | |||
| 467 | port->stats_cbfn = NULL; | 483 | port->stats_cbfn = NULL; |
| 468 | port->endis_cbfn = NULL; | 484 | port->endis_cbfn = NULL; |
| 469 | port->pbc_disabled = BFA_FALSE; | 485 | port->pbc_disabled = BFA_FALSE; |
| 486 | port->dport_enabled = BFA_FALSE; | ||
| 470 | 487 | ||
| 471 | bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); | 488 | bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); |
| 472 | bfa_q_qe_init(&port->ioc_notify); | 489 | bfa_q_qe_init(&port->ioc_notify); |
| @@ -483,6 +500,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, | |||
| 483 | } | 500 | } |
| 484 | 501 | ||
| 485 | /* | 502 | /* |
| 503 | * bfa_port_set_dportenabled(); | ||
| 504 | * | ||
| 505 | * Port module - set dport enabled flag | ||
| 506 | * | ||
| 507 | * @param[in] port - Pointer to the Port module data structure | ||
| 508 | * | ||
| 509 | * @return void | ||
| 510 | */ | ||
| 511 | void | ||
| 512 | bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled) | ||
| 513 | { | ||
| 514 | port->dport_enabled = enabled; | ||
| 515 | } | ||
| 516 | |||
| 517 | /* | ||
| 486 | * CEE module specific definitions | 518 | * CEE module specific definitions |
| 487 | */ | 519 | */ |
| 488 | 520 | ||
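bfa_port_enable() and bfa_port_disable() now refuse to run while diagnostic-port mode is active, and the IOC-down notifier clears the flag. A minimal model of that guard; the status values are invented stand-ins:

/*
 * Minimal model of the new d-port guard, not driver code.
 */
#include <stdio.h>

enum status { ST_OK, ST_DPORT_ERR, ST_DEVBUSY };

struct port {
	int dport_enabled;	/* analogue of the new dport_enabled flag */
	int endis_pending;
};

static enum status port_enable(struct port *p)
{
	if (p->dport_enabled)
		return ST_DPORT_ERR;	/* reject while in d-port mode */
	if (p->endis_pending)
		return ST_DEVBUSY;
	return ST_OK;			/* normal enable path would run here */
}

int main(void)
{
	struct port p = { .dport_enabled = 1 };

	printf("enable with d-port on: %d (want %d)\n",
	       port_enable(&p), ST_DPORT_ERR);
	p.dport_enabled = 0;	/* IOC-down notifier clears the mode */
	printf("enable with d-port off: %d (want %d)\n",
	       port_enable(&p), ST_OK);
	return 0;
}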
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h index 947f897328d6..2fcab6bc6280 100644 --- a/drivers/scsi/bfa/bfa_port.h +++ b/drivers/scsi/bfa/bfa_port.h | |||
| @@ -45,6 +45,7 @@ struct bfa_port_s { | |||
| 45 | bfa_status_t endis_status; | 45 | bfa_status_t endis_status; |
| 46 | struct bfa_ioc_notify_s ioc_notify; | 46 | struct bfa_ioc_notify_s ioc_notify; |
| 47 | bfa_boolean_t pbc_disabled; | 47 | bfa_boolean_t pbc_disabled; |
| 48 | bfa_boolean_t dport_enabled; | ||
| 48 | struct bfa_mem_dma_s port_dma; | 49 | struct bfa_mem_dma_s port_dma; |
| 49 | }; | 50 | }; |
| 50 | 51 | ||
| @@ -66,6 +67,8 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port, | |||
| 66 | u32 bfa_port_meminfo(void); | 67 | u32 bfa_port_meminfo(void); |
| 67 | void bfa_port_mem_claim(struct bfa_port_s *port, | 68 | void bfa_port_mem_claim(struct bfa_port_s *port, |
| 68 | u8 *dma_kva, u64 dma_pa); | 69 | u8 *dma_kva, u64 dma_pa); |
| 70 | void bfa_port_set_dportenabled(struct bfa_port_s *port, | ||
| 71 | bfa_boolean_t enabled); | ||
| 69 | 72 | ||
| 70 | /* | 73 | /* |
| 71 | * CEE declaration | 74 | * CEE declaration |
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index b2538d60db34..299c1c889b33 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c | |||
| @@ -67,6 +67,9 @@ enum bfa_fcport_sm_event { | |||
| 67 | BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown */ | 67 | BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown */ |
| 68 | BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ | 68 | BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ |
| 69 | BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ | 69 | BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ |
| 70 | BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ | ||
| 71 | BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ | ||
| 72 | BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguration */ | ||
| 70 | }; | 73 | }; |
| 71 | 74 | ||
| 72 | /* | 75 | /* |
| @@ -197,6 +200,10 @@ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, | |||
| 197 | enum bfa_fcport_sm_event event); | 200 | enum bfa_fcport_sm_event event); |
| 198 | static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, | 201 | static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, |
| 199 | enum bfa_fcport_sm_event event); | 202 | enum bfa_fcport_sm_event event); |
| 203 | static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, | ||
| 204 | enum bfa_fcport_sm_event event); | ||
| 205 | static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, | ||
| 206 | enum bfa_fcport_sm_event event); | ||
| 200 | 207 | ||
| 201 | static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, | 208 | static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, |
| 202 | enum bfa_fcport_ln_sm_event event); | 209 | enum bfa_fcport_ln_sm_event event); |
| @@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = { | |||
| 226 | {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, | 233 | {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, |
| 227 | {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, | 234 | {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, |
| 228 | {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, | 235 | {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, |
| 236 | {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, | ||
| 237 | {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG}, | ||
| 229 | }; | 238 | }; |
| 230 | 239 | ||
| 231 | 240 | ||
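
hal_port_sm_table pairs each state-handler function with the externally reported port state; bfa_fcport_is_dport() further down resolves the current handler through exactly this table. A hedged sketch of the lookup pattern (names are illustrative, not the driver's):

#include <stddef.h>

typedef void (*sm_fn_t)(void *arg, int event);

struct sm_table {
	sm_fn_t sm;	/* state-handler function */
	int     state;	/* enum value reported to callers */
};

/* Walk the table until the current handler is found; the final entry
 * doubles as the fallback, mirroring how such tables usually end. */
static int sm_to_state(const struct sm_table *tbl, size_t n, sm_fn_t cur)
{
	size_t i;

	for (i = 0; i < n - 1; i++)
		if (tbl[i].sm == cur)
			break;
	return tbl[i].state;
}
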
| @@ -1244,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
| 1244 | * Just ignore | 1253 | * Just ignore |
| 1245 | */ | 1254 | */ |
| 1246 | break; | 1255 | break; |
| 1256 | case BFA_LPS_SM_SET_N2N_PID: | ||
| 1257 | /* | ||
| 1258 | * When topology is set to loop, bfa_lps_set_n2n_pid() sends | ||
| 1259 | * this event. Ignore this event. | ||
| 1260 | */ | ||
| 1261 | break; | ||
| 1247 | 1262 | ||
| 1248 | default: | 1263 | default: |
| 1249 | bfa_sm_fault(lps->bfa, event); | 1264 | bfa_sm_fault(lps->bfa, event); |
| @@ -1261,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
| 1261 | 1276 | ||
| 1262 | switch (event) { | 1277 | switch (event) { |
| 1263 | case BFA_LPS_SM_FWRSP: | 1278 | case BFA_LPS_SM_FWRSP: |
| 1279 | case BFA_LPS_SM_OFFLINE: | ||
| 1264 | if (lps->status == BFA_STATUS_OK) { | 1280 | if (lps->status == BFA_STATUS_OK) { |
| 1265 | bfa_sm_set_state(lps, bfa_lps_sm_online); | 1281 | bfa_sm_set_state(lps, bfa_lps_sm_online); |
| 1266 | if (lps->fdisc) | 1282 | if (lps->fdisc) |
| @@ -1289,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
| 1289 | bfa_lps_login_comp(lps); | 1305 | bfa_lps_login_comp(lps); |
| 1290 | break; | 1306 | break; |
| 1291 | 1307 | ||
| 1292 | case BFA_LPS_SM_OFFLINE: | ||
| 1293 | case BFA_LPS_SM_DELETE: | 1308 | case BFA_LPS_SM_DELETE: |
| 1294 | bfa_sm_set_state(lps, bfa_lps_sm_init); | 1309 | bfa_sm_set_state(lps, bfa_lps_sm_init); |
| 1295 | break; | 1310 | break; |
| @@ -2169,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, | |||
| 2169 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); | 2184 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); |
| 2170 | break; | 2185 | break; |
| 2171 | 2186 | ||
| 2187 | case BFA_FCPORT_SM_FAA_MISCONFIG: | ||
| 2188 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2189 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); | ||
| 2190 | bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); | ||
| 2191 | break; | ||
| 2192 | |||
| 2172 | default: | 2193 | default: |
| 2173 | bfa_sm_fault(fcport->bfa, event); | 2194 | bfa_sm_fault(fcport->bfa, event); |
| 2174 | } | 2195 | } |
| @@ -2225,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, | |||
| 2225 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); | 2246 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); |
| 2226 | break; | 2247 | break; |
| 2227 | 2248 | ||
| 2249 | case BFA_FCPORT_SM_FAA_MISCONFIG: | ||
| 2250 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2251 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); | ||
| 2252 | bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); | ||
| 2253 | break; | ||
| 2254 | |||
| 2228 | default: | 2255 | default: |
| 2229 | bfa_sm_fault(fcport->bfa, event); | 2256 | bfa_sm_fault(fcport->bfa, event); |
| 2230 | } | 2257 | } |
| @@ -2250,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, | |||
| 2250 | if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { | 2277 | if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { |
| 2251 | 2278 | ||
| 2252 | bfa_trc(fcport->bfa, | 2279 | bfa_trc(fcport->bfa, |
| 2253 | pevent->link_state.vc_fcf.fcf.fipenabled); | 2280 | pevent->link_state.attr.vc_fcf.fcf.fipenabled); |
| 2254 | bfa_trc(fcport->bfa, | 2281 | bfa_trc(fcport->bfa, |
| 2255 | pevent->link_state.vc_fcf.fcf.fipfailed); | 2282 | pevent->link_state.attr.vc_fcf.fcf.fipfailed); |
| 2256 | 2283 | ||
| 2257 | if (pevent->link_state.vc_fcf.fcf.fipfailed) | 2284 | if (pevent->link_state.attr.vc_fcf.fcf.fipfailed) |
| 2258 | bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, | 2285 | bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, |
| 2259 | BFA_PL_EID_FIP_FCF_DISC, 0, | 2286 | BFA_PL_EID_FIP_FCF_DISC, 0, |
| 2260 | "FIP FCF Discovery Failed"); | 2287 | "FIP FCF Discovery Failed"); |
| @@ -2311,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, | |||
| 2311 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); | 2338 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); |
| 2312 | break; | 2339 | break; |
| 2313 | 2340 | ||
| 2341 | case BFA_FCPORT_SM_FAA_MISCONFIG: | ||
| 2342 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2343 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); | ||
| 2344 | bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); | ||
| 2345 | break; | ||
| 2346 | |||
| 2314 | default: | 2347 | default: |
| 2315 | bfa_sm_fault(fcport->bfa, event); | 2348 | bfa_sm_fault(fcport->bfa, event); |
| 2316 | } | 2349 | } |
| @@ -2404,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, | |||
| 2404 | } | 2437 | } |
| 2405 | break; | 2438 | break; |
| 2406 | 2439 | ||
| 2440 | case BFA_FCPORT_SM_FAA_MISCONFIG: | ||
| 2441 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2442 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); | ||
| 2443 | bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); | ||
| 2444 | break; | ||
| 2445 | |||
| 2407 | default: | 2446 | default: |
| 2408 | bfa_sm_fault(fcport->bfa, event); | 2447 | bfa_sm_fault(fcport->bfa, event); |
| 2409 | } | 2448 | } |
| @@ -2449,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, | |||
| 2449 | bfa_reqq_wcancel(&fcport->reqq_wait); | 2488 | bfa_reqq_wcancel(&fcport->reqq_wait); |
| 2450 | break; | 2489 | break; |
| 2451 | 2490 | ||
| 2491 | case BFA_FCPORT_SM_FAA_MISCONFIG: | ||
| 2492 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2493 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); | ||
| 2494 | bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig); | ||
| 2495 | break; | ||
| 2496 | |||
| 2452 | default: | 2497 | default: |
| 2453 | bfa_sm_fault(fcport->bfa, event); | 2498 | bfa_sm_fault(fcport->bfa, event); |
| 2454 | } | 2499 | } |
| @@ -2600,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, | |||
| 2600 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); | 2645 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); |
| 2601 | break; | 2646 | break; |
| 2602 | 2647 | ||
| 2648 | case BFA_FCPORT_SM_DPORTENABLE: | ||
| 2649 | bfa_sm_set_state(fcport, bfa_fcport_sm_dport); | ||
| 2650 | break; | ||
| 2651 | |||
| 2603 | default: | 2652 | default: |
| 2604 | bfa_sm_fault(fcport->bfa, event); | 2653 | bfa_sm_fault(fcport->bfa, event); |
| 2605 | } | 2654 | } |
| @@ -2680,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, | |||
| 2680 | } | 2729 | } |
| 2681 | } | 2730 | } |
| 2682 | 2731 | ||
| 2732 | static void | ||
| 2733 | bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) | ||
| 2734 | { | ||
| 2735 | bfa_trc(fcport->bfa, event); | ||
| 2736 | |||
| 2737 | switch (event) { | ||
| 2738 | case BFA_FCPORT_SM_DPORTENABLE: | ||
| 2739 | case BFA_FCPORT_SM_DISABLE: | ||
| 2740 | case BFA_FCPORT_SM_ENABLE: | ||
| 2741 | case BFA_FCPORT_SM_START: | ||
| 2742 | /* | ||
| 2743 | * Ignore these events while the port is in d-port mode | ||
| 2744 | */ | ||
| 2745 | break; | ||
| 2746 | |||
| 2747 | case BFA_FCPORT_SM_STOP: | ||
| 2748 | bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); | ||
| 2749 | break; | ||
| 2750 | |||
| 2751 | case BFA_FCPORT_SM_HWFAIL: | ||
| 2752 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); | ||
| 2753 | break; | ||
| 2754 | |||
| 2755 | case BFA_FCPORT_SM_DPORTDISABLE: | ||
| 2756 | bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); | ||
| 2757 | break; | ||
| 2758 | |||
| 2759 | default: | ||
| 2760 | bfa_sm_fault(fcport->bfa, event); | ||
| 2761 | } | ||
| 2762 | } | ||
| 2763 | |||
| 2764 | static void | ||
| 2765 | bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, | ||
| 2766 | enum bfa_fcport_sm_event event) | ||
| 2767 | { | ||
| 2768 | bfa_trc(fcport->bfa, event); | ||
| 2769 | |||
| 2770 | switch (event) { | ||
| 2771 | case BFA_FCPORT_SM_DPORTENABLE: | ||
| 2772 | case BFA_FCPORT_SM_ENABLE: | ||
| 2773 | case BFA_FCPORT_SM_START: | ||
| 2774 | /* | ||
| 2775 | * Ignore these events while the port has an FAA misconfiguration | ||
| 2776 | */ | ||
| 2777 | break; | ||
| 2778 | |||
| 2779 | case BFA_FCPORT_SM_DISABLE: | ||
| 2780 | if (bfa_fcport_send_disable(fcport)) | ||
| 2781 | bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); | ||
| 2782 | else | ||
| 2783 | bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); | ||
| 2784 | |||
| 2785 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2786 | bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); | ||
| 2787 | bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, | ||
| 2788 | BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); | ||
| 2789 | bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); | ||
| 2790 | break; | ||
| 2791 | |||
| 2792 | case BFA_FCPORT_SM_STOP: | ||
| 2793 | bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); | ||
| 2794 | break; | ||
| 2795 | |||
| 2796 | case BFA_FCPORT_SM_HWFAIL: | ||
| 2797 | bfa_fcport_reset_linkinfo(fcport); | ||
| 2798 | bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); | ||
| 2799 | bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); | ||
| 2800 | break; | ||
| 2801 | |||
| 2802 | default: | ||
| 2803 | bfa_sm_fault(fcport->bfa, event); | ||
| 2804 | } | ||
| 2805 | } | ||
| 2806 | |||
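
Both new handlers follow the driver's event-dispatch idiom: the current state is a function pointer, bfa_sm_set_state() swaps it, and unhandled events fall through to a fault hook. A compact, self-contained rendering of that idiom (simplified, with stand-in names and event values):

struct fsm;
typedef void (*state_fn_t)(struct fsm *f, int event);

struct fsm {
	state_fn_t state;		/* current state handler */
};

#define sm_set_state(f, s)	((f)->state = (s))
#define sm_send_event(f, ev)	((f)->state((f), (ev)))

static void sm_fault(struct fsm *f, int event) { (void)f; (void)event; }

static void st_disabled(struct fsm *f, int event);
static void st_dport(struct fsm *f, int event);

static void st_disabled(struct fsm *f, int event)
{
	switch (event) {
	case 10:	/* DPORTENABLE, as in the patch */
		sm_set_state(f, st_dport);
		break;
	default:
		sm_fault(f, event);
	}
}

static void st_dport(struct fsm *f, int event)
{
	switch (event) {
	case 11:	/* DPORTDISABLE returns to the disabled state */
		sm_set_state(f, st_disabled);
		break;
	default:	/* enable/start/disable are ignored while in d-port */
		break;
	}
}
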
| 2683 | /* | 2807 | /* |
| 2684 | * Link state is down | 2808 | * Link state is down |
| 2685 | */ | 2809 | */ |
| @@ -2943,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 2943 | */ | 3067 | */ |
| 2944 | do_gettimeofday(&tv); | 3068 | do_gettimeofday(&tv); |
| 2945 | fcport->stats_reset_time = tv.tv_sec; | 3069 | fcport->stats_reset_time = tv.tv_sec; |
| 3070 | fcport->stats_dma_ready = BFA_FALSE; | ||
| 2946 | 3071 | ||
| 2947 | /* | 3072 | /* |
| 2948 | * initialize and set default configuration | 3073 | * initialize and set default configuration |
| @@ -2953,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 2953 | port_cfg->maxfrsize = 0; | 3078 | port_cfg->maxfrsize = 0; |
| 2954 | 3079 | ||
| 2955 | port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; | 3080 | port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; |
| 3081 | port_cfg->qos_bw.high = BFA_QOS_BW_HIGH; | ||
| 3082 | port_cfg->qos_bw.med = BFA_QOS_BW_MED; | ||
| 3083 | port_cfg->qos_bw.low = BFA_QOS_BW_LOW; | ||
| 2956 | 3084 | ||
| 2957 | INIT_LIST_HEAD(&fcport->stats_pending_q); | 3085 | INIT_LIST_HEAD(&fcport->stats_pending_q); |
| 2958 | INIT_LIST_HEAD(&fcport->statsclr_pending_q); | 3086 | INIT_LIST_HEAD(&fcport->statsclr_pending_q); |
| @@ -2996,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa) | |||
| 2996 | bfa_trunk_iocdisable(bfa); | 3124 | bfa_trunk_iocdisable(bfa); |
| 2997 | } | 3125 | } |
| 2998 | 3126 | ||
| 3127 | /* | ||
| 3128 | * Update loop info in fcport for SCN online | ||
| 3129 | */ | ||
| 3130 | static void | ||
| 3131 | bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport, | ||
| 3132 | struct bfa_fcport_loop_info_s *loop_info) | ||
| 3133 | { | ||
| 3134 | fcport->myalpa = loop_info->myalpa; | ||
| 3135 | fcport->alpabm_valid = | ||
| 3136 | loop_info->alpabm_val; | ||
| 3137 | memcpy(fcport->alpabm.alpa_bm, | ||
| 3138 | loop_info->alpabm.alpa_bm, | ||
| 3139 | sizeof(struct fc_alpabm_s)); | ||
| 3140 | } | ||
| 3141 | |||
| 2999 | static void | 3142 | static void |
| 3000 | bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) | 3143 | bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) |
| 3001 | { | 3144 | { |
| @@ -3005,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) | |||
| 3005 | fcport->speed = pevent->link_state.speed; | 3148 | fcport->speed = pevent->link_state.speed; |
| 3006 | fcport->topology = pevent->link_state.topology; | 3149 | fcport->topology = pevent->link_state.topology; |
| 3007 | 3150 | ||
| 3008 | if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) | 3151 | if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) { |
| 3009 | fcport->myalpa = 0; | 3152 | bfa_fcport_update_loop_info(fcport, |
| 3153 | &pevent->link_state.attr.loop_info); | ||
| 3154 | return; | ||
| 3155 | } | ||
| 3010 | 3156 | ||
| 3011 | /* QoS Details */ | 3157 | /* QoS Details */ |
| 3012 | fcport->qos_attr = pevent->link_state.qos_attr; | 3158 | fcport->qos_attr = pevent->link_state.qos_attr; |
| 3013 | fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; | 3159 | fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr; |
| 3014 | 3160 | ||
| 3015 | /* | 3161 | /* |
| 3016 | * update trunk state if applicable | 3162 | * update trunk state if applicable |
| @@ -3019,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) | |||
| 3019 | trunk->attr.state = BFA_TRUNK_DISABLED; | 3165 | trunk->attr.state = BFA_TRUNK_DISABLED; |
| 3020 | 3166 | ||
| 3021 | /* update FCoE specific */ | 3167 | /* update FCoE specific */ |
| 3022 | fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); | 3168 | fcport->fcoe_vlan = |
| 3169 | be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan); | ||
| 3023 | 3170 | ||
| 3024 | bfa_trc(fcport->bfa, fcport->speed); | 3171 | bfa_trc(fcport->bfa, fcport->speed); |
| 3025 | bfa_trc(fcport->bfa, fcport->topology); | 3172 | bfa_trc(fcport->bfa, fcport->topology); |
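
The repeated vc_fcf -> attr.vc_fcf renames, plus the new attr.loop_info branch, suggest the firmware link-state event now carries topology-specific data in one shared member, selected by the reported topology. A hedged reconstruction of that layout; only the members visible in this diff are certain, the rest is a guess:

struct loop_info_s { unsigned char myalpa, alpabm_val; /* + alpa bitmap */ };
struct vc_fcf_s    { int qos_vc_attr; /* + FCF details, VLAN, ... */ };

struct link_state_s {
	int topology;		/* BFA_PORT_TOPOLOGY_LOOP, _P2P, ... */
	union {
		struct loop_info_s loop_info;	/* valid in loop topology */
		struct vc_fcf_s    vc_fcf;	/* valid otherwise */
	} attr;
};
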
| @@ -3453,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
| 3453 | case BFI_FCPORT_I2H_ENABLE_RSP: | 3600 | case BFI_FCPORT_I2H_ENABLE_RSP: |
| 3454 | if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { | 3601 | if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { |
| 3455 | 3602 | ||
| 3603 | fcport->stats_dma_ready = BFA_TRUE; | ||
| 3456 | if (fcport->use_flash_cfg) { | 3604 | if (fcport->use_flash_cfg) { |
| 3457 | fcport->cfg = i2hmsg.penable_rsp->port_cfg; | 3605 | fcport->cfg = i2hmsg.penable_rsp->port_cfg; |
| 3458 | fcport->cfg.maxfrsize = | 3606 | fcport->cfg.maxfrsize = |
| @@ -3468,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
| 3468 | else | 3616 | else |
| 3469 | fcport->trunk.attr.state = | 3617 | fcport->trunk.attr.state = |
| 3470 | BFA_TRUNK_DISABLED; | 3618 | BFA_TRUNK_DISABLED; |
| 3619 | fcport->qos_attr.qos_bw = | ||
| 3620 | i2hmsg.penable_rsp->port_cfg.qos_bw; | ||
| 3471 | fcport->use_flash_cfg = BFA_FALSE; | 3621 | fcport->use_flash_cfg = BFA_FALSE; |
| 3472 | } | 3622 | } |
| 3473 | 3623 | ||
| @@ -3476,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
| 3476 | else | 3626 | else |
| 3477 | fcport->qos_attr.state = BFA_QOS_DISABLED; | 3627 | fcport->qos_attr.state = BFA_QOS_DISABLED; |
| 3478 | 3628 | ||
| 3629 | fcport->qos_attr.qos_bw_op = | ||
| 3630 | i2hmsg.penable_rsp->port_cfg.qos_bw; | ||
| 3631 | |||
| 3479 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); | 3632 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); |
| 3480 | } | 3633 | } |
| 3481 | break; | 3634 | break; |
| @@ -3488,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
| 3488 | case BFI_FCPORT_I2H_EVENT: | 3641 | case BFI_FCPORT_I2H_EVENT: |
| 3489 | if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) | 3642 | if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) |
| 3490 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); | 3643 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); |
| 3491 | else | 3644 | else { |
| 3492 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); | 3645 | if (i2hmsg.event->link_state.linkstate_rsn == |
| 3646 | BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG) | ||
| 3647 | bfa_sm_send_event(fcport, | ||
| 3648 | BFA_FCPORT_SM_FAA_MISCONFIG); | ||
| 3649 | else | ||
| 3650 | bfa_sm_send_event(fcport, | ||
| 3651 | BFA_FCPORT_SM_LINKDOWN); | ||
| 3652 | } | ||
| 3653 | fcport->qos_attr.qos_bw_op = | ||
| 3654 | i2hmsg.event->link_state.qos_attr.qos_bw_op; | ||
| 3493 | break; | 3655 | break; |
| 3494 | 3656 | ||
| 3495 | case BFI_FCPORT_I2H_TRUNK_SCN: | 3657 | case BFI_FCPORT_I2H_TRUNK_SCN: |
| @@ -3609,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) | |||
| 3609 | 3771 | ||
| 3610 | if (fcport->cfg.trunked == BFA_TRUE) | 3772 | if (fcport->cfg.trunked == BFA_TRUE) |
| 3611 | return BFA_STATUS_TRUNK_ENABLED; | 3773 | return BFA_STATUS_TRUNK_ENABLED; |
| 3774 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && | ||
| 3775 | (speed == BFA_PORT_SPEED_16GBPS)) | ||
| 3776 | return BFA_STATUS_UNSUPP_SPEED; | ||
| 3612 | if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { | 3777 | if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { |
| 3613 | bfa_trc(bfa, fcport->speed_sup); | 3778 | bfa_trc(bfa, fcport->speed_sup); |
| 3614 | return BFA_STATUS_UNSUPP_SPEED; | 3779 | return BFA_STATUS_UNSUPP_SPEED; |
| @@ -3663,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) | |||
| 3663 | 3828 | ||
| 3664 | switch (topology) { | 3829 | switch (topology) { |
| 3665 | case BFA_PORT_TOPOLOGY_P2P: | 3830 | case BFA_PORT_TOPOLOGY_P2P: |
| 3831 | break; | ||
| 3832 | |||
| 3666 | case BFA_PORT_TOPOLOGY_LOOP: | 3833 | case BFA_PORT_TOPOLOGY_LOOP: |
| 3834 | if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) || | ||
| 3835 | (fcport->qos_attr.state != BFA_QOS_DISABLED)) | ||
| 3836 | return BFA_STATUS_ERROR_QOS_ENABLED; | ||
| 3837 | if (fcport->cfg.ratelimit != BFA_FALSE) | ||
| 3838 | return BFA_STATUS_ERROR_TRL_ENABLED; | ||
| 3839 | if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) || | ||
| 3840 | (fcport->trunk.attr.state != BFA_TRUNK_DISABLED)) | ||
| 3841 | return BFA_STATUS_ERROR_TRUNK_ENABLED; | ||
| 3842 | if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) || | ||
| 3843 | (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS)) | ||
| 3844 | return BFA_STATUS_UNSUPP_SPEED; | ||
| 3845 | if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) | ||
| 3846 | return BFA_STATUS_LOOP_UNSUPP_MEZZ; | ||
| 3847 | if (bfa_fcport_is_dport(bfa) != BFA_FALSE) | ||
| 3848 | return BFA_STATUS_DPORT_ERR; | ||
| 3849 | break; | ||
| 3850 | |||
| 3667 | case BFA_PORT_TOPOLOGY_AUTO: | 3851 | case BFA_PORT_TOPOLOGY_AUTO: |
| 3668 | break; | 3852 | break; |
| 3669 | 3853 | ||
| @@ -3686,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa) | |||
| 3686 | return fcport->topology; | 3870 | return fcport->topology; |
| 3687 | } | 3871 | } |
| 3688 | 3872 | ||
| 3873 | /** | ||
| 3874 | * Get config topology. | ||
| 3875 | */ | ||
| 3876 | enum bfa_port_topology | ||
| 3877 | bfa_fcport_get_cfg_topology(struct bfa_s *bfa) | ||
| 3878 | { | ||
| 3879 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
| 3880 | |||
| 3881 | return fcport->cfg.topology; | ||
| 3882 | } | ||
| 3883 | |||
| 3689 | bfa_status_t | 3884 | bfa_status_t |
| 3690 | bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) | 3885 | bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) |
| 3691 | { | 3886 | { |
| @@ -3761,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa) | |||
| 3761 | u8 | 3956 | u8 |
| 3762 | bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) | 3957 | bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) |
| 3763 | { | 3958 | { |
| 3764 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | 3959 | if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) |
| 3960 | return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit; | ||
| 3765 | 3961 | ||
| 3766 | return fcport->cfg.rx_bbcredit; | 3962 | else |
| 3963 | return 0; | ||
| 3767 | } | 3964 | } |
| 3768 | 3965 | ||
| 3769 | void | 3966 | void |
| @@ -3850,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) | |||
| 3850 | { | 4047 | { |
| 3851 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | 4048 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); |
| 3852 | 4049 | ||
| 3853 | if (bfa_ioc_is_disabled(&bfa->ioc)) | 4050 | if (!bfa_iocfc_is_operational(bfa) || |
| 3854 | return BFA_STATUS_IOC_DISABLED; | 4051 | !fcport->stats_dma_ready) |
| 4052 | return BFA_STATUS_IOC_NON_OP; | ||
| 3855 | 4053 | ||
| 3856 | if (!list_empty(&fcport->statsclr_pending_q)) | 4054 | if (!list_empty(&fcport->statsclr_pending_q)) |
| 3857 | return BFA_STATUS_DEVBUSY; | 4055 | return BFA_STATUS_DEVBUSY; |
| @@ -3876,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb) | |||
| 3876 | { | 4074 | { |
| 3877 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | 4075 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); |
| 3878 | 4076 | ||
| 4077 | if (!bfa_iocfc_is_operational(bfa) || | ||
| 4078 | !fcport->stats_dma_ready) | ||
| 4079 | return BFA_STATUS_IOC_NON_OP; | ||
| 4080 | |||
| 3879 | if (!list_empty(&fcport->stats_pending_q)) | 4081 | if (!list_empty(&fcport->stats_pending_q)) |
| 3880 | return BFA_STATUS_DEVBUSY; | 4082 | return BFA_STATUS_DEVBUSY; |
| 3881 | 4083 | ||
| @@ -3905,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa) | |||
| 3905 | } | 4107 | } |
| 3906 | 4108 | ||
| 3907 | bfa_boolean_t | 4109 | bfa_boolean_t |
| 4110 | bfa_fcport_is_dport(struct bfa_s *bfa) | ||
| 4111 | { | ||
| 4112 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
| 4113 | |||
| 4114 | return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == | ||
| 4115 | BFA_PORT_ST_DPORT); | ||
| 4116 | } | ||
| 4117 | |||
| 4118 | bfa_status_t | ||
| 4119 | bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw) | ||
| 4120 | { | ||
| 4121 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
| 4122 | enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); | ||
| 4123 | |||
| 4124 | bfa_trc(bfa, ioc_type); | ||
| 4125 | |||
| 4126 | if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0)) | ||
| 4127 | return BFA_STATUS_QOS_BW_INVALID; | ||
| 4128 | |||
| 4129 | if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100) | ||
| 4130 | return BFA_STATUS_QOS_BW_INVALID; | ||
| 4131 | |||
| 4132 | if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) || | ||
| 4133 | (qos_bw->low > qos_bw->high)) | ||
| 4134 | return BFA_STATUS_QOS_BW_INVALID; | ||
| 4135 | |||
| 4136 | if ((ioc_type == BFA_IOC_TYPE_FC) && | ||
| 4137 | (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP)) | ||
| 4138 | fcport->cfg.qos_bw = *qos_bw; | ||
| 4139 | |||
| 4140 | return BFA_STATUS_OK; | ||
| 4141 | } | ||
| 4142 | |||
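
bfa_fcport_set_qos_bw() accepts only splits where every class gets a non-zero share, the three shares sum to exactly 100, and high >= med >= low. A standalone sketch of the same checks with sample inputs (not driver code):

#include <stdio.h>

struct qos_bw { int high, med, low; };

static int qos_bw_valid(const struct qos_bw *q)
{
	if (q->high == 0 || q->med == 0 || q->low == 0)
		return 0;			/* every class needs a share */
	if (q->high + q->med + q->low != 100)
		return 0;			/* shares are percentages */
	if (q->med > q->high || q->low > q->med || q->low > q->high)
		return 0;			/* must be high >= med >= low */
	return 1;
}

int main(void)
{
	struct qos_bw ok  = { 60, 30, 10 };	/* accepted */
	struct qos_bw bad = { 50, 20, 20 };	/* sums to 90: rejected */

	printf("%d %d\n", qos_bw_valid(&ok), qos_bw_valid(&bad));
	return 0;
}
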
| 4143 | bfa_boolean_t | ||
| 3908 | bfa_fcport_is_ratelim(struct bfa_s *bfa) | 4144 | bfa_fcport_is_ratelim(struct bfa_s *bfa) |
| 3909 | { | 4145 | { |
| 3910 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | 4146 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); |
| @@ -3981,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) | |||
| 3981 | return fcport->cfg.trunked; | 4217 | return fcport->cfg.trunked; |
| 3982 | } | 4218 | } |
| 3983 | 4219 | ||
| 4220 | void | ||
| 4221 | bfa_fcport_dportenable(struct bfa_s *bfa) | ||
| 4222 | { | ||
| 4223 | /* | ||
| 4224 | * Assume the caller has verified that the port is in disabled state | ||
| 4225 | */ | ||
| 4226 | bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE); | ||
| 4227 | bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE); | ||
| 4228 | } | ||
| 4229 | |||
| 4230 | void | ||
| 4231 | bfa_fcport_dportdisable(struct bfa_s *bfa) | ||
| 4232 | { | ||
| 4233 | /* | ||
| 4234 | * Assume the caller has verified that the port is in disabled state | ||
| 4235 | */ | ||
| 4236 | bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE); | ||
| 4237 | bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); | ||
| 4238 | } | ||
| 4239 | |||
| 3984 | /* | 4240 | /* |
| 3985 | * Rport State machine functions | 4241 | * Rport State machine functions |
| 3986 | */ | 4242 | */ |
| @@ -4707,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
| 4707 | bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); | 4963 | bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); |
| 4708 | break; | 4964 | break; |
| 4709 | 4965 | ||
| 4966 | case BFI_RPORT_I2H_LIP_SCN_ONLINE: | ||
| 4967 | bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa), | ||
| 4968 | &msg.lip_scn->loop_info); | ||
| 4969 | bfa_cb_rport_scn_online(bfa); | ||
| 4970 | break; | ||
| 4971 | |||
| 4972 | case BFI_RPORT_I2H_LIP_SCN_OFFLINE: | ||
| 4973 | bfa_cb_rport_scn_offline(bfa); | ||
| 4974 | break; | ||
| 4975 | |||
| 4976 | case BFI_RPORT_I2H_NO_DEV: | ||
| 4977 | rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle); | ||
| 4978 | bfa_cb_rport_scn_no_dev(rp->rport_drv); | ||
| 4979 | break; | ||
| 4980 | |||
| 4710 | default: | 4981 | default: |
| 4711 | bfa_trc(bfa, m->mhdr.msg_id); | 4982 | bfa_trc(bfa, m->mhdr.msg_id); |
| 4712 | WARN_ON(1); | 4983 | WARN_ON(1); |
| @@ -5348,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) | |||
| 5348 | } | 5619 | } |
| 5349 | 5620 | ||
| 5350 | /* | 5621 | /* |
| 5622 | * Dport forward declarations | ||
| 5623 | */ | ||
| 5624 | |||
| 5625 | /* | ||
| 5626 | * BFA DPORT state machine events | ||
| 5627 | */ | ||
| 5628 | enum bfa_dport_sm_event { | ||
| 5629 | BFA_DPORT_SM_ENABLE = 1, /* dport enable event */ | ||
| 5630 | BFA_DPORT_SM_DISABLE = 2, /* dport disable event */ | ||
| 5631 | BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ | ||
| 5632 | BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ | ||
| 5633 | BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ | ||
| 5634 | }; | ||
| 5635 | |||
| 5636 | static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, | ||
| 5637 | enum bfa_dport_sm_event event); | ||
| 5638 | static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, | ||
| 5639 | enum bfa_dport_sm_event event); | ||
| 5640 | static void bfa_dport_sm_enabling(struct bfa_dport_s *dport, | ||
| 5641 | enum bfa_dport_sm_event event); | ||
| 5642 | static void bfa_dport_sm_enabled(struct bfa_dport_s *dport, | ||
| 5643 | enum bfa_dport_sm_event event); | ||
| 5644 | static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, | ||
| 5645 | enum bfa_dport_sm_event event); | ||
| 5646 | static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, | ||
| 5647 | enum bfa_dport_sm_event event); | ||
| 5648 | static void bfa_dport_qresume(void *cbarg); | ||
| 5649 | static void bfa_dport_req_comp(struct bfa_dport_s *dport, | ||
| 5650 | bfi_diag_dport_rsp_t *msg); | ||
| 5651 | |||
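
Read together, the declarations above describe a five-state enable/disable machine with queue-wait detours; the handler bodies appear later in this file. As far as the code shows them, the intended transitions are:

    state              event       next state
    disabled           ENABLE      enabling (enabling_qwait if no CQ space)
    enabling_qwait     QRESUME     enabling
    enabling           FWRSP       enabled
    enabled            DISABLE     disabling (disabling_qwait if no CQ space)
    disabling_qwait    QRESUME     disabling
    disabling          FWRSP       disabled
    any state          HWFAIL      disabled (pending callback completed)
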
| 5652 | /* | ||
| 5351 | * BFA fcdiag module | 5653 | * BFA fcdiag module |
| 5352 | */ | 5654 | */ |
| 5353 | #define BFA_DIAG_QTEST_TOV 1000 /* msec */ | 5655 | #define BFA_DIAG_QTEST_TOV 1000 /* msec */ |
| @@ -5377,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
| 5377 | struct bfa_pcidev_s *pcidev) | 5679 | struct bfa_pcidev_s *pcidev) |
| 5378 | { | 5680 | { |
| 5379 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | 5681 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); |
| 5682 | struct bfa_dport_s *dport = &fcdiag->dport; | ||
| 5683 | |||
| 5380 | fcdiag->bfa = bfa; | 5684 | fcdiag->bfa = bfa; |
| 5381 | fcdiag->trcmod = bfa->trcmod; | 5685 | fcdiag->trcmod = bfa->trcmod; |
| 5382 | /* The common DIAG attach bfa_diag_attach() will do all memory claim */ | 5686 | /* The common DIAG attach bfa_diag_attach() will do all memory claim */ |
| 5687 | dport->bfa = bfa; | ||
| 5688 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 5689 | bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); | ||
| 5690 | dport->cbfn = NULL; | ||
| 5691 | dport->cbarg = NULL; | ||
| 5383 | } | 5692 | } |
| 5384 | 5693 | ||
| 5385 | static void | 5694 | static void |
| 5386 | bfa_fcdiag_iocdisable(struct bfa_s *bfa) | 5695 | bfa_fcdiag_iocdisable(struct bfa_s *bfa) |
| 5387 | { | 5696 | { |
| 5388 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | 5697 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); |
| 5698 | struct bfa_dport_s *dport = &fcdiag->dport; | ||
| 5699 | |||
| 5389 | bfa_trc(fcdiag, fcdiag->lb.lock); | 5700 | bfa_trc(fcdiag, fcdiag->lb.lock); |
| 5390 | if (fcdiag->lb.lock) { | 5701 | if (fcdiag->lb.lock) { |
| 5391 | fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; | 5702 | fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; |
| @@ -5393,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa) | |||
| 5393 | fcdiag->lb.lock = 0; | 5704 | fcdiag->lb.lock = 0; |
| 5394 | bfa_fcdiag_set_busy_status(fcdiag); | 5705 | bfa_fcdiag_set_busy_status(fcdiag); |
| 5395 | } | 5706 | } |
| 5707 | |||
| 5708 | bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL); | ||
| 5396 | } | 5709 | } |
| 5397 | 5710 | ||
| 5398 | static void | 5711 | static void |
| @@ -5577,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
| 5577 | case BFI_DIAG_I2H_QTEST: | 5890 | case BFI_DIAG_I2H_QTEST: |
| 5578 | bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); | 5891 | bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); |
| 5579 | break; | 5892 | break; |
| 5893 | case BFI_DIAG_I2H_DPORT: | ||
| 5894 | bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg); | ||
| 5895 | break; | ||
| 5580 | default: | 5896 | default: |
| 5581 | bfa_trc(fcdiag, msg->mhdr.msg_id); | 5897 | bfa_trc(fcdiag, msg->mhdr.msg_id); |
| 5582 | WARN_ON(1); | 5898 | WARN_ON(1); |
| @@ -5646,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, | |||
| 5646 | } | 5962 | } |
| 5647 | } | 5963 | } |
| 5648 | 5964 | ||
| 5965 | /* | ||
| 5966 | * For CT2, 1G is not supported | ||
| 5967 | */ | ||
| 5968 | if ((speed == BFA_PORT_SPEED_1GBPS) && | ||
| 5969 | (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) { | ||
| 5970 | bfa_trc(fcdiag, speed); | ||
| 5971 | return BFA_STATUS_UNSUPP_SPEED; | ||
| 5972 | } | ||
| 5973 | |||
| 5649 | /* For Mezz card, port speed entered needs to be checked */ | 5974 | /* For Mezz card, port speed entered needs to be checked */ |
| 5650 | if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { | 5975 | if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { |
| 5651 | if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { | 5976 | if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { |
| 5652 | if ((speed == BFA_PORT_SPEED_1GBPS) && | ||
| 5653 | (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) | ||
| 5654 | return BFA_STATUS_UNSUPP_SPEED; | ||
| 5655 | if (!(speed == BFA_PORT_SPEED_1GBPS || | 5977 | if (!(speed == BFA_PORT_SPEED_1GBPS || |
| 5656 | speed == BFA_PORT_SPEED_2GBPS || | 5978 | speed == BFA_PORT_SPEED_2GBPS || |
| 5657 | speed == BFA_PORT_SPEED_4GBPS || | 5979 | speed == BFA_PORT_SPEED_4GBPS || |
| @@ -5764,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa) | |||
| 5764 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | 6086 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); |
| 5765 | return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; | 6087 | return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; |
| 5766 | } | 6088 | } |
| 6089 | |||
| 6090 | /* | ||
| 6091 | * D-port | ||
| 6092 | */ | ||
| 6093 | static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, | ||
| 6094 | enum bfi_dport_req req); | ||
| 6095 | static void | ||
| 6096 | bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status) | ||
| 6097 | { | ||
| 6098 | if (dport->cbfn != NULL) { | ||
| 6099 | dport->cbfn(dport->cbarg, bfa_status); | ||
| 6100 | dport->cbfn = NULL; | ||
| 6101 | dport->cbarg = NULL; | ||
| 6102 | } | ||
| 6103 | } | ||
| 6104 | |||
| 6105 | static void | ||
| 6106 | bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) | ||
| 6107 | { | ||
| 6108 | bfa_trc(dport->bfa, event); | ||
| 6109 | |||
| 6110 | switch (event) { | ||
| 6111 | case BFA_DPORT_SM_ENABLE: | ||
| 6112 | bfa_fcport_dportenable(dport->bfa); | ||
| 6113 | if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE)) | ||
| 6114 | bfa_sm_set_state(dport, bfa_dport_sm_enabling); | ||
| 6115 | else | ||
| 6116 | bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait); | ||
| 6117 | break; | ||
| 6118 | |||
| 6119 | case BFA_DPORT_SM_DISABLE: | ||
| 6120 | /* Already disabled */ | ||
| 6121 | break; | ||
| 6122 | |||
| 6123 | case BFA_DPORT_SM_HWFAIL: | ||
| 6124 | /* ignore */ | ||
| 6125 | break; | ||
| 6126 | |||
| 6127 | default: | ||
| 6128 | bfa_sm_fault(dport->bfa, event); | ||
| 6129 | } | ||
| 6130 | } | ||
| 6131 | |||
| 6132 | static void | ||
| 6133 | bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport, | ||
| 6134 | enum bfa_dport_sm_event event) | ||
| 6135 | { | ||
| 6136 | bfa_trc(dport->bfa, event); | ||
| 6137 | |||
| 6138 | switch (event) { | ||
| 6139 | case BFA_DPORT_SM_QRESUME: | ||
| 6140 | bfa_sm_set_state(dport, bfa_dport_sm_enabling); | ||
| 6141 | bfa_dport_send_req(dport, BFI_DPORT_ENABLE); | ||
| 6142 | break; | ||
| 6143 | |||
| 6144 | case BFA_DPORT_SM_HWFAIL: | ||
| 6145 | bfa_reqq_wcancel(&dport->reqq_wait); | ||
| 6146 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6147 | bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); | ||
| 6148 | break; | ||
| 6149 | |||
| 6150 | default: | ||
| 6151 | bfa_sm_fault(dport->bfa, event); | ||
| 6152 | } | ||
| 6153 | } | ||
| 6154 | |||
| 6155 | static void | ||
| 6156 | bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) | ||
| 6157 | { | ||
| 6158 | bfa_trc(dport->bfa, event); | ||
| 6159 | |||
| 6160 | switch (event) { | ||
| 6161 | case BFA_DPORT_SM_FWRSP: | ||
| 6162 | bfa_sm_set_state(dport, bfa_dport_sm_enabled); | ||
| 6163 | break; | ||
| 6164 | |||
| 6165 | case BFA_DPORT_SM_HWFAIL: | ||
| 6166 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6167 | bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); | ||
| 6168 | break; | ||
| 6169 | |||
| 6170 | default: | ||
| 6171 | bfa_sm_fault(dport->bfa, event); | ||
| 6172 | } | ||
| 6173 | } | ||
| 6174 | |||
| 6175 | static void | ||
| 6176 | bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) | ||
| 6177 | { | ||
| 6178 | bfa_trc(dport->bfa, event); | ||
| 6179 | |||
| 6180 | switch (event) { | ||
| 6181 | case BFA_DPORT_SM_ENABLE: | ||
| 6182 | /* Already enabled */ | ||
| 6183 | break; | ||
| 6184 | |||
| 6185 | case BFA_DPORT_SM_DISABLE: | ||
| 6186 | bfa_fcport_dportdisable(dport->bfa); | ||
| 6187 | if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE)) | ||
| 6188 | bfa_sm_set_state(dport, bfa_dport_sm_disabling); | ||
| 6189 | else | ||
| 6190 | bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait); | ||
| 6191 | break; | ||
| 6192 | |||
| 6193 | case BFA_DPORT_SM_HWFAIL: | ||
| 6194 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6195 | break; | ||
| 6196 | |||
| 6197 | default: | ||
| 6198 | bfa_sm_fault(dport->bfa, event); | ||
| 6199 | } | ||
| 6200 | } | ||
| 6201 | |||
| 6202 | static void | ||
| 6203 | bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, | ||
| 6204 | enum bfa_dport_sm_event event) | ||
| 6205 | { | ||
| 6206 | bfa_trc(dport->bfa, event); | ||
| 6207 | |||
| 6208 | switch (event) { | ||
| 6209 | case BFA_DPORT_SM_QRESUME: | ||
| 6210 | bfa_sm_set_state(dport, bfa_dport_sm_disabling); | ||
| 6211 | bfa_dport_send_req(dport, BFI_DPORT_DISABLE); | ||
| 6212 | break; | ||
| 6213 | |||
| 6214 | case BFA_DPORT_SM_HWFAIL: | ||
| 6215 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6216 | bfa_reqq_wcancel(&dport->reqq_wait); | ||
| 6217 | bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); | ||
| 6218 | break; | ||
| 6219 | |||
| 6220 | default: | ||
| 6221 | bfa_sm_fault(dport->bfa, event); | ||
| 6222 | } | ||
| 6223 | } | ||
| 6224 | |||
| 6225 | static void | ||
| 6226 | bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) | ||
| 6227 | { | ||
| 6228 | bfa_trc(dport->bfa, event); | ||
| 6229 | |||
| 6230 | switch (event) { | ||
| 6231 | case BFA_DPORT_SM_FWRSP: | ||
| 6232 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6233 | break; | ||
| 6234 | |||
| 6235 | case BFA_DPORT_SM_HWFAIL: | ||
| 6236 | bfa_sm_set_state(dport, bfa_dport_sm_disabled); | ||
| 6237 | bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); | ||
| 6238 | break; | ||
| 6239 | |||
| 6240 | default: | ||
| 6241 | bfa_sm_fault(dport->bfa, event); | ||
| 6242 | } | ||
| 6243 | } | ||
| 6244 | |||
| 6245 | |||
| 6246 | static bfa_boolean_t | ||
| 6247 | bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) | ||
| 6248 | { | ||
| 6249 | struct bfi_diag_dport_req_s *m; | ||
| 6250 | |||
| 6251 | /* | ||
| 6252 | * Increment message tag before queue check, so that responses to old | ||
| 6253 | * requests are discarded. | ||
| 6254 | */ | ||
| 6255 | dport->msgtag++; | ||
| 6256 | |||
| 6257 | /* | ||
| 6258 | * check for room in queue to send request now | ||
| 6259 | */ | ||
| 6260 | m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); | ||
| 6261 | if (!m) { | ||
| 6262 | bfa_reqq_wait(dport->bfa, BFA_REQQ_DIAG, &dport->reqq_wait); | ||
| 6263 | return BFA_FALSE; | ||
| 6264 | } | ||
| 6265 | |||
| 6266 | bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, | ||
| 6267 | bfa_fn_lpu(dport->bfa)); | ||
| 6268 | m->req = req; | ||
| 6269 | m->msgtag = dport->msgtag; | ||
| 6270 | |||
| 6271 | /* | ||
| 6272 | * queue I/O message to firmware | ||
| 6273 | */ | ||
| 6274 | bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh); | ||
| 6275 | |||
| 6276 | return BFA_TRUE; | ||
| 6277 | } | ||
| 6278 | |||
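
Bumping msgtag before the queue check means a firmware reply carrying an older tag can be recognized as stale. The response path in this patch does not compare tags yet, but the usual shape of the pattern the comment describes looks like this (sketch, not driver code):

struct req_ctx {
	unsigned int msgtag;	/* tag of the most recent request */
};

struct fw_rsp {
	unsigned int msgtag;	/* echoed back by firmware */
	int status;
};

/* Returns 1 if the response matches the outstanding request;
 * anything older would be discarded by the caller. */
static int rsp_is_current(const struct req_ctx *ctx, const struct fw_rsp *rsp)
{
	return rsp->msgtag == ctx->msgtag;
}
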
| 6279 | static void | ||
| 6280 | bfa_dport_qresume(void *cbarg) | ||
| 6281 | { | ||
| 6282 | struct bfa_dport_s *dport = cbarg; | ||
| 6283 | |||
| 6284 | bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME); | ||
| 6285 | } | ||
| 6286 | |||
| 6287 | static void | ||
| 6288 | bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg) | ||
| 6289 | { | ||
| 6290 | bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); | ||
| 6291 | bfa_cb_fcdiag_dport(dport, msg->status); | ||
| 6292 | } | ||
| 6293 | |||
| 6294 | /* | ||
| 6295 | * Dport enable | ||
| 6296 | * | ||
| 6297 | * @param[in] *bfa - bfa data struct | ||
| 6298 | */ | ||
| 6299 | bfa_status_t | ||
| 6300 | bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) | ||
| 6301 | { | ||
| 6302 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | ||
| 6303 | struct bfa_dport_s *dport = &fcdiag->dport; | ||
| 6304 | |||
| 6305 | /* | ||
| 6306 | * Dport is not support in MEZZ card | ||
| 6307 | */ | ||
| 6308 | if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) { | ||
| 6309 | bfa_trc(dport->bfa, BFA_STATUS_PBC); | ||
| 6310 | return BFA_STATUS_CMD_NOTSUPP_MEZZ; | ||
| 6311 | } | ||
| 6312 | |||
| 6313 | /* | ||
| 6314 | * Check to see if IOC is down | ||
| 6315 | */ | ||
| 6316 | if (!bfa_iocfc_is_operational(bfa)) | ||
| 6317 | return BFA_STATUS_IOC_NON_OP; | ||
| 6318 | |||
| 6319 | /* if port is PBC disabled, return error */ | ||
| 6320 | if (bfa_fcport_is_pbcdisabled(bfa)) { | ||
| 6321 | bfa_trc(dport->bfa, BFA_STATUS_PBC); | ||
| 6322 | return BFA_STATUS_PBC; | ||
| 6323 | } | ||
| 6324 | |||
| 6325 | /* | ||
| 6326 | * Check if port mode is FC port | ||
| 6327 | */ | ||
| 6328 | if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) { | ||
| 6329 | bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc)); | ||
| 6330 | return BFA_STATUS_CMD_NOTSUPP_CNA; | ||
| 6331 | } | ||
| 6332 | |||
| 6333 | /* | ||
| 6334 | * Check if port is in LOOP mode | ||
| 6335 | */ | ||
| 6336 | if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) || | ||
| 6337 | (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) { | ||
| 6338 | bfa_trc(dport->bfa, 0); | ||
| 6339 | return BFA_STATUS_TOPOLOGY_LOOP; | ||
| 6340 | } | ||
| 6341 | |||
| 6342 | /* | ||
| 6343 | * Check if port is in TRUNK mode | ||
| 6344 | */ | ||
| 6345 | if (bfa_fcport_is_trunk_enabled(bfa)) { | ||
| 6346 | bfa_trc(dport->bfa, 0); | ||
| 6347 | return BFA_STATUS_ERROR_TRUNK_ENABLED; | ||
| 6348 | } | ||
| 6349 | |||
| 6350 | /* | ||
| 6351 | * Check that the port is either disabled or already in dport state | ||
| 6352 | */ | ||
| 6353 | if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && | ||
| 6354 | (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { | ||
| 6355 | bfa_trc(dport->bfa, 0); | ||
| 6356 | return BFA_STATUS_PORT_NOT_DISABLED; | ||
| 6357 | } | ||
| 6358 | |||
| 6359 | /* | ||
| 6360 | * Check if dport is busy | ||
| 6361 | */ | ||
| 6362 | if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || | ||
| 6363 | bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || | ||
| 6364 | bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || | ||
| 6365 | bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) { | ||
| 6366 | return BFA_STATUS_DEVBUSY; | ||
| 6367 | } | ||
| 6368 | |||
| 6369 | /* | ||
| 6370 | * Check if dport is already enabled | ||
| 6371 | */ | ||
| 6372 | if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { | ||
| 6373 | bfa_trc(dport->bfa, 0); | ||
| 6374 | return BFA_STATUS_DPORT_ENABLED; | ||
| 6375 | } | ||
| 6376 | |||
| 6377 | dport->cbfn = cbfn; | ||
| 6378 | dport->cbarg = cbarg; | ||
| 6379 | |||
| 6380 | bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE); | ||
| 6381 | return BFA_STATUS_OK; | ||
| 6382 | } | ||
| 6383 | |||
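
Callers get the result asynchronously: bfa_dport_enable() only validates and kicks the state machine, and the bfa_cb_diag_t callback fires when the firmware response (or a hardware failure) arrives. A hedged usage sketch; the waiter structure and the polling convention are assumptions, only the callback shape comes from this file:

typedef int bfa_status_t;		/* stand-in for the real typedef */

struct dport_waiter {
	volatile int  done;
	bfa_status_t  status;
};

/* Shape matches bfa_cb_diag_t as used here: (cbarg, status). */
static void dport_done(void *cbarg, bfa_status_t status)
{
	struct dport_waiter *w = cbarg;

	w->status = status;
	w->done   = 1;			/* real code would use a completion */
}

/*
 * Caller side (assumed): disable the FC port first, then
 *
 *     err = bfa_dport_enable(bfa, dport_done, &waiter);
 *
 * and wait for waiter.done. An up-front error means the request was
 * rejected before reaching firmware (busy, loop topology, trunked, ...).
 */
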
| 6384 | /* | ||
| 6385 | * Dport disable | ||
| 6386 | * | ||
| 6387 | * @param[in] *bfa - bfa data struct | ||
| 6388 | */ | ||
| 6389 | bfa_status_t | ||
| 6390 | bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) | ||
| 6391 | { | ||
| 6392 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | ||
| 6393 | struct bfa_dport_s *dport = &fcdiag->dport; | ||
| 6394 | |||
| 6395 | if (bfa_ioc_is_disabled(&bfa->ioc)) | ||
| 6396 | return BFA_STATUS_IOC_DISABLED; | ||
| 6397 | |||
| 6398 | /* if port is PBC disabled, return error */ | ||
| 6399 | if (bfa_fcport_is_pbcdisabled(bfa)) { | ||
| 6400 | bfa_trc(dport->bfa, BFA_STATUS_PBC); | ||
| 6401 | return BFA_STATUS_PBC; | ||
| 6402 | } | ||
| 6403 | |||
| 6404 | /* | ||
| 6405 | * Check that the port is either disabled or already in dport state | ||
| 6406 | */ | ||
| 6407 | if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && | ||
| 6408 | (bfa_fcport_is_dport(bfa) == BFA_FALSE)) { | ||
| 6409 | bfa_trc(dport->bfa, 0); | ||
| 6410 | return BFA_STATUS_PORT_NOT_DISABLED; | ||
| 6411 | } | ||
| 6412 | |||
| 6413 | /* | ||
| 6414 | * Check if dport is busy | ||
| 6415 | */ | ||
| 6416 | if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || | ||
| 6417 | bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || | ||
| 6418 | bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || | ||
| 6419 | bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) | ||
| 6420 | return BFA_STATUS_DEVBUSY; | ||
| 6421 | |||
| 6422 | /* | ||
| 6423 | * Check if dport is already disabled | ||
| 6424 | */ | ||
| 6425 | if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) { | ||
| 6426 | bfa_trc(dport->bfa, 0); | ||
| 6427 | return BFA_STATUS_DPORT_DISABLED; | ||
| 6428 | } | ||
| 6429 | |||
| 6430 | dport->cbfn = cbfn; | ||
| 6431 | dport->cbarg = cbarg; | ||
| 6432 | |||
| 6433 | bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE); | ||
| 6434 | return BFA_STATUS_OK; | ||
| 6435 | } | ||
| 6436 | |||
| 6437 | /* | ||
| 6438 | * Get D-port state | ||
| 6439 | * | ||
| 6440 | * @param[in] *bfa - bfa data struct | ||
| 6441 | */ | ||
| 6442 | |||
| 6443 | bfa_status_t | ||
| 6444 | bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state) | ||
| 6445 | { | ||
| 6446 | struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); | ||
| 6447 | struct bfa_dport_s *dport = &fcdiag->dport; | ||
| 6448 | |||
| 6449 | if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) | ||
| 6450 | *state = BFA_DPORT_ST_ENABLED; | ||
| 6451 | else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || | ||
| 6452 | bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait)) | ||
| 6453 | *state = BFA_DPORT_ST_ENABLING; | ||
| 6454 | else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) | ||
| 6455 | *state = BFA_DPORT_ST_DISABLED; | ||
| 6456 | else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || | ||
| 6457 | bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) | ||
| 6458 | *state = BFA_DPORT_ST_DISABLING; | ||
| 6459 | else { | ||
| 6460 | bfa_trc(dport->bfa, BFA_STATUS_EINVAL); | ||
| 6461 | return BFA_STATUS_EINVAL; | ||
| 6462 | } | ||
| 6463 | return BFA_STATUS_OK; | ||
| 6464 | } | ||
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 1abcf7c51661..8d7fbecfcb22 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h | |||
| @@ -474,8 +474,10 @@ struct bfa_fcport_s { | |||
| 474 | /* supported speeds */ | 474 | /* supported speeds */ |
| 475 | enum bfa_port_speed speed; /* current speed */ | 475 | enum bfa_port_speed speed; /* current speed */ |
| 476 | enum bfa_port_topology topology; /* current topology */ | 476 | enum bfa_port_topology topology; /* current topology */ |
| 477 | u8 myalpa; /* my ALPA in LOOP topology */ | ||
| 478 | u8 rsvd[3]; | 477 | u8 rsvd[3]; |
| 478 | u8 myalpa; /* my ALPA in LOOP topology */ | ||
| 479 | u8 alpabm_valid; /* alpa bitmap valid or not */ | ||
| 480 | struct fc_alpabm_s alpabm; /* alpa bitmap */ | ||
| 479 | struct bfa_port_cfg_s cfg; /* current port configuration */ | 481 | struct bfa_port_cfg_s cfg; /* current port configuration */ |
| 480 | bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ | 482 | bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ |
| 481 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ | 483 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ |
| @@ -512,6 +514,7 @@ struct bfa_fcport_s { | |||
| 512 | struct bfa_fcport_trunk_s trunk; | 514 | struct bfa_fcport_trunk_s trunk; |
| 513 | u16 fcoe_vlan; | 515 | u16 fcoe_vlan; |
| 514 | struct bfa_mem_dma_s fcport_dma; | 516 | struct bfa_mem_dma_s fcport_dma; |
| 517 | bfa_boolean_t stats_dma_ready; | ||
| 515 | }; | 518 | }; |
| 516 | 519 | ||
| 517 | #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) | 520 | #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) |
| @@ -534,6 +537,7 @@ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa); | |||
| 534 | bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, | 537 | bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, |
| 535 | enum bfa_port_topology topo); | 538 | enum bfa_port_topology topo); |
| 536 | enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); | 539 | enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); |
| 540 | enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); | ||
| 537 | bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); | 541 | bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); |
| 538 | bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); | 542 | bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); |
| 539 | u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); | 543 | u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); |
| @@ -547,6 +551,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa, | |||
| 547 | void (*event_cbfn) (void *cbarg, | 551 | void (*event_cbfn) (void *cbarg, |
| 548 | enum bfa_port_linkstate event), void *event_cbarg); | 552 | enum bfa_port_linkstate event), void *event_cbarg); |
| 549 | bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); | 553 | bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); |
| 554 | bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa); | ||
| 555 | bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, | ||
| 556 | struct bfa_qos_bw_s *qos_bw); | ||
| 550 | enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); | 557 | enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); |
| 551 | 558 | ||
| 552 | void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); | 559 | void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); |
| @@ -560,6 +567,8 @@ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, | |||
| 560 | struct bfa_cb_pending_q_s *cb); | 567 | struct bfa_cb_pending_q_s *cb); |
| 561 | bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); | 568 | bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); |
| 562 | bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); | 569 | bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); |
| 570 | void bfa_fcport_dportenable(struct bfa_s *bfa); | ||
| 571 | void bfa_fcport_dportdisable(struct bfa_s *bfa); | ||
| 563 | bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); | 572 | bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); |
| 564 | void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); | 573 | void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); |
| 565 | 574 | ||
| @@ -575,6 +584,9 @@ void bfa_cb_rport_offline(void *rport); | |||
| 575 | void bfa_cb_rport_qos_scn_flowid(void *rport, | 584 | void bfa_cb_rport_qos_scn_flowid(void *rport, |
| 576 | struct bfa_rport_qos_attr_s old_qos_attr, | 585 | struct bfa_rport_qos_attr_s old_qos_attr, |
| 577 | struct bfa_rport_qos_attr_s new_qos_attr); | 586 | struct bfa_rport_qos_attr_s new_qos_attr); |
| 587 | void bfa_cb_rport_scn_online(struct bfa_s *bfa); | ||
| 588 | void bfa_cb_rport_scn_offline(struct bfa_s *bfa); | ||
| 589 | void bfa_cb_rport_scn_no_dev(void *rp); | ||
| 578 | void bfa_cb_rport_qos_scn_prio(void *rport, | 590 | void bfa_cb_rport_qos_scn_prio(void *rport, |
| 579 | struct bfa_rport_qos_attr_s old_qos_attr, | 591 | struct bfa_rport_qos_attr_s old_qos_attr, |
| 580 | struct bfa_rport_qos_attr_s new_qos_attr); | 592 | struct bfa_rport_qos_attr_s new_qos_attr); |
| @@ -697,11 +709,21 @@ struct bfa_fcdiag_lb_s { | |||
| 697 | u32 status; | 709 | u32 status; |
| 698 | }; | 710 | }; |
| 699 | 711 | ||
| 712 | struct bfa_dport_s { | ||
| 713 | struct bfa_s *bfa; /* Back pointer to BFA */ | ||
| 714 | bfa_sm_t sm; /* finite state machine */ | ||
| 715 | u32 msgtag; /* firmware msg tag for reply */ | ||
| 716 | struct bfa_reqq_wait_s reqq_wait; | ||
| 717 | bfa_cb_diag_t cbfn; | ||
| 718 | void *cbarg; | ||
| 719 | }; | ||
| 720 | |||
| 700 | struct bfa_fcdiag_s { | 721 | struct bfa_fcdiag_s { |
| 701 | struct bfa_s *bfa; /* Back pointer to BFA */ | 722 | struct bfa_s *bfa; /* Back pointer to BFA */ |
| 702 | struct bfa_trc_mod_s *trcmod; | 723 | struct bfa_trc_mod_s *trcmod; |
| 703 | struct bfa_fcdiag_lb_s lb; | 724 | struct bfa_fcdiag_lb_s lb; |
| 704 | struct bfa_fcdiag_qtest_s qtest; | 725 | struct bfa_fcdiag_qtest_s qtest; |
| 726 | struct bfa_dport_s dport; | ||
| 705 | }; | 727 | }; |
| 706 | 728 | ||
| 707 | #define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) | 729 | #define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) |
| @@ -717,5 +739,11 @@ bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore, | |||
| 717 | u32 queue, struct bfa_diag_qtest_result_s *result, | 739 | u32 queue, struct bfa_diag_qtest_result_s *result, |
| 718 | bfa_cb_diag_t cbfn, void *cbarg); | 740 | bfa_cb_diag_t cbfn, void *cbarg); |
| 719 | bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); | 741 | bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); |
| 742 | bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, | ||
| 743 | void *cbarg); | ||
| 744 | bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, | ||
| 745 | void *cbarg); | ||
| 746 | bfa_status_t bfa_dport_get_state(struct bfa_s *bfa, | ||
| 747 | enum bfa_dport_state *state); | ||
| 720 | 748 | ||
| 721 | #endif /* __BFA_SVC_H__ */ | 749 | #endif /* __BFA_SVC_H__ */ |
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index c37494916a1a..895b0e516e07 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
| @@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; | |||
| 63 | u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; | 63 | u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; |
| 64 | u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; | 64 | u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; |
| 65 | 65 | ||
| 66 | #define BFAD_FW_FILE_CB "cbfw.bin" | 66 | #define BFAD_FW_FILE_CB "cbfw-3.1.0.0.bin" |
| 67 | #define BFAD_FW_FILE_CT "ctfw.bin" | 67 | #define BFAD_FW_FILE_CT "ctfw-3.1.0.0.bin" |
| 68 | #define BFAD_FW_FILE_CT2 "ct2fw.bin" | 68 | #define BFAD_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" |
| 69 | 69 | ||
| 70 | static u32 *bfad_load_fwimg(struct pci_dev *pdev); | 70 | static u32 *bfad_load_fwimg(struct pci_dev *pdev); |
| 71 | static void bfad_free_fwimg(void); | 71 | static void bfad_free_fwimg(void); |
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 0afa39076cef..555e7db94a1c 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c | |||
| @@ -33,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) | |||
| 33 | /* If IOC is not in disabled state - return */ | 33 | /* If IOC is not in disabled state - return */ |
| 34 | if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { | 34 | if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { |
| 35 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 35 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 36 | iocmd->status = BFA_STATUS_IOC_FAILURE; | 36 | iocmd->status = BFA_STATUS_OK; |
| 37 | return rc; | 37 | return rc; |
| 38 | } | 38 | } |
| 39 | 39 | ||
| @@ -54,6 +54,12 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) | |||
| 54 | unsigned long flags; | 54 | unsigned long flags; |
| 55 | 55 | ||
| 56 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 56 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 57 | if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { | ||
| 58 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 59 | iocmd->status = BFA_STATUS_OK; | ||
| 60 | return rc; | ||
| 61 | } | ||
| 62 | |||
| 57 | if (bfad->disable_active) { | 63 | if (bfad->disable_active) { |
| 58 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 64 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 59 | return -EBUSY; | 65 | return -EBUSY; |
| @@ -101,9 +107,10 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) | |||
| 101 | 107 | ||
| 102 | /* set adapter hw path */ | 108 | /* set adapter hw path */ |
| 103 | strcpy(iocmd->adapter_hwpath, bfad->pci_name); | 109 | strcpy(iocmd->adapter_hwpath, bfad->pci_name); |
| 104 | i = strlen(iocmd->adapter_hwpath) - 1; | 110 | for (i = 0; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++) |
| 105 | while (iocmd->adapter_hwpath[i] != '.') | 111 | ; |
| 106 | i--; | 112 | for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; ) |
| 113 | ; | ||
| 107 | iocmd->adapter_hwpath[i] = '\0'; | 114 | iocmd->adapter_hwpath[i] = '\0'; |
| 108 | iocmd->status = BFA_STATUS_OK; | 115 | iocmd->status = BFA_STATUS_OK; |
| 109 | return 0; | 116 | return 0; |
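The rewrite changes where adapter_hwpath gets truncated: instead of cutting at the last '.', it now scans forward to the second ':' (so "0000:0a:00.0" becomes "0000:0a"). Note that the added loops test adapter_hwpath[i] before checking i < BFA_STRING_32, so the bound check comes second; a bounds-first restatement in plain, runnable C (an illustration, not the committed code):

    #include <stdio.h>

    #define BFA_STRING_32 32

    /* Truncate a PCI name like "0000:0a:00.0" after its second ':',
     * checking the index before dereferencing (illustrative variant). */
    static void hwpath_trim(char hwpath[BFA_STRING_32])
    {
            int i, colons = 0;

            for (i = 0; i < BFA_STRING_32 && hwpath[i] != '\0'; i++) {
                    if (hwpath[i] == ':' && ++colons == 2) {
                            hwpath[i] = '\0';
                            return;
                    }
            }
    }

    int main(void)
    {
            char path[BFA_STRING_32] = "0000:0a:00.0";

            hwpath_trim(path);
            printf("%s\n", path);   /* prints "0000:0a" */
            return 0;
    }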
| @@ -880,6 +887,19 @@ out: | |||
| 880 | } | 887 | } |
| 881 | 888 | ||
| 882 | int | 889 | int |
| 890 | bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd) | ||
| 891 | { | ||
| 892 | struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd; | ||
| 893 | unsigned long flags; | ||
| 894 | |||
| 895 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 896 | iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw); | ||
| 897 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 898 | |||
| 899 | return 0; | ||
| 900 | } | ||
| 901 | |||
| 902 | int | ||
| 883 | bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) | 903 | bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) |
| 884 | { | 904 | { |
| 885 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; | 905 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; |
| @@ -888,16 +908,22 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) | |||
| 888 | 908 | ||
| 889 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 909 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 890 | 910 | ||
| 891 | if (cmd == IOCMD_RATELIM_ENABLE) | 911 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 892 | fcport->cfg.ratelimit = BFA_TRUE; | 912 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 893 | else if (cmd == IOCMD_RATELIM_DISABLE) | 913 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; |
| 894 | fcport->cfg.ratelimit = BFA_FALSE; | 914 | else { |
| 915 | if (cmd == IOCMD_RATELIM_ENABLE) | ||
| 916 | fcport->cfg.ratelimit = BFA_TRUE; | ||
| 917 | else if (cmd == IOCMD_RATELIM_DISABLE) | ||
| 918 | fcport->cfg.ratelimit = BFA_FALSE; | ||
| 895 | 919 | ||
| 896 | if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) | 920 | if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) |
| 897 | fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; | 921 | fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; |
| 922 | |||
| 923 | iocmd->status = BFA_STATUS_OK; | ||
| 924 | } | ||
| 898 | 925 | ||
| 899 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 926 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 900 | iocmd->status = BFA_STATUS_OK; | ||
| 901 | 927 | ||
| 902 | return 0; | 928 | return 0; |
| 903 | } | 929 | } |
| @@ -919,8 +945,13 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) | |||
| 919 | return 0; | 945 | return 0; |
| 920 | } | 946 | } |
| 921 | 947 | ||
| 922 | fcport->cfg.trl_def_speed = iocmd->speed; | 948 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 923 | iocmd->status = BFA_STATUS_OK; | 949 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 950 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; | ||
| 951 | else { | ||
| 952 | fcport->cfg.trl_def_speed = iocmd->speed; | ||
| 953 | iocmd->status = BFA_STATUS_OK; | ||
| 954 | } | ||
| 924 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 955 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 925 | 956 | ||
| 926 | return 0; | 957 | return 0; |
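Both rate-limit handlers now reject the operation with BFA_STATUS_TOPOLOGY_LOOP when the port is in loop topology, and the status assignment correctly moves inside the lock-protected branch instead of being set unconditionally after unlock. The same two-field test recurs in the QoS and trunk hunks below (the trunk variants combine the fields with || where the others use &&). A hedged helper that would factor the common case; the name is hypothetical:

    /* Sketch: the recurring "configured and active topology are both
     * loop" test, expressed once. */
    static inline bfa_boolean_t
    bfad_port_is_loop(struct bfa_fcport_s *fcport)
    {
            return (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
                   (fcport->topology == BFA_PORT_TOPOLOGY_LOOP);
    }

    /* ...after which each handler reduces to: */
    if (bfad_port_is_loop(fcport))
            iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
    else {
            /* perform the rate-limit / QoS / trunk change */
    }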
| @@ -1167,8 +1198,8 @@ bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) | |||
| 1167 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 1198 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 1168 | iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, | 1199 | iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, |
| 1169 | &iocmd->pcifn_id, iocmd->port, | 1200 | &iocmd->pcifn_id, iocmd->port, |
| 1170 | iocmd->pcifn_class, iocmd->bandwidth, | 1201 | iocmd->pcifn_class, iocmd->bw_min, |
| 1171 | bfad_hcb_comp, &fcomp); | 1202 | iocmd->bw_max, bfad_hcb_comp, &fcomp); |
| 1172 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 1203 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 1173 | if (iocmd->status != BFA_STATUS_OK) | 1204 | if (iocmd->status != BFA_STATUS_OK) |
| 1174 | goto out; | 1205 | goto out; |
| @@ -1211,8 +1242,8 @@ bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) | |||
| 1211 | init_completion(&fcomp.comp); | 1242 | init_completion(&fcomp.comp); |
| 1212 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 1243 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 1213 | iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, | 1244 | iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, |
| 1214 | iocmd->pcifn_id, iocmd->bandwidth, | 1245 | iocmd->pcifn_id, iocmd->bw_min, |
| 1215 | bfad_hcb_comp, &fcomp); | 1246 | iocmd->bw_max, bfad_hcb_comp, &fcomp); |
| 1216 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 1247 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 1217 | bfa_trc(bfad, iocmd->status); | 1248 | bfa_trc(bfad, iocmd->status); |
| 1218 | if (iocmd->status != BFA_STATUS_OK) | 1249 | if (iocmd->status != BFA_STATUS_OK) |
| @@ -1736,6 +1767,52 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) | |||
| 1736 | } | 1767 | } |
| 1737 | 1768 | ||
| 1738 | int | 1769 | int |
| 1770 | bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd) | ||
| 1771 | { | ||
| 1772 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; | ||
| 1773 | unsigned long flags; | ||
| 1774 | struct bfad_hal_comp fcomp; | ||
| 1775 | |||
| 1776 | init_completion(&fcomp.comp); | ||
| 1777 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 1778 | if (cmd == IOCMD_DIAG_DPORT_ENABLE) | ||
| 1779 | iocmd->status = bfa_dport_enable(&bfad->bfa, | ||
| 1780 | bfad_hcb_comp, &fcomp); | ||
| 1781 | else if (cmd == IOCMD_DIAG_DPORT_DISABLE) | ||
| 1782 | iocmd->status = bfa_dport_disable(&bfad->bfa, | ||
| 1783 | bfad_hcb_comp, &fcomp); | ||
| 1784 | else { | ||
| 1785 | bfa_trc(bfad, 0); | ||
| 1786 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 1787 | return -EINVAL; | ||
| 1788 | } | ||
| 1789 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 1790 | |||
| 1791 | if (iocmd->status != BFA_STATUS_OK) | ||
| 1792 | bfa_trc(bfad, iocmd->status); | ||
| 1793 | else { | ||
| 1794 | wait_for_completion(&fcomp.comp); | ||
| 1795 | iocmd->status = fcomp.status; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | return 0; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | int | ||
| 1802 | bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd) | ||
| 1803 | { | ||
| 1804 | struct bfa_bsg_diag_dport_get_state_s *iocmd = | ||
| 1805 | (struct bfa_bsg_diag_dport_get_state_s *)pcmd; | ||
| 1806 | unsigned long flags; | ||
| 1807 | |||
| 1808 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 1809 | iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state); | ||
| 1810 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 1811 | |||
| 1812 | return 0; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | int | ||
| 1739 | bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) | 1816 | bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) |
| 1740 | { | 1817 | { |
| 1741 | struct bfa_bsg_phy_attr_s *iocmd = | 1818 | struct bfa_bsg_phy_attr_s *iocmd = |
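The new IOCMD_DIAG_DPORT_* handlers follow the driver's standard asynchronous shape: init a completion, issue the request under bfad_lock, then wait outside the lock only when the submit succeeded. Condensed to the pattern (a sketch of the enable path above):

    /* Sketch: submit under lock, wait for firmware outside the lock. */
    struct bfad_hal_comp fcomp;

    init_completion(&fcomp.comp);
    spin_lock_irqsave(&bfad->bfad_lock, flags);
    iocmd->status = bfa_dport_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
    spin_unlock_irqrestore(&bfad->bfad_lock, flags);

    if (iocmd->status == BFA_STATUS_OK) {
            wait_for_completion(&fcomp.comp);  /* firmware response */
            iocmd->status = fcomp.status;      /* final result */
    }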
| @@ -2052,7 +2129,7 @@ bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) | |||
| 2052 | init_completion(&fcomp.comp); | 2129 | init_completion(&fcomp.comp); |
| 2053 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2130 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2054 | iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), | 2131 | iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), |
| 2055 | BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), | 2132 | BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, |
| 2056 | &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, | 2133 | &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, |
| 2057 | bfad_hcb_comp, &fcomp); | 2134 | bfad_hcb_comp, &fcomp); |
| 2058 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2135 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| @@ -2074,7 +2151,7 @@ bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) | |||
| 2074 | init_completion(&fcomp.comp); | 2151 | init_completion(&fcomp.comp); |
| 2075 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2152 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2076 | iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), | 2153 | iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), |
| 2077 | BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), | 2154 | BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id, |
| 2078 | &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, | 2155 | &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, |
| 2079 | bfad_hcb_comp, &fcomp); | 2156 | bfad_hcb_comp, &fcomp); |
| 2080 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2157 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| @@ -2161,22 +2238,31 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) | |||
| 2161 | 2238 | ||
| 2162 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2239 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2163 | 2240 | ||
| 2164 | if (v_cmd == IOCMD_TRUNK_ENABLE) { | 2241 | if (bfa_fcport_is_dport(&bfad->bfa)) |
| 2165 | trunk->attr.state = BFA_TRUNK_OFFLINE; | 2242 | return BFA_STATUS_DPORT_ERR; |
| 2166 | bfa_fcport_disable(&bfad->bfa); | ||
| 2167 | fcport->cfg.trunked = BFA_TRUE; | ||
| 2168 | } else if (v_cmd == IOCMD_TRUNK_DISABLE) { | ||
| 2169 | trunk->attr.state = BFA_TRUNK_DISABLED; | ||
| 2170 | bfa_fcport_disable(&bfad->bfa); | ||
| 2171 | fcport->cfg.trunked = BFA_FALSE; | ||
| 2172 | } | ||
| 2173 | 2243 | ||
| 2174 | if (!bfa_fcport_is_disabled(&bfad->bfa)) | 2244 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || |
| 2175 | bfa_fcport_enable(&bfad->bfa); | 2245 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 2246 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; | ||
| 2247 | else { | ||
| 2248 | if (v_cmd == IOCMD_TRUNK_ENABLE) { | ||
| 2249 | trunk->attr.state = BFA_TRUNK_OFFLINE; | ||
| 2250 | bfa_fcport_disable(&bfad->bfa); | ||
| 2251 | fcport->cfg.trunked = BFA_TRUE; | ||
| 2252 | } else if (v_cmd == IOCMD_TRUNK_DISABLE) { | ||
| 2253 | trunk->attr.state = BFA_TRUNK_DISABLED; | ||
| 2254 | bfa_fcport_disable(&bfad->bfa); | ||
| 2255 | fcport->cfg.trunked = BFA_FALSE; | ||
| 2256 | } | ||
| 2257 | |||
| 2258 | if (!bfa_fcport_is_disabled(&bfad->bfa)) | ||
| 2259 | bfa_fcport_enable(&bfad->bfa); | ||
| 2260 | |||
| 2261 | iocmd->status = BFA_STATUS_OK; | ||
| 2262 | } | ||
| 2176 | 2263 | ||
| 2177 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2264 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2178 | 2265 | ||
| 2179 | iocmd->status = BFA_STATUS_OK; | ||
| 2180 | return 0; | 2266 | return 0; |
| 2181 | } | 2267 | } |
| 2182 | 2268 | ||
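One hazard is visible in this hunk as rendered: the new d-port check returns BFA_STATUS_DPORT_ERR straight out of an int-returning handler, and it does so before releasing bfad_lock, which was taken two lines earlier. A defensive restatement of that early exit (an illustration of the safer shape, not the committed code):

    spin_lock_irqsave(&bfad->bfad_lock, flags);
    if (bfa_fcport_is_dport(&bfad->bfa)) {
            spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* release */
            iocmd->status = BFA_STATUS_DPORT_ERR;  /* report via iocmd */
            return 0;
    }

The status would then reach userspace through iocmd->status like every other branch in this function.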
| @@ -2189,12 +2275,17 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd) | |||
| 2189 | unsigned long flags; | 2275 | unsigned long flags; |
| 2190 | 2276 | ||
| 2191 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2277 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2192 | memcpy((void *)&iocmd->attr, (void *)&trunk->attr, | 2278 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || |
| 2193 | sizeof(struct bfa_trunk_attr_s)); | 2279 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 2194 | iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); | 2280 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; |
| 2281 | else { | ||
| 2282 | memcpy((void *)&iocmd->attr, (void *)&trunk->attr, | ||
| 2283 | sizeof(struct bfa_trunk_attr_s)); | ||
| 2284 | iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); | ||
| 2285 | iocmd->status = BFA_STATUS_OK; | ||
| 2286 | } | ||
| 2195 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2287 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2196 | 2288 | ||
| 2197 | iocmd->status = BFA_STATUS_OK; | ||
| 2198 | return 0; | 2289 | return 0; |
| 2199 | } | 2290 | } |
| 2200 | 2291 | ||
| @@ -2207,14 +2298,22 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) | |||
| 2207 | 2298 | ||
| 2208 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2299 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2209 | if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { | 2300 | if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { |
| 2210 | if (v_cmd == IOCMD_QOS_ENABLE) | 2301 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 2211 | fcport->cfg.qos_enabled = BFA_TRUE; | 2302 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 2212 | else if (v_cmd == IOCMD_QOS_DISABLE) | 2303 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; |
| 2213 | fcport->cfg.qos_enabled = BFA_FALSE; | 2304 | else { |
| 2305 | if (v_cmd == IOCMD_QOS_ENABLE) | ||
| 2306 | fcport->cfg.qos_enabled = BFA_TRUE; | ||
| 2307 | else if (v_cmd == IOCMD_QOS_DISABLE) { | ||
| 2308 | fcport->cfg.qos_enabled = BFA_FALSE; | ||
| 2309 | fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH; | ||
| 2310 | fcport->cfg.qos_bw.med = BFA_QOS_BW_MED; | ||
| 2311 | fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW; | ||
| 2312 | } | ||
| 2313 | } | ||
| 2214 | } | 2314 | } |
| 2215 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2315 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2216 | 2316 | ||
| 2217 | iocmd->status = BFA_STATUS_OK; | ||
| 2218 | return 0; | 2317 | return 0; |
| 2219 | } | 2318 | } |
| 2220 | 2319 | ||
| @@ -2226,11 +2325,21 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd) | |||
| 2226 | unsigned long flags; | 2325 | unsigned long flags; |
| 2227 | 2326 | ||
| 2228 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2327 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2229 | iocmd->attr.state = fcport->qos_attr.state; | 2328 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 2230 | iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); | 2329 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) |
| 2330 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; | ||
| 2331 | else { | ||
| 2332 | iocmd->attr.state = fcport->qos_attr.state; | ||
| 2333 | iocmd->attr.total_bb_cr = | ||
| 2334 | be32_to_cpu(fcport->qos_attr.total_bb_cr); | ||
| 2335 | iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high; | ||
| 2336 | iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med; | ||
| 2337 | iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low; | ||
| 2338 | iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op; | ||
| 2339 | iocmd->status = BFA_STATUS_OK; | ||
| 2340 | } | ||
| 2231 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2341 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2232 | 2342 | ||
| 2233 | iocmd->status = BFA_STATUS_OK; | ||
| 2234 | return 0; | 2343 | return 0; |
| 2235 | } | 2344 | } |
| 2236 | 2345 | ||
| @@ -2274,6 +2383,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) | |||
| 2274 | struct bfad_hal_comp fcomp; | 2383 | struct bfad_hal_comp fcomp; |
| 2275 | unsigned long flags; | 2384 | unsigned long flags; |
| 2276 | struct bfa_cb_pending_q_s cb_qe; | 2385 | struct bfa_cb_pending_q_s cb_qe; |
| 2386 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); | ||
| 2277 | 2387 | ||
| 2278 | init_completion(&fcomp.comp); | 2388 | init_completion(&fcomp.comp); |
| 2279 | bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, | 2389 | bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, |
| @@ -2281,7 +2391,11 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) | |||
| 2281 | 2391 | ||
| 2282 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2392 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2283 | WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); | 2393 | WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); |
| 2284 | iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); | 2394 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 2395 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) | ||
| 2396 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; | ||
| 2397 | else | ||
| 2398 | iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); | ||
| 2285 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2399 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2286 | if (iocmd->status != BFA_STATUS_OK) { | 2400 | if (iocmd->status != BFA_STATUS_OK) { |
| 2287 | bfa_trc(bfad, iocmd->status); | 2401 | bfa_trc(bfad, iocmd->status); |
| @@ -2300,6 +2414,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) | |||
| 2300 | struct bfad_hal_comp fcomp; | 2414 | struct bfad_hal_comp fcomp; |
| 2301 | unsigned long flags; | 2415 | unsigned long flags; |
| 2302 | struct bfa_cb_pending_q_s cb_qe; | 2416 | struct bfa_cb_pending_q_s cb_qe; |
| 2417 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); | ||
| 2303 | 2418 | ||
| 2304 | init_completion(&fcomp.comp); | 2419 | init_completion(&fcomp.comp); |
| 2305 | bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, | 2420 | bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, |
| @@ -2307,7 +2422,11 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) | |||
| 2307 | 2422 | ||
| 2308 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 2423 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 2309 | WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); | 2424 | WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); |
| 2310 | iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); | 2425 | if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) && |
| 2426 | (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) | ||
| 2427 | iocmd->status = BFA_STATUS_TOPOLOGY_LOOP; | ||
| 2428 | else | ||
| 2429 | iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); | ||
| 2311 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 2430 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 2312 | if (iocmd->status != BFA_STATUS_OK) { | 2431 | if (iocmd->status != BFA_STATUS_OK) { |
| 2313 | bfa_trc(bfad, iocmd->status); | 2432 | bfa_trc(bfad, iocmd->status); |
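The stats get/clear paths gain the same fcport lookup and loop-topology guard before queuing the request; as elsewhere, the completion is reaped only after the lock is dropped and only on a successful submit. Condensed, with bfa_pending_q_init() arguments abbreviated from the context above and the helper from the earlier sketch reused:

    /* Sketch: guard, then queue an async stats request (pattern above). */
    struct bfa_cb_pending_q_s cb_qe;

    init_completion(&fcomp.comp);
    bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
                       &fcomp, &iocmd->stats);

    spin_lock_irqsave(&bfad->bfad_lock, flags);
    if (bfad_port_is_loop(fcport))      /* helper sketched earlier */
            iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
    else
            iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
    spin_unlock_irqrestore(&bfad->bfad_lock, flags);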
| @@ -2435,6 +2554,139 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) | |||
| 2435 | return 0; | 2554 | return 0; |
| 2436 | } | 2555 | } |
| 2437 | 2556 | ||
| 2557 | int | ||
| 2558 | bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) | ||
| 2559 | { | ||
| 2560 | struct bfa_bsg_fcpim_throttle_s *iocmd = | ||
| 2561 | (struct bfa_bsg_fcpim_throttle_s *)cmd; | ||
| 2562 | unsigned long flags; | ||
| 2563 | |||
| 2564 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2565 | iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, | ||
| 2566 | (void *)&iocmd->throttle); | ||
| 2567 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2568 | |||
| 2569 | return 0; | ||
| 2570 | } | ||
| 2571 | |||
| 2572 | int | ||
| 2573 | bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) | ||
| 2574 | { | ||
| 2575 | struct bfa_bsg_fcpim_throttle_s *iocmd = | ||
| 2576 | (struct bfa_bsg_fcpim_throttle_s *)cmd; | ||
| 2577 | unsigned long flags; | ||
| 2578 | |||
| 2579 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2580 | iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, | ||
| 2581 | iocmd->throttle.cfg_value); | ||
| 2582 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2583 | |||
| 2584 | return 0; | ||
| 2585 | } | ||
| 2586 | |||
| 2587 | int | ||
| 2588 | bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd) | ||
| 2589 | { | ||
| 2590 | struct bfa_bsg_tfru_s *iocmd = | ||
| 2591 | (struct bfa_bsg_tfru_s *)cmd; | ||
| 2592 | struct bfad_hal_comp fcomp; | ||
| 2593 | unsigned long flags = 0; | ||
| 2594 | |||
| 2595 | init_completion(&fcomp.comp); | ||
| 2596 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2597 | iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa), | ||
| 2598 | &iocmd->data, iocmd->len, iocmd->offset, | ||
| 2599 | bfad_hcb_comp, &fcomp); | ||
| 2600 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2601 | if (iocmd->status == BFA_STATUS_OK) { | ||
| 2602 | wait_for_completion(&fcomp.comp); | ||
| 2603 | iocmd->status = fcomp.status; | ||
| 2604 | } | ||
| 2605 | |||
| 2606 | return 0; | ||
| 2607 | } | ||
| 2608 | |||
| 2609 | int | ||
| 2610 | bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd) | ||
| 2611 | { | ||
| 2612 | struct bfa_bsg_tfru_s *iocmd = | ||
| 2613 | (struct bfa_bsg_tfru_s *)cmd; | ||
| 2614 | struct bfad_hal_comp fcomp; | ||
| 2615 | unsigned long flags = 0; | ||
| 2616 | |||
| 2617 | init_completion(&fcomp.comp); | ||
| 2618 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2619 | iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa), | ||
| 2620 | &iocmd->data, iocmd->len, iocmd->offset, | ||
| 2621 | bfad_hcb_comp, &fcomp); | ||
| 2622 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2623 | if (iocmd->status == BFA_STATUS_OK) { | ||
| 2624 | wait_for_completion(&fcomp.comp); | ||
| 2625 | iocmd->status = fcomp.status; | ||
| 2626 | } | ||
| 2627 | |||
| 2628 | return 0; | ||
| 2629 | } | ||
| 2630 | |||
| 2631 | int | ||
| 2632 | bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd) | ||
| 2633 | { | ||
| 2634 | struct bfa_bsg_fruvpd_s *iocmd = | ||
| 2635 | (struct bfa_bsg_fruvpd_s *)cmd; | ||
| 2636 | struct bfad_hal_comp fcomp; | ||
| 2637 | unsigned long flags = 0; | ||
| 2638 | |||
| 2639 | init_completion(&fcomp.comp); | ||
| 2640 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2641 | iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa), | ||
| 2642 | &iocmd->data, iocmd->len, iocmd->offset, | ||
| 2643 | bfad_hcb_comp, &fcomp); | ||
| 2644 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2645 | if (iocmd->status == BFA_STATUS_OK) { | ||
| 2646 | wait_for_completion(&fcomp.comp); | ||
| 2647 | iocmd->status = fcomp.status; | ||
| 2648 | } | ||
| 2649 | |||
| 2650 | return 0; | ||
| 2651 | } | ||
| 2652 | |||
| 2653 | int | ||
| 2654 | bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) | ||
| 2655 | { | ||
| 2656 | struct bfa_bsg_fruvpd_s *iocmd = | ||
| 2657 | (struct bfa_bsg_fruvpd_s *)cmd; | ||
| 2658 | struct bfad_hal_comp fcomp; | ||
| 2659 | unsigned long flags = 0; | ||
| 2660 | |||
| 2661 | init_completion(&fcomp.comp); | ||
| 2662 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2663 | iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), | ||
| 2664 | &iocmd->data, iocmd->len, iocmd->offset, | ||
| 2665 | bfad_hcb_comp, &fcomp); | ||
| 2666 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2667 | if (iocmd->status == BFA_STATUS_OK) { | ||
| 2668 | wait_for_completion(&fcomp.comp); | ||
| 2669 | iocmd->status = fcomp.status; | ||
| 2670 | } | ||
| 2671 | |||
| 2672 | return 0; | ||
| 2673 | } | ||
| 2674 | |||
| 2675 | int | ||
| 2676 | bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd) | ||
| 2677 | { | ||
| 2678 | struct bfa_bsg_fruvpd_max_size_s *iocmd = | ||
| 2679 | (struct bfa_bsg_fruvpd_max_size_s *)cmd; | ||
| 2680 | unsigned long flags = 0; | ||
| 2681 | |||
| 2682 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
| 2683 | iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa), | ||
| 2684 | &iocmd->max_size); | ||
| 2685 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
| 2686 | |||
| 2687 | return 0; | ||
| 2688 | } | ||
| 2689 | |||
| 2438 | static int | 2690 | static int |
| 2439 | bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, | 2691 | bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, |
| 2440 | unsigned int payload_len) | 2692 | unsigned int payload_len) |
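The four FRU handlers (tfru read/write, fruvpd read/update) are structurally identical: the BSG payload carries offset, length, and an inline data buffer, and the handler forwards it to the BFA_FRU() module, again waiting on the completion only when submission succeeded. The inline buffers cap a single transfer at BFA_TFRU_DATA_SIZE (64) or BFA_MAX_FRUVPD_TRANSFER_SIZE (0x1000) bytes, as defined in bfad_bsg.h below, so larger reads must be chunked by the caller. A hedged chunking sketch in plain C; issue_read() is a hypothetical wrapper around one IOCMD_FRUVPD_READ round trip:

    #include <stddef.h>

    #define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000

    /* Sketch: split a large FRU VPD read into <= 4 KiB requests. */
    static int read_fruvpd(void *dst, size_t total, size_t base,
                           int (*issue_read)(void *buf, size_t off,
                                             size_t len))
    {
            size_t done = 0;

            while (done < total) {
                    size_t n = total - done;

                    if (n > BFA_MAX_FRUVPD_TRANSFER_SIZE)
                            n = BFA_MAX_FRUVPD_TRANSFER_SIZE;
                    if (issue_read((char *)dst + done, base + done, n))
                            return -1;      /* propagate failure */
                    done += n;
            }
            return 0;
    }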
| @@ -2660,6 +2912,13 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, | |||
| 2660 | case IOCMD_DIAG_LB_STAT: | 2912 | case IOCMD_DIAG_LB_STAT: |
| 2661 | rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); | 2913 | rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); |
| 2662 | break; | 2914 | break; |
| 2915 | case IOCMD_DIAG_DPORT_ENABLE: | ||
| 2916 | case IOCMD_DIAG_DPORT_DISABLE: | ||
| 2917 | rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd); | ||
| 2918 | break; | ||
| 2919 | case IOCMD_DIAG_DPORT_GET_STATE: | ||
| 2920 | rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd); | ||
| 2921 | break; | ||
| 2663 | case IOCMD_PHY_GET_ATTR: | 2922 | case IOCMD_PHY_GET_ATTR: |
| 2664 | rc = bfad_iocmd_phy_get_attr(bfad, iocmd); | 2923 | rc = bfad_iocmd_phy_get_attr(bfad, iocmd); |
| 2665 | break; | 2924 | break; |
| @@ -2741,6 +3000,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, | |||
| 2741 | case IOCMD_QOS_RESET_STATS: | 3000 | case IOCMD_QOS_RESET_STATS: |
| 2742 | rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); | 3001 | rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); |
| 2743 | break; | 3002 | break; |
| 3003 | case IOCMD_QOS_SET_BW: | ||
| 3004 | rc = bfad_iocmd_qos_set_bw(bfad, iocmd); | ||
| 3005 | break; | ||
| 2744 | case IOCMD_VF_GET_STATS: | 3006 | case IOCMD_VF_GET_STATS: |
| 2745 | rc = bfad_iocmd_vf_get_stats(bfad, iocmd); | 3007 | rc = bfad_iocmd_vf_get_stats(bfad, iocmd); |
| 2746 | break; | 3008 | break; |
| @@ -2759,6 +3021,29 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, | |||
| 2759 | case IOCMD_FCPIM_LUNMASK_DELETE: | 3021 | case IOCMD_FCPIM_LUNMASK_DELETE: |
| 2760 | rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); | 3022 | rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); |
| 2761 | break; | 3023 | break; |
| 3024 | case IOCMD_FCPIM_THROTTLE_QUERY: | ||
| 3025 | rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); | ||
| 3026 | break; | ||
| 3027 | case IOCMD_FCPIM_THROTTLE_SET: | ||
| 3028 | rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); | ||
| 3029 | break; | ||
| 3030 | /* TFRU */ | ||
| 3031 | case IOCMD_TFRU_READ: | ||
| 3032 | rc = bfad_iocmd_tfru_read(bfad, iocmd); | ||
| 3033 | break; | ||
| 3034 | case IOCMD_TFRU_WRITE: | ||
| 3035 | rc = bfad_iocmd_tfru_write(bfad, iocmd); | ||
| 3036 | break; | ||
| 3037 | /* FRU */ | ||
| 3038 | case IOCMD_FRUVPD_READ: | ||
| 3039 | rc = bfad_iocmd_fruvpd_read(bfad, iocmd); | ||
| 3040 | break; | ||
| 3041 | case IOCMD_FRUVPD_UPDATE: | ||
| 3042 | rc = bfad_iocmd_fruvpd_update(bfad, iocmd); | ||
| 3043 | break; | ||
| 3044 | case IOCMD_FRUVPD_GET_MAX_SIZE: | ||
| 3045 | rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd); | ||
| 3046 | break; | ||
| 2762 | default: | 3047 | default: |
| 2763 | rc = -EINVAL; | 3048 | rc = -EINVAL; |
| 2764 | break; | 3049 | break; |
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index 8c569ddb750d..15e1fc8e796b 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h | |||
| @@ -141,6 +141,17 @@ enum { | |||
| 141 | IOCMD_FCPIM_LUNMASK_QUERY, | 141 | IOCMD_FCPIM_LUNMASK_QUERY, |
| 142 | IOCMD_FCPIM_LUNMASK_ADD, | 142 | IOCMD_FCPIM_LUNMASK_ADD, |
| 143 | IOCMD_FCPIM_LUNMASK_DELETE, | 143 | IOCMD_FCPIM_LUNMASK_DELETE, |
| 144 | IOCMD_DIAG_DPORT_ENABLE, | ||
| 145 | IOCMD_DIAG_DPORT_DISABLE, | ||
| 146 | IOCMD_DIAG_DPORT_GET_STATE, | ||
| 147 | IOCMD_QOS_SET_BW, | ||
| 148 | IOCMD_FCPIM_THROTTLE_QUERY, | ||
| 149 | IOCMD_FCPIM_THROTTLE_SET, | ||
| 150 | IOCMD_TFRU_READ, | ||
| 151 | IOCMD_TFRU_WRITE, | ||
| 152 | IOCMD_FRUVPD_READ, | ||
| 153 | IOCMD_FRUVPD_UPDATE, | ||
| 154 | IOCMD_FRUVPD_GET_MAX_SIZE, | ||
| 144 | }; | 155 | }; |
| 145 | 156 | ||
| 146 | struct bfa_bsg_gen_s { | 157 | struct bfa_bsg_gen_s { |
| @@ -463,7 +474,8 @@ struct bfa_bsg_pcifn_s { | |||
| 463 | bfa_status_t status; | 474 | bfa_status_t status; |
| 464 | u16 bfad_num; | 475 | u16 bfad_num; |
| 465 | u16 pcifn_id; | 476 | u16 pcifn_id; |
| 466 | u32 bandwidth; | 477 | u16 bw_min; |
| 478 | u16 bw_max; | ||
| 467 | u8 port; | 479 | u8 port; |
| 468 | enum bfi_pcifn_class pcifn_class; | 480 | enum bfi_pcifn_class pcifn_class; |
| 469 | u8 rsvd[1]; | 481 | u8 rsvd[1]; |
| @@ -613,6 +625,13 @@ struct bfa_bsg_diag_lb_stat_s { | |||
| 613 | u16 rsvd; | 625 | u16 rsvd; |
| 614 | }; | 626 | }; |
| 615 | 627 | ||
| 628 | struct bfa_bsg_diag_dport_get_state_s { | ||
| 629 | bfa_status_t status; | ||
| 630 | u16 bfad_num; | ||
| 631 | u16 rsvd; | ||
| 632 | enum bfa_dport_state state; | ||
| 633 | }; | ||
| 634 | |||
| 616 | struct bfa_bsg_phy_attr_s { | 635 | struct bfa_bsg_phy_attr_s { |
| 617 | bfa_status_t status; | 636 | bfa_status_t status; |
| 618 | u16 bfad_num; | 637 | u16 bfad_num; |
| @@ -694,6 +713,13 @@ struct bfa_bsg_qos_vc_attr_s { | |||
| 694 | struct bfa_qos_vc_attr_s attr; | 713 | struct bfa_qos_vc_attr_s attr; |
| 695 | }; | 714 | }; |
| 696 | 715 | ||
| 716 | struct bfa_bsg_qos_bw_s { | ||
| 717 | bfa_status_t status; | ||
| 718 | u16 bfad_num; | ||
| 719 | u16 rsvd; | ||
| 720 | struct bfa_qos_bw_s qos_bw; | ||
| 721 | }; | ||
| 722 | |||
| 697 | struct bfa_bsg_vf_stats_s { | 723 | struct bfa_bsg_vf_stats_s { |
| 698 | bfa_status_t status; | 724 | bfa_status_t status; |
| 699 | u16 bfad_num; | 725 | u16 bfad_num; |
| @@ -722,6 +748,41 @@ struct bfa_bsg_fcpim_lunmask_s { | |||
| 722 | struct scsi_lun lun; | 748 | struct scsi_lun lun; |
| 723 | }; | 749 | }; |
| 724 | 750 | ||
| 751 | struct bfa_bsg_fcpim_throttle_s { | ||
| 752 | bfa_status_t status; | ||
| 753 | u16 bfad_num; | ||
| 754 | u16 vf_id; | ||
| 755 | struct bfa_defs_fcpim_throttle_s throttle; | ||
| 756 | }; | ||
| 757 | |||
| 758 | #define BFA_TFRU_DATA_SIZE 64 | ||
| 759 | #define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000 | ||
| 760 | |||
| 761 | struct bfa_bsg_tfru_s { | ||
| 762 | bfa_status_t status; | ||
| 763 | u16 bfad_num; | ||
| 764 | u16 rsvd; | ||
| 765 | u32 offset; | ||
| 766 | u32 len; | ||
| 767 | u8 data[BFA_TFRU_DATA_SIZE]; | ||
| 768 | }; | ||
| 769 | |||
| 770 | struct bfa_bsg_fruvpd_s { | ||
| 771 | bfa_status_t status; | ||
| 772 | u16 bfad_num; | ||
| 773 | u16 rsvd; | ||
| 774 | u32 offset; | ||
| 775 | u32 len; | ||
| 776 | u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE]; | ||
| 777 | }; | ||
| 778 | |||
| 779 | struct bfa_bsg_fruvpd_max_size_s { | ||
| 780 | bfa_status_t status; | ||
| 781 | u16 bfad_num; | ||
| 782 | u16 rsvd; | ||
| 783 | u32 max_size; | ||
| 784 | }; | ||
| 785 | |||
| 725 | struct bfa_bsg_fcpt_s { | 786 | struct bfa_bsg_fcpt_s { |
| 726 | bfa_status_t status; | 787 | bfa_status_t status; |
| 727 | u16 vf_id; | 788 | u16 vf_id; |
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 1840651ce1d4..0c64a04f01fa 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h | |||
| @@ -57,7 +57,7 @@ | |||
| 57 | #ifdef BFA_DRIVER_VERSION | 57 | #ifdef BFA_DRIVER_VERSION |
| 58 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION | 58 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION |
| 59 | #else | 59 | #else |
| 60 | #define BFAD_DRIVER_VERSION "3.1.2.0" | 60 | #define BFAD_DRIVER_VERSION "3.1.2.1" |
| 61 | #endif | 61 | #endif |
| 62 | 62 | ||
| 63 | #define BFAD_PROTO_NAME FCPI_NAME | 63 | #define BFAD_PROTO_NAME FCPI_NAME |
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index b2ba0b2e91b2..57b146bca18c 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h | |||
| @@ -210,7 +210,8 @@ enum bfi_mclass { | |||
| 210 | BFI_MC_PORT = 21, /* Physical port */ | 210 | BFI_MC_PORT = 21, /* Physical port */ |
| 211 | BFI_MC_SFP = 22, /* SFP module */ | 211 | BFI_MC_SFP = 22, /* SFP module */ |
| 212 | BFI_MC_PHY = 25, /* External PHY message class */ | 212 | BFI_MC_PHY = 25, /* External PHY message class */ |
| 213 | BFI_MC_MAX = 32 | 213 | BFI_MC_FRU = 34, |
| 214 | BFI_MC_MAX = 35 | ||
| 214 | }; | 215 | }; |
| 215 | 216 | ||
| 216 | #define BFI_IOC_MAX_CQS 4 | 217 | #define BFI_IOC_MAX_CQS 4 |
| @@ -288,6 +289,9 @@ struct bfi_ioc_attr_s { | |||
| 288 | char optrom_version[BFA_VERSION_LEN]; | 289 | char optrom_version[BFA_VERSION_LEN]; |
| 289 | struct bfa_mfg_vpd_s vpd; | 290 | struct bfa_mfg_vpd_s vpd; |
| 290 | u32 card_type; /* card type */ | 291 | u32 card_type; /* card type */ |
| 292 | u8 mfg_day; /* manufacturing day */ | ||
| 293 | u8 mfg_month; /* manufacturing month */ | ||
| 294 | u16 mfg_year; /* manufacturing year */ | ||
| 291 | }; | 295 | }; |
| 292 | 296 | ||
| 293 | /* | 297 | /* |
| @@ -687,7 +691,8 @@ struct bfi_ablk_h2i_pf_req_s { | |||
| 687 | u8 pcifn; | 691 | u8 pcifn; |
| 688 | u8 port; | 692 | u8 port; |
| 689 | u16 pers; | 693 | u16 pers; |
| 690 | u32 bw; | 694 | u16 bw_min; /* percent BW @ max speed */ |
| 695 | u16 bw_max; /* percent BW @ max speed */ | ||
| 691 | }; | 696 | }; |
| 692 | 697 | ||
| 693 | /* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ | 698 | /* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ |
| @@ -957,6 +962,7 @@ enum bfi_diag_h2i { | |||
| 957 | BFI_DIAG_H2I_TEMPSENSOR = 4, | 962 | BFI_DIAG_H2I_TEMPSENSOR = 4, |
| 958 | BFI_DIAG_H2I_LEDTEST = 5, | 963 | BFI_DIAG_H2I_LEDTEST = 5, |
| 959 | BFI_DIAG_H2I_QTEST = 6, | 964 | BFI_DIAG_H2I_QTEST = 6, |
| 965 | BFI_DIAG_H2I_DPORT = 7, | ||
| 960 | }; | 966 | }; |
| 961 | 967 | ||
| 962 | enum bfi_diag_i2h { | 968 | enum bfi_diag_i2h { |
| @@ -966,6 +972,7 @@ enum bfi_diag_i2h { | |||
| 966 | BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), | 972 | BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), |
| 967 | BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), | 973 | BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), |
| 968 | BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), | 974 | BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), |
| 975 | BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT), | ||
| 969 | }; | 976 | }; |
| 970 | 977 | ||
| 971 | #define BFI_DIAG_MAX_SGES 2 | 978 | #define BFI_DIAG_MAX_SGES 2 |
| @@ -1052,6 +1059,23 @@ struct bfi_diag_qtest_req_s { | |||
| 1052 | #define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s | 1059 | #define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s |
| 1053 | 1060 | ||
| 1054 | /* | 1061 | /* |
| 1062 | * D-port test | ||
| 1063 | */ | ||
| 1064 | enum bfi_dport_req { | ||
| 1065 | BFI_DPORT_DISABLE = 0, /* disable dport request */ | ||
| 1066 | BFI_DPORT_ENABLE = 1, /* enable dport request */ | ||
| 1067 | }; | ||
| 1068 | |||
| 1069 | struct bfi_diag_dport_req_s { | ||
| 1070 | struct bfi_mhdr_s mh; /* 4 bytes */ | ||
| 1071 | u8 req; /* request 1: enable 0: disable */ | ||
| 1072 | u8 status; /* reply status */ | ||
| 1073 | u8 rsvd[2]; | ||
| 1074 | u32 msgtag; /* msgtag for reply */ | ||
| 1075 | }; | ||
| 1076 | #define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s | ||
| 1077 | |||
| 1078 | /* | ||
| 1055 | * PHY module specific | 1079 | * PHY module specific |
| 1056 | */ | 1080 | */ |
| 1057 | enum bfi_phy_h2i_msgs_e { | 1081 | enum bfi_phy_h2i_msgs_e { |
| @@ -1147,6 +1171,50 @@ struct bfi_phy_write_rsp_s { | |||
| 1147 | u32 length; | 1171 | u32 length; |
| 1148 | }; | 1172 | }; |
| 1149 | 1173 | ||
| 1174 | enum bfi_fru_h2i_msgs { | ||
| 1175 | BFI_FRUVPD_H2I_WRITE_REQ = 1, | ||
| 1176 | BFI_FRUVPD_H2I_READ_REQ = 2, | ||
| 1177 | BFI_TFRU_H2I_WRITE_REQ = 3, | ||
| 1178 | BFI_TFRU_H2I_READ_REQ = 4, | ||
| 1179 | }; | ||
| 1180 | |||
| 1181 | enum bfi_fru_i2h_msgs { | ||
| 1182 | BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1), | ||
| 1183 | BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2), | ||
| 1184 | BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3), | ||
| 1185 | BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4), | ||
| 1186 | }; | ||
| 1187 | |||
| 1188 | /* | ||
| 1189 | * FRU write request | ||
| 1190 | */ | ||
| 1191 | struct bfi_fru_write_req_s { | ||
| 1192 | struct bfi_mhdr_s mh; /* Common msg header */ | ||
| 1193 | u8 last; | ||
| 1194 | u8 rsv[3]; | ||
| 1195 | u32 offset; | ||
| 1196 | u32 length; | ||
| 1197 | struct bfi_alen_s alen; | ||
| 1198 | }; | ||
| 1199 | |||
| 1200 | /* | ||
| 1201 | * FRU read request | ||
| 1202 | */ | ||
| 1203 | struct bfi_fru_read_req_s { | ||
| 1204 | struct bfi_mhdr_s mh; /* Common msg header */ | ||
| 1205 | u32 offset; | ||
| 1206 | u32 length; | ||
| 1207 | struct bfi_alen_s alen; | ||
| 1208 | }; | ||
| 1209 | |||
| 1210 | /* | ||
| 1211 | * FRU response | ||
| 1212 | */ | ||
| 1213 | struct bfi_fru_rsp_s { | ||
| 1214 | struct bfi_mhdr_s mh; /* Common msg header */ | ||
| 1215 | u32 status; | ||
| 1216 | u32 length; | ||
| 1217 | }; | ||
| 1150 | #pragma pack() | 1218 | #pragma pack() |
| 1151 | 1219 | ||
| 1152 | #endif /* __BFI_H__ */ | 1220 | #endif /* __BFI_H__ */ |
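On the wire, FRU access becomes its own message class (BFI_MC_FRU = 34, raising BFI_MC_MAX to 35) with paired H2I requests and I2H responses; requests carry offset/length plus a bfi_alen_s scatter descriptor, and writes additionally flag the last fragment. A sketch of filling a read request, assuming the header and alen helpers used elsewhere in bfa (bfi_h2i_set(), bfa_alen_set()) and big-endian offsets as in the flash code; treat those details as assumptions:

    /* Sketch (assumptions flagged above): build a FRU VPD read request. */
    struct bfi_fru_read_req_s req;

    bfi_h2i_set(req.mh, BFI_MC_FRU, BFI_FRUVPD_H2I_READ_REQ,
                bfa_ioc_portid(ioc));        /* assumed helper usage */
    req.offset = cpu_to_be32(off);           /* assumed wire byte order */
    req.length = cpu_to_be32(len);
    bfa_alen_set(&req.alen, len, dma_pa);    /* reply lands in DMA buf */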
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index d4220e13cafa..5ae2c167b2c8 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h | |||
| @@ -426,6 +426,7 @@ struct bfi_lps_login_req_s { | |||
| 426 | u8 auth_en; | 426 | u8 auth_en; |
| 427 | u8 lps_role; | 427 | u8 lps_role; |
| 428 | u8 bb_scn; | 428 | u8 bb_scn; |
| 429 | u32 vvl_flag; | ||
| 429 | }; | 430 | }; |
| 430 | 431 | ||
| 431 | struct bfi_lps_login_rsp_s { | 432 | struct bfi_lps_login_rsp_s { |
| @@ -499,6 +500,9 @@ enum bfi_rport_i2h_msgs { | |||
| 499 | BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), | 500 | BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), |
| 500 | BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), | 501 | BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), |
| 501 | BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), | 502 | BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), |
| 503 | BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4), | ||
| 504 | BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5), | ||
| 505 | BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6), | ||
| 502 | }; | 506 | }; |
| 503 | 507 | ||
| 504 | struct bfi_rport_create_req_s { | 508 | struct bfi_rport_create_req_s { |
| @@ -551,6 +555,14 @@ struct bfi_rport_qos_scn_s { | |||
| 551 | struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ | 555 | struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ |
| 552 | }; | 556 | }; |
| 553 | 557 | ||
| 558 | struct bfi_rport_lip_scn_s { | ||
| 559 | struct bfi_mhdr_s mh; /*!< common msg header */ | ||
| 560 | u16 bfa_handle; /*!< host rport handle */ | ||
| 561 | u8 status; /*!< scn online status */ | ||
| 562 | u8 rsvd; | ||
| 563 | struct bfa_fcport_loop_info_s loop_info; | ||
| 564 | }; | ||
| 565 | |||
| 554 | union bfi_rport_h2i_msg_u { | 566 | union bfi_rport_h2i_msg_u { |
| 555 | struct bfi_msg_s *msg; | 567 | struct bfi_msg_s *msg; |
| 556 | struct bfi_rport_create_req_s *create_req; | 568 | struct bfi_rport_create_req_s *create_req; |
| @@ -563,6 +575,7 @@ union bfi_rport_i2h_msg_u { | |||
| 563 | struct bfi_rport_create_rsp_s *create_rsp; | 575 | struct bfi_rport_create_rsp_s *create_rsp; |
| 564 | struct bfi_rport_delete_rsp_s *delete_rsp; | 576 | struct bfi_rport_delete_rsp_s *delete_rsp; |
| 565 | struct bfi_rport_qos_scn_s *qos_scn_evt; | 577 | struct bfi_rport_qos_scn_s *qos_scn_evt; |
| 578 | struct bfi_rport_lip_scn_s *lip_scn; | ||
| 566 | }; | 579 | }; |
| 567 | 580 | ||
| 568 | /* | 581 | /* |
| @@ -828,6 +841,7 @@ enum bfi_tskim_status { | |||
| 828 | */ | 841 | */ |
| 829 | BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ | 842 | BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ |
| 830 | BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ | 843 | BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ |
| 844 | BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */ | ||
| 831 | }; | 845 | }; |
| 832 | 846 | ||
| 833 | struct bfi_tskim_rsp_s { | 847 | struct bfi_tskim_rsp_s { |
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h index ed5f159e1867..99133bcf53f9 100644 --- a/drivers/scsi/bfa/bfi_reg.h +++ b/drivers/scsi/bfa/bfi_reg.h | |||
| @@ -338,6 +338,7 @@ enum { | |||
| 338 | #define __A2T_AHB_LOAD 0x00000800 | 338 | #define __A2T_AHB_LOAD 0x00000800 |
| 339 | #define __WGN_READY 0x00000400 | 339 | #define __WGN_READY 0x00000400 |
| 340 | #define __GLBL_PF_VF_CFG_RDY 0x00000200 | 340 | #define __GLBL_PF_VF_CFG_RDY 0x00000200 |
| 341 | #define CT2_NFC_STS_REG 0x00027410 | ||
| 341 | #define CT2_NFC_CSR_CLR_REG 0x00027420 | 342 | #define CT2_NFC_CSR_CLR_REG 0x00027420 |
| 342 | #define CT2_NFC_CSR_SET_REG 0x00027424 | 343 | #define CT2_NFC_CSR_SET_REG 0x00027424 |
| 343 | #define __HALT_NFC_CONTROLLER 0x00000002 | 344 | #define __HALT_NFC_CONTROLLER 0x00000002 |
| @@ -355,6 +356,8 @@ enum { | |||
| 355 | (CT2_CSI_MAC0_CONTROL_REG + \ | 356 | (CT2_CSI_MAC0_CONTROL_REG + \ |
| 356 | (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) | 357 | (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) |
| 357 | 358 | ||
| 359 | #define CT2_NFC_FLASH_STS_REG 0x00014834 | ||
| 360 | #define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020 | ||
| 358 | /* | 361 | /* |
| 359 | * Name semaphore registers based on usage | 362 | * Name semaphore registers based on usage |
| 360 | */ | 363 | */ |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 078d262ac7cc..666b7ac4475f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
| @@ -1643,7 +1643,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) | |||
| 1643 | skb_reset_network_header(skb); | 1643 | skb_reset_network_header(skb); |
| 1644 | skb->mac_len = elen; | 1644 | skb->mac_len = elen; |
| 1645 | skb->protocol = htons(ETH_P_FCOE); | 1645 | skb->protocol = htons(ETH_P_FCOE); |
| 1646 | skb->priority = port->priority; | 1646 | skb->priority = fcoe->priority; |
| 1647 | 1647 | ||
| 1648 | if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && | 1648 | if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && |
| 1649 | fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { | 1649 | fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { |
| @@ -1917,7 +1917,6 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier, | |||
| 1917 | struct fcoe_ctlr *ctlr; | 1917 | struct fcoe_ctlr *ctlr; |
| 1918 | struct fcoe_interface *fcoe; | 1918 | struct fcoe_interface *fcoe; |
| 1919 | struct net_device *netdev; | 1919 | struct net_device *netdev; |
| 1920 | struct fcoe_port *port; | ||
| 1921 | int prio; | 1920 | int prio; |
| 1922 | 1921 | ||
| 1923 | if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE) | 1922 | if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE) |
| @@ -1946,10 +1945,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier, | |||
| 1946 | entry->app.protocol == ETH_P_FCOE) | 1945 | entry->app.protocol == ETH_P_FCOE) |
| 1947 | ctlr->priority = prio; | 1946 | ctlr->priority = prio; |
| 1948 | 1947 | ||
| 1949 | if (entry->app.protocol == ETH_P_FCOE) { | 1948 | if (entry->app.protocol == ETH_P_FCOE) |
| 1950 | port = lport_priv(ctlr->lp); | 1949 | fcoe->priority = prio; |
| 1951 | port->priority = prio; | ||
| 1952 | } | ||
| 1953 | 1950 | ||
| 1954 | return NOTIFY_OK; | 1951 | return NOTIFY_OK; |
| 1955 | } | 1952 | } |
| @@ -2180,7 +2177,6 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe) | |||
| 2180 | u8 fup, up; | 2177 | u8 fup, up; |
| 2181 | struct net_device *netdev = fcoe->realdev; | 2178 | struct net_device *netdev = fcoe->realdev; |
| 2182 | struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); | 2179 | struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); |
| 2183 | struct fcoe_port *port = lport_priv(ctlr->lp); | ||
| 2184 | struct dcb_app app = { | 2180 | struct dcb_app app = { |
| 2185 | .priority = 0, | 2181 | .priority = 0, |
| 2186 | .protocol = ETH_P_FCOE | 2182 | .protocol = ETH_P_FCOE |
| @@ -2202,8 +2198,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe) | |||
| 2202 | fup = dcb_getapp(netdev, &app); | 2198 | fup = dcb_getapp(netdev, &app); |
| 2203 | } | 2199 | } |
| 2204 | 2200 | ||
| 2205 | port->priority = ffs(up) ? ffs(up) - 1 : 0; | 2201 | fcoe->priority = ffs(up) ? ffs(up) - 1 : 0; |
| 2206 | ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority; | 2202 | ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority; |
| 2207 | } | 2203 | } |
| 2208 | #endif | 2204 | #endif |
| 2209 | } | 2205 | } |
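Moving priority from fcoe_port (per-lport, so per VN_Port) to fcoe_interface (1:1 with the net device) puts the DCB APP priority where it actually belongs: it is a property of the Ethernet interface, so NPIV ports on the same netdev now share one value and the DCB notifier no longer needs to reach through ctlr->lp. The ffs() dance converts DCB's priority bitmap into the lowest set priority. A runnable illustration of that conversion:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    /* DCB reports user priorities as a bitmap; FCoE wants one 3-bit
     * priority. ffs() returns the 1-based bit position, 0 if empty. */
    static unsigned char prio_from_bitmap(unsigned char up)
    {
            return ffs(up) ? ffs(up) - 1 : 0;
    }

    int main(void)
    {
            printf("%u\n", prio_from_bitmap(0x08));  /* bit 3 -> prio 3 */
            printf("%u\n", prio_from_bitmap(0x00));  /* empty -> prio 0 */
            return 0;
    }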
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index a624add4f8ec..b42dc32cb5eb 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h | |||
| @@ -71,6 +71,7 @@ do { \ | |||
| 71 | * @oem: The offload exchange manager for all local port | 71 | * @oem: The offload exchange manager for all local port |
| 72 | * instances associated with this port | 72 | * instances associated with this port |
| 73 | * @removed: Indicates fcoe interface removed from net device | 73 | * @removed: Indicates fcoe interface removed from net device |
| 74 | * @priority: Priority for the FCoE packet (DCB) | ||
| 74 | * This structure is 1:1 with a net device. | 75 | * This structure is 1:1 with a net device. |
| 75 | */ | 76 | */ |
| 76 | struct fcoe_interface { | 77 | struct fcoe_interface { |
| @@ -81,6 +82,7 @@ struct fcoe_interface { | |||
| 81 | struct packet_type fip_packet_type; | 82 | struct packet_type fip_packet_type; |
| 82 | struct fc_exch_mgr *oem; | 83 | struct fc_exch_mgr *oem; |
| 83 | u8 removed; | 84 | u8 removed; |
| 85 | u8 priority; | ||
| 84 | }; | 86 | }; |
| 85 | 87 | ||
| 86 | #define fcoe_to_ctlr(x) \ | 88 | #define fcoe_to_ctlr(x) \ |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 14243fa5f8e8..fcb9d0b20ee4 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
| @@ -851,7 +851,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
| 851 | fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); | 851 | fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); |
| 852 | if (flags & FCP_RSP_LEN_VAL) { | 852 | if (flags & FCP_RSP_LEN_VAL) { |
| 853 | respl = ntohl(rp_ex->fr_rsp_len); | 853 | respl = ntohl(rp_ex->fr_rsp_len); |
| 854 | if (respl != sizeof(*fc_rp_info)) | 854 | if ((respl != FCP_RESP_RSP_INFO_LEN4) && |
| 855 | (respl != FCP_RESP_RSP_INFO_LEN8)) | ||
| 855 | goto len_err; | 856 | goto len_err; |
| 856 | if (fsp->wait_for_comp) { | 857 | if (fsp->wait_for_comp) { |
| 857 | /* Abuse cdb_status for rsp code */ | 858 | /* Abuse cdb_status for rsp code */ |
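FC-FCP allows the FCP_RSP_INFO field to be either 4 or 8 bytes (8 when the optional reserved doubleword is present), so checking respl against a single sizeof() rejected valid 8-byte responses from some targets. The fix compares against both spec constants instead. An equivalent check, with the constants restated locally for illustration:

    /* Sketch: accept both legal FCP_RSP_INFO lengths (4 or 8 bytes). */
    #define FCP_RESP_RSP_INFO_LEN4 4   /* no reserved doubleword */
    #define FCP_RESP_RSP_INFO_LEN8 8   /* with reserved doubleword */

    static int rsp_info_len_ok(unsigned int respl)
    {
            return respl == FCP_RESP_RSP_INFO_LEN4 ||
                   respl == FCP_RESP_RSP_INFO_LEN8;
    }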
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index a184c2443a64..69b59935b53f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | 27 | ||
| 28 | struct lpfc_sli2_slim; | 28 | struct lpfc_sli2_slim; |
| 29 | 29 | ||
| 30 | #define ELX_MODEL_NAME_SIZE 80 | ||
| 31 | |||
| 30 | #define LPFC_PCI_DEV_LP 0x1 | 32 | #define LPFC_PCI_DEV_LP 0x1 |
| 31 | #define LPFC_PCI_DEV_OC 0x2 | 33 | #define LPFC_PCI_DEV_OC 0x2 |
| 32 | 34 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index b032562aa0d9..ad16e54ac383 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -3935,6 +3935,12 @@ MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions"); | |||
| 3935 | # - Only meaningful if BG is turned on (lpfc_enable_bg=1). | 3935 | # - Only meaningful if BG is turned on (lpfc_enable_bg=1). |
| 3936 | # - Allows you to ultimately specify which profiles to use | 3936 | # - Allows you to ultimately specify which profiles to use |
| 3937 | # - Default will result in registering capabilities for all profiles. | 3937 | # - Default will result in registering capabilities for all profiles. |
| 3938 | # - SHOST_DIF_TYPE1_PROTECTION 1 | ||
| 3939 | # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection | ||
| 3940 | # - SHOST_DIX_TYPE0_PROTECTION 8 | ||
| 3941 | # HBA supports DIX Type 0: Host to HBA protection only | ||
| 3942 | # - SHOST_DIX_TYPE1_PROTECTION 16 | ||
| 3943 | # HBA supports DIX Type 1: Host to HBA Type 1 protection | ||
| 3938 | # | 3944 | # |
| 3939 | */ | 3945 | */ |
| 3940 | unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | | 3946 | unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | |
| @@ -3947,7 +3953,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); | |||
| 3947 | /* | 3953 | /* |
| 3948 | # lpfc_prot_guard: i | 3954 | # lpfc_prot_guard: i |
| 3949 | # - Bit mask of protection guard types to register with the SCSI mid-layer | 3955 | # - Bit mask of protection guard types to register with the SCSI mid-layer |
| 3950 | # - Guard types are currently either 1) IP checksum 2) T10-DIF CRC | 3956 | # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum |
| 3951 | # - Allows you to ultimately specify which profiles to use | 3957 | # - Allows you to ultimately specify which profiles to use |
| 3952 | # - Default will result in registering capabilities for all guard types | 3958 | # - Default will result in registering capabilities for all guard types |
| 3953 | # | 3959 | # |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index e470c489de07..4380a44000bc 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
| @@ -467,3 +467,4 @@ int lpfc_sli4_read_config(struct lpfc_hba *); | |||
| 467 | void lpfc_sli4_node_prep(struct lpfc_hba *); | 467 | void lpfc_sli4_node_prep(struct lpfc_hba *); |
| 468 | int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); | 468 | int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); |
| 469 | void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); | 469 | void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); |
| 470 | uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); | ||
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index cfe533bc9790..f19e9b6f9f13 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
| @@ -809,6 +809,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
| 809 | phba->fc_ratov = FF_DEF_RATOV; | 809 | phba->fc_ratov = FF_DEF_RATOV; |
| 810 | rc = memcmp(&vport->fc_portname, &sp->portName, | 810 | rc = memcmp(&vport->fc_portname, &sp->portName, |
| 811 | sizeof(vport->fc_portname)); | 811 | sizeof(vport->fc_portname)); |
| 812 | memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); | ||
| 813 | |||
| 812 | if (rc >= 0) { | 814 | if (rc >= 0) { |
| 813 | /* This side will initiate the PLOGI */ | 815 | /* This side will initiate the PLOGI */ |
| 814 | spin_lock_irq(shost->host_lock); | 816 | spin_lock_irq(shost->host_lock); |
| @@ -3160,7 +3162,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 3160 | retry = 1; | 3162 | retry = 1; |
| 3161 | break; | 3163 | break; |
| 3162 | } | 3164 | } |
| 3163 | if (cmd == ELS_CMD_PLOGI) { | 3165 | if ((cmd == ELS_CMD_PLOGI) || |
| 3166 | (cmd == ELS_CMD_PRLI)) { | ||
| 3164 | delay = 1000; | 3167 | delay = 1000; |
| 3165 | maxretry = lpfc_max_els_tries + 1; | 3168 | maxretry = lpfc_max_els_tries + 1; |
| 3166 | retry = 1; | 3169 | retry = 1; |
| @@ -3305,7 +3308,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
| 3305 | ndlp->nlp_prev_state = ndlp->nlp_state; | 3308 | ndlp->nlp_prev_state = ndlp->nlp_state; |
| 3306 | if (cmd == ELS_CMD_PRLI) | 3309 | if (cmd == ELS_CMD_PRLI) |
| 3307 | lpfc_nlp_set_state(vport, ndlp, | 3310 | lpfc_nlp_set_state(vport, ndlp, |
| 3308 | NLP_STE_REG_LOGIN_ISSUE); | 3311 | NLP_STE_PRLI_ISSUE); |
| 3309 | else | 3312 | else |
| 3310 | lpfc_nlp_set_state(vport, ndlp, | 3313 | lpfc_nlp_set_state(vport, ndlp, |
| 3311 | NLP_STE_NPR_NODE); | 3314 | NLP_STE_NPR_NODE); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e9845d2ecf10..d7096ad94d3f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
| @@ -1506,9 +1506,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, | |||
| 1506 | } | 1506 | } |
| 1507 | } | 1507 | } |
| 1508 | 1508 | ||
| 1509 | /* If FCF not available return 0 */ | 1509 | /* FCF not valid/available or solicitation in progress */ |
| 1510 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || | 1510 | if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || |
| 1511 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) | 1511 | !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || |
| 1512 | bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) | ||
| 1512 | return 0; | 1513 | return 0; |
| 1513 | 1514 | ||
| 1514 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { | 1515 | if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { |
| @@ -1842,6 +1843,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, | |||
| 1842 | "\tFCF_Index : x%x\n" | 1843 | "\tFCF_Index : x%x\n" |
| 1843 | "\tFCF_Avail : x%x\n" | 1844 | "\tFCF_Avail : x%x\n" |
| 1844 | "\tFCF_Valid : x%x\n" | 1845 | "\tFCF_Valid : x%x\n" |
| 1846 | "\tFCF_SOL : x%x\n" | ||
| 1845 | "\tFIP_Priority : x%x\n" | 1847 | "\tFIP_Priority : x%x\n" |
| 1846 | "\tMAC_Provider : x%x\n" | 1848 | "\tMAC_Provider : x%x\n" |
| 1847 | "\tLowest VLANID : x%x\n" | 1849 | "\tLowest VLANID : x%x\n" |
| @@ -1852,6 +1854,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, | |||
| 1852 | bf_get(lpfc_fcf_record_fcf_index, fcf_record), | 1854 | bf_get(lpfc_fcf_record_fcf_index, fcf_record), |
| 1853 | bf_get(lpfc_fcf_record_fcf_avail, fcf_record), | 1855 | bf_get(lpfc_fcf_record_fcf_avail, fcf_record), |
| 1854 | bf_get(lpfc_fcf_record_fcf_valid, fcf_record), | 1856 | bf_get(lpfc_fcf_record_fcf_valid, fcf_record), |
| 1857 | bf_get(lpfc_fcf_record_fcf_sol, fcf_record), | ||
| 1855 | fcf_record->fip_priority, | 1858 | fcf_record->fip_priority, |
| 1856 | bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), | 1859 | bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), |
| 1857 | vlan_id, | 1860 | vlan_id, |
| @@ -2185,12 +2188,14 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 2185 | new_fcf_record)); | 2188 | new_fcf_record)); |
| 2186 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 2189 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
| 2187 | "2781 FCF (x%x) failed connection " | 2190 | "2781 FCF (x%x) failed connection " |
| 2188 | "list check: (x%x/x%x)\n", | 2191 | "list check: (x%x/x%x/%x)\n", |
| 2189 | bf_get(lpfc_fcf_record_fcf_index, | 2192 | bf_get(lpfc_fcf_record_fcf_index, |
| 2190 | new_fcf_record), | 2193 | new_fcf_record), |
| 2191 | bf_get(lpfc_fcf_record_fcf_avail, | 2194 | bf_get(lpfc_fcf_record_fcf_avail, |
| 2192 | new_fcf_record), | 2195 | new_fcf_record), |
| 2193 | bf_get(lpfc_fcf_record_fcf_valid, | 2196 | bf_get(lpfc_fcf_record_fcf_valid, |
| 2197 | new_fcf_record), | ||
| 2198 | bf_get(lpfc_fcf_record_fcf_sol, | ||
| 2194 | new_fcf_record)); | 2199 | new_fcf_record)); |
| 2195 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && | 2200 | if ((phba->fcf.fcf_flag & FCF_IN_USE) && |
| 2196 | lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, | 2201 | lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 834b699cac76..2cdeb5434fb7 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
| @@ -1305,6 +1305,11 @@ struct lpfc_mbx_mq_create_ext { | |||
| 1305 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK | 1305 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK |
| 1306 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 | 1306 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 |
| 1307 | #define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap | 1307 | #define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap |
| 1308 | #define LPFC_EVT_CODE_LINK_NO_LINK 0x0 | ||
| 1309 | #define LPFC_EVT_CODE_LINK_10_MBIT 0x1 | ||
| 1310 | #define LPFC_EVT_CODE_LINK_100_MBIT 0x2 | ||
| 1311 | #define LPFC_EVT_CODE_LINK_1_GBIT 0x3 | ||
| 1312 | #define LPFC_EVT_CODE_LINK_10_GBIT 0x4 | ||
| 1308 | #define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE | 1313 | #define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE |
| 1309 | #define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 | 1314 | #define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 |
| 1310 | #define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap | 1315 | #define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap |
| @@ -1314,6 +1319,13 @@ struct lpfc_mbx_mq_create_ext { | |||
| 1314 | #define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC | 1319 | #define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC |
| 1315 | #define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 | 1320 | #define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 |
| 1316 | #define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap | 1321 | #define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap |
| 1322 | #define LPFC_EVT_CODE_FC_NO_LINK 0x0 | ||
| 1323 | #define LPFC_EVT_CODE_FC_1_GBAUD 0x1 | ||
| 1324 | #define LPFC_EVT_CODE_FC_2_GBAUD 0x2 | ||
| 1325 | #define LPFC_EVT_CODE_FC_4_GBAUD 0x4 | ||
| 1326 | #define LPFC_EVT_CODE_FC_8_GBAUD 0x8 | ||
| 1327 | #define LPFC_EVT_CODE_FC_10_GBAUD 0xA | ||
| 1328 | #define LPFC_EVT_CODE_FC_16_GBAUD 0x10 | ||
| 1317 | #define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI | 1329 | #define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI |
| 1318 | #define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 | 1330 | #define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 |
| 1319 | #define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap | 1331 | #define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap |
| @@ -1695,8 +1707,14 @@ struct fcf_record { | |||
| 1695 | #define lpfc_fcf_record_fc_map_2_MASK 0x000000FF | 1707 | #define lpfc_fcf_record_fc_map_2_MASK 0x000000FF |
| 1696 | #define lpfc_fcf_record_fc_map_2_WORD word7 | 1708 | #define lpfc_fcf_record_fc_map_2_WORD word7 |
| 1697 | #define lpfc_fcf_record_fcf_valid_SHIFT 24 | 1709 | #define lpfc_fcf_record_fcf_valid_SHIFT 24 |
| 1698 | #define lpfc_fcf_record_fcf_valid_MASK 0x000000FF | 1710 | #define lpfc_fcf_record_fcf_valid_MASK 0x00000001 |
| 1699 | #define lpfc_fcf_record_fcf_valid_WORD word7 | 1711 | #define lpfc_fcf_record_fcf_valid_WORD word7 |
| 1712 | #define lpfc_fcf_record_fcf_fc_SHIFT 25 | ||
| 1713 | #define lpfc_fcf_record_fcf_fc_MASK 0x00000001 | ||
| 1714 | #define lpfc_fcf_record_fcf_fc_WORD word7 | ||
| 1715 | #define lpfc_fcf_record_fcf_sol_SHIFT 31 | ||
| 1716 | #define lpfc_fcf_record_fcf_sol_MASK 0x00000001 | ||
| 1717 | #define lpfc_fcf_record_fcf_sol_WORD word7 | ||
| 1700 | uint32_t word8; | 1718 | uint32_t word8; |
| 1701 | #define lpfc_fcf_record_fcf_index_SHIFT 0 | 1719 | #define lpfc_fcf_record_fcf_index_SHIFT 0 |
| 1702 | #define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF | 1720 | #define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF |
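The fcf_valid mask narrows from 0x000000FF to 0x00000001 because bits 25 (fcf_fc) and 31 (fcf_sol) of word7 now carry their own flags; with the old eight-bit mask those neighbors would leak into the validity test. A minimal standalone illustration, with the field layout assumed from the defines above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t word7 = (1u << 24) | (1u << 31);   /* fcf_valid + fcf_sol set */
        printf("old 8-bit mask: x%x\n", (word7 >> 24) & 0xFF); /* x81: sol leaks in */
        printf("new 1-bit mask: x%x\n", (word7 >> 24) & 0x01); /* x1: valid only */
        return 0;
    }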
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 8a55a586dd65..7dc4218d9c4c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -1892,8 +1892,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
| 1892 | max_speed = 4; | 1892 | max_speed = 4; |
| 1893 | else if (phba->lmt & LMT_2Gb) | 1893 | else if (phba->lmt & LMT_2Gb) |
| 1894 | max_speed = 2; | 1894 | max_speed = 2; |
| 1895 | else | 1895 | else if (phba->lmt & LMT_1Gb) |
| 1896 | max_speed = 1; | 1896 | max_speed = 1; |
| 1897 | else | ||
| 1898 | max_speed = 0; | ||
| 1897 | 1899 | ||
| 1898 | vp = &phba->vpd; | 1900 | vp = &phba->vpd; |
| 1899 | 1901 | ||
| @@ -2078,9 +2080,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
| 2078 | if (descp && descp[0] == '\0') { | 2080 | if (descp && descp[0] == '\0') { |
| 2079 | if (oneConnect) | 2081 | if (oneConnect) |
| 2080 | snprintf(descp, 255, | 2082 | snprintf(descp, 255, |
| 2081 | "Emulex OneConnect %s, %s Initiator, Port %s", | 2083 | "Emulex OneConnect %s, %s Initiator %s", |
| 2082 | m.name, m.function, | 2084 | m.name, m.function, |
| 2083 | phba->Port); | 2085 | phba->Port); |
| 2086 | else if (max_speed == 0) | ||
| 2087 | snprintf(descp, 255, | ||
| 2088 | "Emulex %s %s %s ", | ||
| 2089 | m.name, m.bus, m.function); | ||
| 2084 | else | 2090 | else |
| 2085 | snprintf(descp, 255, | 2091 | snprintf(descp, 255, |
| 2086 | "Emulex %s %d%s %s %s", | 2092 | "Emulex %s %d%s %s %s", |
| @@ -3502,6 +3508,119 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, | |||
| 3502 | } | 3508 | } |
| 3503 | 3509 | ||
| 3504 | /** | 3510 | /** |
| 3511 | * lpfc_sli_port_speed_get - Get an SLI3 FC port's link speed | ||
| 3512 | * @phba: pointer to lpfc hba data structure. | ||
| 3513 | * | ||
| 3514 | * This routine returns an SLI3 FC port's current link speed in Mbps. | ||
| 3515 | * | ||
| 3516 | * Return: link speed in terms of Mbps. | ||
| 3517 | **/ | ||
| 3518 | uint32_t | ||
| 3519 | lpfc_sli_port_speed_get(struct lpfc_hba *phba) | ||
| 3520 | { | ||
| 3521 | uint32_t link_speed; | ||
| 3522 | |||
| 3523 | if (!lpfc_is_link_up(phba)) | ||
| 3524 | return 0; | ||
| 3525 | |||
| 3526 | switch (phba->fc_linkspeed) { | ||
| 3527 | case LPFC_LINK_SPEED_1GHZ: | ||
| 3528 | link_speed = 1000; | ||
| 3529 | break; | ||
| 3530 | case LPFC_LINK_SPEED_2GHZ: | ||
| 3531 | link_speed = 2000; | ||
| 3532 | break; | ||
| 3533 | case LPFC_LINK_SPEED_4GHZ: | ||
| 3534 | link_speed = 4000; | ||
| 3535 | break; | ||
| 3536 | case LPFC_LINK_SPEED_8GHZ: | ||
| 3537 | link_speed = 8000; | ||
| 3538 | break; | ||
| 3539 | case LPFC_LINK_SPEED_10GHZ: | ||
| 3540 | link_speed = 10000; | ||
| 3541 | break; | ||
| 3542 | case LPFC_LINK_SPEED_16GHZ: | ||
| 3543 | link_speed = 16000; | ||
| 3544 | break; | ||
| 3545 | default: | ||
| 3546 | link_speed = 0; | ||
| 3547 | } | ||
| 3548 | return link_speed; | ||
| 3549 | } | ||
| 3550 | |||
| 3551 | /** | ||
| 3552 | * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed | ||
| 3553 | * @phba: pointer to lpfc hba data structure. | ||
| 3554 | * @evt_code: asynchronous event code. | ||
| 3555 | * @speed_code: asynchronous event link speed code. | ||
| 3556 | * | ||
| 3557 | * This routine parses the given SLI4 async event link speed code into a | ||
| 3558 | * link speed value in Mbps. | ||
| 3559 | * | ||
| 3560 | * Return: link speed in terms of Mbps. | ||
| 3561 | **/ | ||
| 3562 | static uint32_t | ||
| 3563 | lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, | ||
| 3564 | uint8_t speed_code) | ||
| 3565 | { | ||
| 3566 | uint32_t port_speed; | ||
| 3567 | |||
| 3568 | switch (evt_code) { | ||
| 3569 | case LPFC_TRAILER_CODE_LINK: | ||
| 3570 | switch (speed_code) { | ||
| 3571 | case LPFC_EVT_CODE_LINK_NO_LINK: | ||
| 3572 | port_speed = 0; | ||
| 3573 | break; | ||
| 3574 | case LPFC_EVT_CODE_LINK_10_MBIT: | ||
| 3575 | port_speed = 10; | ||
| 3576 | break; | ||
| 3577 | case LPFC_EVT_CODE_LINK_100_MBIT: | ||
| 3578 | port_speed = 100; | ||
| 3579 | break; | ||
| 3580 | case LPFC_EVT_CODE_LINK_1_GBIT: | ||
| 3581 | port_speed = 1000; | ||
| 3582 | break; | ||
| 3583 | case LPFC_EVT_CODE_LINK_10_GBIT: | ||
| 3584 | port_speed = 10000; | ||
| 3585 | break; | ||
| 3586 | default: | ||
| 3587 | port_speed = 0; | ||
| 3588 | } | ||
| 3589 | break; | ||
| 3590 | case LPFC_TRAILER_CODE_FC: | ||
| 3591 | switch (speed_code) { | ||
| 3592 | case LPFC_EVT_CODE_FC_NO_LINK: | ||
| 3593 | port_speed = 0; | ||
| 3594 | break; | ||
| 3595 | case LPFC_EVT_CODE_FC_1_GBAUD: | ||
| 3596 | port_speed = 1000; | ||
| 3597 | break; | ||
| 3598 | case LPFC_EVT_CODE_FC_2_GBAUD: | ||
| 3599 | port_speed = 2000; | ||
| 3600 | break; | ||
| 3601 | case LPFC_EVT_CODE_FC_4_GBAUD: | ||
| 3602 | port_speed = 4000; | ||
| 3603 | break; | ||
| 3604 | case LPFC_EVT_CODE_FC_8_GBAUD: | ||
| 3605 | port_speed = 8000; | ||
| 3606 | break; | ||
| 3607 | case LPFC_EVT_CODE_FC_10_GBAUD: | ||
| 3608 | port_speed = 10000; | ||
| 3609 | break; | ||
| 3610 | case LPFC_EVT_CODE_FC_16_GBAUD: | ||
| 3611 | port_speed = 16000; | ||
| 3612 | break; | ||
| 3613 | default: | ||
| 3614 | port_speed = 0; | ||
| 3615 | } | ||
| 3616 | break; | ||
| 3617 | default: | ||
| 3618 | port_speed = 0; | ||
| 3619 | } | ||
| 3620 | return port_speed; | ||
| 3621 | } | ||
| 3622 | |||
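Note that the FC speed codes are not a linear sequence (0xA for 10 Gbaud, 0x10 for 16 Gbaud), which is why the routine uses a switch rather than arithmetic on the code. A standalone sketch of the same FC mapping, with the LPFC_EVT_CODE_FC_* values inlined from the header hunk above:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fc_speed_mbps(uint8_t speed_code)
    {
        switch (speed_code) {
        case 0x1:  return 1000;   /* LPFC_EVT_CODE_FC_1_GBAUD */
        case 0x2:  return 2000;   /* LPFC_EVT_CODE_FC_2_GBAUD */
        case 0x4:  return 4000;   /* LPFC_EVT_CODE_FC_4_GBAUD */
        case 0x8:  return 8000;   /* LPFC_EVT_CODE_FC_8_GBAUD */
        case 0xA:  return 10000;  /* LPFC_EVT_CODE_FC_10_GBAUD: 0xA, not 10 */
        case 0x10: return 16000;  /* LPFC_EVT_CODE_FC_16_GBAUD: 0x10, not 16 */
        default:   return 0;      /* no link or an unrecognized code */
        }
    }

    int main(void)
    {
        printf("code 0xA -> %u Mbps\n", fc_speed_mbps(0xA));   /* 10000 */
        return 0;
    }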
| 3623 | /** | ||
| 3505 | * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event | 3624 | * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event |
| 3506 | * @phba: pointer to lpfc hba data structure. | 3625 | * @phba: pointer to lpfc hba data structure. |
| 3507 | * @acqe_link: pointer to the async link completion queue entry. | 3626 | * @acqe_link: pointer to the async link completion queue entry. |
| @@ -3558,7 +3677,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
| 3558 | 3677 | ||
| 3559 | /* Keep the link status for extra SLI4 state machine reference */ | 3678 | /* Keep the link status for extra SLI4 state machine reference */ |
| 3560 | phba->sli4_hba.link_state.speed = | 3679 | phba->sli4_hba.link_state.speed = |
| 3561 | bf_get(lpfc_acqe_link_speed, acqe_link); | 3680 | lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, |
| 3681 | bf_get(lpfc_acqe_link_speed, acqe_link)); | ||
| 3562 | phba->sli4_hba.link_state.duplex = | 3682 | phba->sli4_hba.link_state.duplex = |
| 3563 | bf_get(lpfc_acqe_link_duplex, acqe_link); | 3683 | bf_get(lpfc_acqe_link_duplex, acqe_link); |
| 3564 | phba->sli4_hba.link_state.status = | 3684 | phba->sli4_hba.link_state.status = |
| @@ -3570,7 +3690,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
| 3570 | phba->sli4_hba.link_state.fault = | 3690 | phba->sli4_hba.link_state.fault = |
| 3571 | bf_get(lpfc_acqe_link_fault, acqe_link); | 3691 | bf_get(lpfc_acqe_link_fault, acqe_link); |
| 3572 | phba->sli4_hba.link_state.logical_speed = | 3692 | phba->sli4_hba.link_state.logical_speed = |
| 3573 | bf_get(lpfc_acqe_logical_link_speed, acqe_link); | 3693 | bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; |
| 3694 | |||
| 3574 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 3695 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 3575 | "2900 Async FC/FCoE Link event - Speed:%dGBit " | 3696 | "2900 Async FC/FCoE Link event - Speed:%dGBit " |
| 3576 | "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " | 3697 | "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " |
| @@ -3580,7 +3701,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
| 3580 | phba->sli4_hba.link_state.status, | 3701 | phba->sli4_hba.link_state.status, |
| 3581 | phba->sli4_hba.link_state.type, | 3702 | phba->sli4_hba.link_state.type, |
| 3582 | phba->sli4_hba.link_state.number, | 3703 | phba->sli4_hba.link_state.number, |
| 3583 | phba->sli4_hba.link_state.logical_speed * 10, | 3704 | phba->sli4_hba.link_state.logical_speed, |
| 3584 | phba->sli4_hba.link_state.fault); | 3705 | phba->sli4_hba.link_state.fault); |
| 3585 | /* | 3706 | /* |
| 3586 | * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch | 3707 | * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch |
| @@ -3652,7 +3773,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | |||
| 3652 | } | 3773 | } |
| 3653 | /* Keep the link status for extra SLI4 state machine reference */ | 3774 | /* Keep the link status for extra SLI4 state machine reference */ |
| 3654 | phba->sli4_hba.link_state.speed = | 3775 | phba->sli4_hba.link_state.speed = |
| 3655 | bf_get(lpfc_acqe_fc_la_speed, acqe_fc); | 3776 | lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, |
| 3777 | bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); | ||
| 3656 | phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; | 3778 | phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; |
| 3657 | phba->sli4_hba.link_state.topology = | 3779 | phba->sli4_hba.link_state.topology = |
| 3658 | bf_get(lpfc_acqe_fc_la_topology, acqe_fc); | 3780 | bf_get(lpfc_acqe_fc_la_topology, acqe_fc); |
| @@ -3665,7 +3787,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | |||
| 3665 | phba->sli4_hba.link_state.fault = | 3787 | phba->sli4_hba.link_state.fault = |
| 3666 | bf_get(lpfc_acqe_link_fault, acqe_fc); | 3788 | bf_get(lpfc_acqe_link_fault, acqe_fc); |
| 3667 | phba->sli4_hba.link_state.logical_speed = | 3789 | phba->sli4_hba.link_state.logical_speed = |
| 3668 | bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); | 3790 | bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; |
| 3669 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 3791 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 3670 | "2896 Async FC event - Speed:%dGBaud Topology:x%x " | 3792 | "2896 Async FC event - Speed:%dGBaud Topology:x%x " |
| 3671 | "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" | 3793 | "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" |
| @@ -3675,7 +3797,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | |||
| 3675 | phba->sli4_hba.link_state.status, | 3797 | phba->sli4_hba.link_state.status, |
| 3676 | phba->sli4_hba.link_state.type, | 3798 | phba->sli4_hba.link_state.type, |
| 3677 | phba->sli4_hba.link_state.number, | 3799 | phba->sli4_hba.link_state.number, |
| 3678 | phba->sli4_hba.link_state.logical_speed * 10, | 3800 | phba->sli4_hba.link_state.logical_speed, |
| 3679 | phba->sli4_hba.link_state.fault); | 3801 | phba->sli4_hba.link_state.fault); |
| 3680 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 3802 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 3681 | if (!pmb) { | 3803 | if (!pmb) { |
| @@ -3783,14 +3905,18 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) | |||
| 3783 | case LPFC_SLI_EVENT_STATUS_VALID: | 3905 | case LPFC_SLI_EVENT_STATUS_VALID: |
| 3784 | return; /* no message if the sfp is okay */ | 3906 | return; /* no message if the sfp is okay */ |
| 3785 | case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: | 3907 | case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: |
| 3786 | sprintf(message, "Not installed"); | 3908 | sprintf(message, "Optics faulted/incorrectly installed/not " \ |
| 3909 | "installed - Reseat optics, if issue not " | ||
| 3910 | "resolved, replace."); | ||
| 3787 | break; | 3911 | break; |
| 3788 | case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: | 3912 | case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: |
| 3789 | sprintf(message, | 3913 | sprintf(message, |
| 3790 | "Optics of two types installed"); | 3914 | "Optics of two types installed - Remove one optic or " \ |
| 3915 | "install matching pair of optics."); | ||
| 3791 | break; | 3916 | break; |
| 3792 | case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: | 3917 | case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: |
| 3793 | sprintf(message, "Incompatible optics"); | 3918 | sprintf(message, "Incompatible optics - Replace with " \ |
| 3919 | "compatible optics for card to function."); | ||
| 3794 | break; | 3920 | break; |
| 3795 | default: | 3921 | default: |
| 3796 | /* firmware is reporting a status we don't know about */ | 3922 | /* firmware is reporting a status we don't know about */ |
| @@ -4161,11 +4287,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, | |||
| 4161 | phba->fcoe_eventtag = acqe_grp5->event_tag; | 4287 | phba->fcoe_eventtag = acqe_grp5->event_tag; |
| 4162 | prev_ll_spd = phba->sli4_hba.link_state.logical_speed; | 4288 | prev_ll_spd = phba->sli4_hba.link_state.logical_speed; |
| 4163 | phba->sli4_hba.link_state.logical_speed = | 4289 | phba->sli4_hba.link_state.logical_speed = |
| 4164 | (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); | 4290 | (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; |
| 4165 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 4291 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 4166 | "2789 GRP5 Async Event: Updating logical link speed " | 4292 | "2789 GRP5 Async Event: Updating logical link speed " |
| 4167 | "from %dMbps to %dMbps\n", (prev_ll_spd * 10), | 4293 | "from %dMbps to %dMbps\n", prev_ll_spd, |
| 4168 | (phba->sli4_hba.link_state.logical_speed*10)); | 4294 | phba->sli4_hba.link_state.logical_speed); |
| 4169 | } | 4295 | } |
| 4170 | 4296 | ||
| 4171 | /** | 4297 | /** |
| @@ -4947,7 +5073,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
| 4947 | } | 5073 | } |
| 4948 | 5074 | ||
| 4949 | phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * | 5075 | phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * |
| 4950 | phba->sli4_hba.cfg_eqn), GFP_KERNEL); | 5076 | phba->cfg_fcp_io_channel), GFP_KERNEL); |
| 4951 | if (!phba->sli4_hba.msix_entries) { | 5077 | if (!phba->sli4_hba.msix_entries) { |
| 4952 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5078 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 4953 | "2573 Failed allocate memory for msi-x " | 5079 | "2573 Failed allocate memory for msi-x " |
| @@ -6559,7 +6685,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) | |||
| 6559 | i++; | 6685 | i++; |
| 6560 | } | 6686 | } |
| 6561 | if (i < cfg_fcp_io_channel) { | 6687 | if (i < cfg_fcp_io_channel) { |
| 6562 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 6688 | lpfc_printf_log(phba, |
| 6689 | KERN_ERR, LOG_INIT, | ||
| 6563 | "3188 Reducing IO channels to match number of " | 6690 | "3188 Reducing IO channels to match number of " |
| 6564 | "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); | 6691 | "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); |
| 6565 | cfg_fcp_io_channel = i; | 6692 | cfg_fcp_io_channel = i; |
| @@ -6567,8 +6694,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) | |||
| 6567 | 6694 | ||
| 6568 | if (cfg_fcp_io_channel > | 6695 | if (cfg_fcp_io_channel > |
| 6569 | phba->sli4_hba.max_cfg_param.max_eq) { | 6696 | phba->sli4_hba.max_cfg_param.max_eq) { |
| 6570 | cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; | 6697 | if (phba->sli4_hba.max_cfg_param.max_eq < |
| 6571 | if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) { | 6698 | LPFC_FCP_IO_CHAN_MIN) { |
| 6572 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 6699 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 6573 | "2574 Not enough EQs (%d) from the " | 6700 | "2574 Not enough EQs (%d) from the " |
| 6574 | "pci function for supporting FCP " | 6701 | "pci function for supporting FCP " |
| @@ -6577,13 +6704,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) | |||
| 6577 | phba->cfg_fcp_io_channel); | 6704 | phba->cfg_fcp_io_channel); |
| 6578 | goto out_error; | 6705 | goto out_error; |
| 6579 | } | 6706 | } |
| 6580 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 6707 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 6581 | "2575 Not enough EQs (%d) from the pci " | 6708 | "2575 Reducing IO channels to match number of " |
| 6582 | "function for supporting the requested " | 6709 | "available EQs: from %d to %d\n", |
| 6583 | "FCP EQs (%d), the actual FCP EQs can " | 6710 | cfg_fcp_io_channel, |
| 6584 | "be supported: %d\n", | 6711 | phba->sli4_hba.max_cfg_param.max_eq); |
| 6585 | phba->sli4_hba.max_cfg_param.max_eq, | 6712 | cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; |
| 6586 | phba->cfg_fcp_io_channel, cfg_fcp_io_channel); | ||
| 6587 | } | 6713 | } |
| 6588 | 6714 | ||
| 6589 | /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */ | 6715 | /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */ |
| @@ -6592,7 +6718,6 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) | |||
| 6592 | phba->cfg_fcp_eq_count = cfg_fcp_io_channel; | 6718 | phba->cfg_fcp_eq_count = cfg_fcp_io_channel; |
| 6593 | phba->cfg_fcp_wq_count = cfg_fcp_io_channel; | 6719 | phba->cfg_fcp_wq_count = cfg_fcp_io_channel; |
| 6594 | phba->cfg_fcp_io_channel = cfg_fcp_io_channel; | 6720 | phba->cfg_fcp_io_channel = cfg_fcp_io_channel; |
| 6595 | phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel; | ||
| 6596 | 6721 | ||
| 6597 | /* Get EQ depth from module parameter, fake the default for now */ | 6722 | /* Get EQ depth from module parameter, fake the default for now */ |
| 6598 | phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; | 6723 | phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; |
| @@ -8095,11 +8220,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) | |||
| 8095 | int vectors, rc, index; | 8220 | int vectors, rc, index; |
| 8096 | 8221 | ||
| 8097 | /* Set up MSI-X multi-message vectors */ | 8222 | /* Set up MSI-X multi-message vectors */ |
| 8098 | for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) | 8223 | for (index = 0; index < phba->cfg_fcp_io_channel; index++) |
| 8099 | phba->sli4_hba.msix_entries[index].entry = index; | 8224 | phba->sli4_hba.msix_entries[index].entry = index; |
| 8100 | 8225 | ||
| 8101 | /* Configure MSI-X capability structure */ | 8226 | /* Configure MSI-X capability structure */ |
| 8102 | vectors = phba->sli4_hba.cfg_eqn; | 8227 | vectors = phba->cfg_fcp_io_channel; |
| 8103 | enable_msix_vectors: | 8228 | enable_msix_vectors: |
| 8104 | rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, | 8229 | rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, |
| 8105 | vectors); | 8230 | vectors); |
| @@ -8142,8 +8267,14 @@ enable_msix_vectors: | |||
| 8142 | goto cfg_fail_out; | 8267 | goto cfg_fail_out; |
| 8143 | } | 8268 | } |
| 8144 | } | 8269 | } |
| 8145 | phba->sli4_hba.msix_vec_nr = vectors; | ||
| 8146 | 8270 | ||
| 8271 | if (vectors != phba->cfg_fcp_io_channel) { | ||
| 8272 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 8273 | "3238 Reducing IO channels to match number of " | ||
| 8274 | "MSI-X vectors, requested %d got %d\n", | ||
| 8275 | phba->cfg_fcp_io_channel, vectors); | ||
| 8276 | phba->cfg_fcp_io_channel = vectors; | ||
| 8277 | } | ||
| 8147 | return rc; | 8278 | return rc; |
| 8148 | 8279 | ||
| 8149 | cfg_fail_out: | 8280 | cfg_fail_out: |
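pci_enable_msix() of this kernel generation returns 0 on success, a negative errno on hard failure, or a positive count of vectors actually available, in which case the caller is expected to retry with the smaller count; that is the role of the enable_msix_vectors: label above. A condensed sketch of that retry idiom, with placeholder names:

    #include <linux/pci.h>

    /* Placeholder helper; lpfc does the same via its retry label. */
    static int enable_msix_best_effort(struct pci_dev *pdev,
                                       struct msix_entry *entries, int want)
    {
        int rc, vectors = want;

        do {
            rc = pci_enable_msix(pdev, entries, vectors);
            if (rc > 0)           /* only rc vectors are available */
                vectors = rc;     /* retry with the reduced count */
        } while (rc > 0);

        return rc ? rc : vectors; /* negative errno, or vectors granted */
    }

With fewer vectors granted than requested, the hunk above then shrinks cfg_fcp_io_channel to match, which is why the old cfg_eqn/msix_vec_nr bookkeeping can go away.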
| @@ -8171,7 +8302,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) | |||
| 8171 | int index; | 8302 | int index; |
| 8172 | 8303 | ||
| 8173 | /* Free up MSI-X multi-message vectors */ | 8304 | /* Free up MSI-X multi-message vectors */ |
| 8174 | for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++) | 8305 | for (index = 0; index < phba->cfg_fcp_io_channel; index++) |
| 8175 | free_irq(phba->sli4_hba.msix_entries[index].vector, | 8306 | free_irq(phba->sli4_hba.msix_entries[index].vector, |
| 8176 | &phba->sli4_hba.fcp_eq_hdl[index]); | 8307 | &phba->sli4_hba.fcp_eq_hdl[index]); |
| 8177 | 8308 | ||
| @@ -9304,23 +9435,28 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) | |||
| 9304 | 9435 | ||
| 9305 | /** | 9436 | /** |
| 9306 | * lpfc_write_firmware - attempt to write a firmware image to the port | 9437 | * lpfc_write_firmware - attempt to write a firmware image to the port |
| 9307 | * @phba: pointer to lpfc hba data structure. | ||
| 9308 | * @fw: pointer to firmware image returned from request_firmware. | 9438 | * @fw: pointer to firmware image returned from request_firmware. |
| 9439 | * @phba: pointer to lpfc hba data structure. | ||
| 9309 | * | 9440 | * |
| 9310 | * returns the number of bytes written if write is successful. | ||
| 9311 | * returns a negative error value if there were errors. | ||
| 9312 | * returns 0 if firmware matches currently active firmware on port. | ||
| 9313 | **/ | 9441 | **/ |
| 9314 | int | 9442 | static void |
| 9315 | lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) | 9443 | lpfc_write_firmware(const struct firmware *fw, void *context) |
| 9316 | { | 9444 | { |
| 9445 | struct lpfc_hba *phba = (struct lpfc_hba *)context; | ||
| 9317 | char fwrev[FW_REV_STR_SIZE]; | 9446 | char fwrev[FW_REV_STR_SIZE]; |
| 9318 | struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; | 9447 | struct lpfc_grp_hdr *image; |
| 9319 | struct list_head dma_buffer_list; | 9448 | struct list_head dma_buffer_list; |
| 9320 | int i, rc = 0; | 9449 | int i, rc = 0; |
| 9321 | struct lpfc_dmabuf *dmabuf, *next; | 9450 | struct lpfc_dmabuf *dmabuf, *next; |
| 9322 | uint32_t offset = 0, temp_offset = 0; | 9451 | uint32_t offset = 0, temp_offset = 0; |
| 9323 | 9452 | ||
| 9453 | /* fw can be NULL if the firmware load failed; sanity check */ | ||
| 9454 | if (!fw) { | ||
| 9455 | rc = -ENXIO; | ||
| 9456 | goto out; | ||
| 9457 | } | ||
| 9458 | image = (struct lpfc_grp_hdr *)fw->data; | ||
| 9459 | |||
| 9324 | INIT_LIST_HEAD(&dma_buffer_list); | 9460 | INIT_LIST_HEAD(&dma_buffer_list); |
| 9325 | if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || | 9461 | if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || |
| 9326 | (bf_get_be32(lpfc_grp_hdr_file_type, image) != | 9462 | (bf_get_be32(lpfc_grp_hdr_file_type, image) != |
| @@ -9333,12 +9469,13 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) | |||
| 9333 | be32_to_cpu(image->magic_number), | 9469 | be32_to_cpu(image->magic_number), |
| 9334 | bf_get_be32(lpfc_grp_hdr_file_type, image), | 9470 | bf_get_be32(lpfc_grp_hdr_file_type, image), |
| 9335 | bf_get_be32(lpfc_grp_hdr_id, image)); | 9471 | bf_get_be32(lpfc_grp_hdr_id, image)); |
| 9336 | return -EINVAL; | 9472 | rc = -EINVAL; |
| 9473 | goto release_out; | ||
| 9337 | } | 9474 | } |
| 9338 | lpfc_decode_firmware_rev(phba, fwrev, 1); | 9475 | lpfc_decode_firmware_rev(phba, fwrev, 1); |
| 9339 | if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { | 9476 | if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { |
| 9340 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 9477 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 9341 | "3023 Updating Firmware. Current Version:%s " | 9478 | "3023 Updating Firmware, Current Version:%s " |
| 9342 | "New Version:%s\n", | 9479 | "New Version:%s\n", |
| 9343 | fwrev, image->revision); | 9480 | fwrev, image->revision); |
| 9344 | for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { | 9481 | for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { |
| @@ -9346,7 +9483,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) | |||
| 9346 | GFP_KERNEL); | 9483 | GFP_KERNEL); |
| 9347 | if (!dmabuf) { | 9484 | if (!dmabuf) { |
| 9348 | rc = -ENOMEM; | 9485 | rc = -ENOMEM; |
| 9349 | goto out; | 9486 | goto release_out; |
| 9350 | } | 9487 | } |
| 9351 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, | 9488 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
| 9352 | SLI4_PAGE_SIZE, | 9489 | SLI4_PAGE_SIZE, |
| @@ -9355,7 +9492,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) | |||
| 9355 | if (!dmabuf->virt) { | 9492 | if (!dmabuf->virt) { |
| 9356 | kfree(dmabuf); | 9493 | kfree(dmabuf); |
| 9357 | rc = -ENOMEM; | 9494 | rc = -ENOMEM; |
| 9358 | goto out; | 9495 | goto release_out; |
| 9359 | } | 9496 | } |
| 9360 | list_add_tail(&dmabuf->list, &dma_buffer_list); | 9497 | list_add_tail(&dmabuf->list, &dma_buffer_list); |
| 9361 | } | 9498 | } |
| @@ -9375,23 +9512,24 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) | |||
| 9375 | } | 9512 | } |
| 9376 | rc = lpfc_wr_object(phba, &dma_buffer_list, | 9513 | rc = lpfc_wr_object(phba, &dma_buffer_list, |
| 9377 | (fw->size - offset), &offset); | 9514 | (fw->size - offset), &offset); |
| 9378 | if (rc) { | 9515 | if (rc) |
| 9379 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 9516 | goto release_out; |
| 9380 | "3024 Firmware update failed. " | ||
| 9381 | "%d\n", rc); | ||
| 9382 | goto out; | ||
| 9383 | } | ||
| 9384 | } | 9517 | } |
| 9385 | rc = offset; | 9518 | rc = offset; |
| 9386 | } | 9519 | } |
| 9387 | out: | 9520 | |
| 9521 | release_out: | ||
| 9388 | list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { | 9522 | list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { |
| 9389 | list_del(&dmabuf->list); | 9523 | list_del(&dmabuf->list); |
| 9390 | dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, | 9524 | dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, |
| 9391 | dmabuf->virt, dmabuf->phys); | 9525 | dmabuf->virt, dmabuf->phys); |
| 9392 | kfree(dmabuf); | 9526 | kfree(dmabuf); |
| 9393 | } | 9527 | } |
| 9394 | return rc; | 9528 | release_firmware(fw); |
| 9529 | out: | ||
| 9530 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 9531 | "3024 Firmware update done: %d.", rc); | ||
| 9532 | return; | ||
| 9395 | } | 9533 | } |
| 9396 | 9534 | ||
| 9397 | /** | 9535 | /** |
| @@ -9418,12 +9556,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 9418 | struct lpfc_hba *phba; | 9556 | struct lpfc_hba *phba; |
| 9419 | struct lpfc_vport *vport = NULL; | 9557 | struct lpfc_vport *vport = NULL; |
| 9420 | struct Scsi_Host *shost = NULL; | 9558 | struct Scsi_Host *shost = NULL; |
| 9421 | int error; | 9559 | int error, ret; |
| 9422 | uint32_t cfg_mode, intr_mode; | 9560 | uint32_t cfg_mode, intr_mode; |
| 9423 | int mcnt; | 9561 | int mcnt; |
| 9424 | int adjusted_fcp_io_channel; | 9562 | int adjusted_fcp_io_channel; |
| 9425 | const struct firmware *fw; | 9563 | uint8_t file_name[ELX_MODEL_NAME_SIZE]; |
| 9426 | uint8_t file_name[16]; | ||
| 9427 | 9564 | ||
| 9428 | /* Allocate memory for HBA structure */ | 9565 | /* Allocate memory for HBA structure */ |
| 9429 | phba = lpfc_hba_alloc(pdev); | 9566 | phba = lpfc_hba_alloc(pdev); |
| @@ -9525,9 +9662,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 9525 | /* Default to single EQ for non-MSI-X */ | 9662 | /* Default to single EQ for non-MSI-X */ |
| 9526 | if (phba->intr_type != MSIX) | 9663 | if (phba->intr_type != MSIX) |
| 9527 | adjusted_fcp_io_channel = 1; | 9664 | adjusted_fcp_io_channel = 1; |
| 9528 | else if (phba->sli4_hba.msix_vec_nr < | ||
| 9529 | phba->cfg_fcp_io_channel) | ||
| 9530 | adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr; | ||
| 9531 | else | 9665 | else |
| 9532 | adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; | 9666 | adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; |
| 9533 | phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; | 9667 | phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; |
| @@ -9572,12 +9706,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 9572 | /* check for firmware upgrade or downgrade (if_type 2 only) */ | 9706 | /* check for firmware upgrade or downgrade (if_type 2 only) */ |
| 9573 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | 9707 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
| 9574 | LPFC_SLI_INTF_IF_TYPE_2) { | 9708 | LPFC_SLI_INTF_IF_TYPE_2) { |
| 9575 | snprintf(file_name, 16, "%s.grp", phba->ModelName); | 9709 | snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", |
| 9576 | error = request_firmware(&fw, file_name, &phba->pcidev->dev); | 9710 | phba->ModelName); |
| 9577 | if (!error) { | 9711 | ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, |
| 9578 | lpfc_write_firmware(phba, fw); | 9712 | file_name, &phba->pcidev->dev, |
| 9579 | release_firmware(fw); | 9713 | GFP_KERNEL, (void *)phba, |
| 9580 | } | 9714 | lpfc_write_firmware); |
| 9581 | } | 9715 | } |
| 9582 | 9716 | ||
| 9583 | /* Check if there are static vports to be created. */ | 9717 | /* Check if there are static vports to be created. */ |
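The probe path switches from the blocking request_firmware() to request_firmware_nowait(), so probe no longer stalls waiting for userspace to supply the .grp file; lpfc_write_firmware() becomes the completion callback and now owns releasing the image. A minimal sketch of this callback pattern, with a hypothetical driver context standing in for struct lpfc_hba:

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/gfp.h>
    #include <linux/module.h>

    /* Hypothetical context; stands in for struct lpfc_hba. */
    struct my_hba {
        struct device *dev;
    };

    static void my_fw_cont(const struct firmware *fw, void *context)
    {
        struct my_hba *hba = context;

        if (!fw) {                /* load failed or the file was absent */
            dev_warn(hba->dev, "firmware not available\n");
            return;
        }
        /* ... flash fw->data / fw->size to the port ... */
        release_firmware(fw);     /* the callback owns the image */
    }

    static int my_fw_kick(struct my_hba *hba, const char *file_name)
    {
        /* Returns at once; my_fw_cont() runs later from a helper thread. */
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       file_name, hba->dev, GFP_KERNEL,
                                       hba, my_fw_cont);
    }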
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 64013f3097ad..7f45ac9964a9 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
| @@ -3829,9 +3829,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 3829 | cmd->scsi_done(cmd); | 3829 | cmd->scsi_done(cmd); |
| 3830 | 3830 | ||
| 3831 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | 3831 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { |
| 3832 | spin_lock_irq(&phba->hbalock); | 3832 | spin_lock_irqsave(&phba->hbalock, flags); |
| 3833 | lpfc_cmd->pCmd = NULL; | 3833 | lpfc_cmd->pCmd = NULL; |
| 3834 | spin_unlock_irq(&phba->hbalock); | 3834 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 3835 | 3835 | ||
| 3836 | /* | 3836 | /* |
| 3837 | * If there is a thread waiting for command completion | 3837 | * If there is a thread waiting for command completion |
| @@ -3871,9 +3871,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 3871 | } | 3871 | } |
| 3872 | } | 3872 | } |
| 3873 | 3873 | ||
| 3874 | spin_lock_irq(&phba->hbalock); | 3874 | spin_lock_irqsave(&phba->hbalock, flags); |
| 3875 | lpfc_cmd->pCmd = NULL; | 3875 | lpfc_cmd->pCmd = NULL; |
| 3876 | spin_unlock_irq(&phba->hbalock); | 3876 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 3877 | 3877 | ||
| 3878 | /* | 3878 | /* |
| 3879 | * If there is a thread waiting for command completion | 3879 | * If there is a thread waiting for command completion |
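The switch from spin_lock_irq() to spin_lock_irqsave() matters because spin_unlock_irq() unconditionally re-enables interrupts, which is unsafe if the completion path can be entered with interrupts already disabled; the irqsave/irqrestore pair preserves whatever the caller's interrupt state was. The general idiom, as a small kernel-style sketch with a placeholder lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);   /* placeholder for phba->hbalock */

    static void touch_shared_state(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);      /* saves current IRQ state */
        /* ... update fields shared with the interrupt handler ... */
        spin_unlock_irqrestore(&demo_lock, flags); /* restores it, instead of
                                                    * force-enabling IRQs as
                                                    * spin_unlock_irq() would */
    }

The same substitution is applied throughout lpfc_abort_handler() in the hunks below.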
| @@ -4163,7 +4163,7 @@ lpfc_info(struct Scsi_Host *host) | |||
| 4163 | { | 4163 | { |
| 4164 | struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; | 4164 | struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; |
| 4165 | struct lpfc_hba *phba = vport->phba; | 4165 | struct lpfc_hba *phba = vport->phba; |
| 4166 | int len; | 4166 | int len, link_speed = 0; |
| 4167 | static char lpfcinfobuf[384]; | 4167 | static char lpfcinfobuf[384]; |
| 4168 | 4168 | ||
| 4169 | memset(lpfcinfobuf,0,384); | 4169 | memset(lpfcinfobuf,0,384); |
| @@ -4184,12 +4184,18 @@ lpfc_info(struct Scsi_Host *host) | |||
| 4184 | phba->Port); | 4184 | phba->Port); |
| 4185 | } | 4185 | } |
| 4186 | len = strlen(lpfcinfobuf); | 4186 | len = strlen(lpfcinfobuf); |
| 4187 | if (phba->sli4_hba.link_state.logical_speed) { | 4187 | if (phba->sli_rev <= LPFC_SLI_REV3) { |
| 4188 | snprintf(lpfcinfobuf + len, | 4188 | link_speed = lpfc_sli_port_speed_get(phba); |
| 4189 | 384-len, | 4189 | } else { |
| 4190 | " Logical Link Speed: %d Mbps", | 4190 | if (phba->sli4_hba.link_state.logical_speed) |
| 4191 | phba->sli4_hba.link_state.logical_speed * 10); | 4191 | link_speed = |
| 4192 | phba->sli4_hba.link_state.logical_speed; | ||
| 4193 | else | ||
| 4194 | link_speed = phba->sli4_hba.link_state.speed; | ||
| 4192 | } | 4195 | } |
| 4196 | if (link_speed != 0) | ||
| 4197 | snprintf(lpfcinfobuf + len, 384-len, | ||
| 4198 | " Logical Link Speed: %d Mbps", link_speed); | ||
| 4193 | } | 4199 | } |
| 4194 | return lpfcinfobuf; | 4200 | return lpfcinfobuf; |
| 4195 | } | 4201 | } |
| @@ -4398,16 +4404,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4398 | struct lpfc_scsi_buf *lpfc_cmd; | 4404 | struct lpfc_scsi_buf *lpfc_cmd; |
| 4399 | IOCB_t *cmd, *icmd; | 4405 | IOCB_t *cmd, *icmd; |
| 4400 | int ret = SUCCESS, status = 0; | 4406 | int ret = SUCCESS, status = 0; |
| 4407 | unsigned long flags; | ||
| 4401 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); | 4408 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
| 4402 | 4409 | ||
| 4403 | status = fc_block_scsi_eh(cmnd); | 4410 | status = fc_block_scsi_eh(cmnd); |
| 4404 | if (status != 0 && status != SUCCESS) | 4411 | if (status != 0 && status != SUCCESS) |
| 4405 | return status; | 4412 | return status; |
| 4406 | 4413 | ||
| 4407 | spin_lock_irq(&phba->hbalock); | 4414 | spin_lock_irqsave(&phba->hbalock, flags); |
| 4408 | /* driver queued commands are in process of being flushed */ | 4415 | /* driver queued commands are in process of being flushed */ |
| 4409 | if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { | 4416 | if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { |
| 4410 | spin_unlock_irq(&phba->hbalock); | 4417 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4411 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4418 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4412 | "3168 SCSI Layer abort requested I/O has been " | 4419 | "3168 SCSI Layer abort requested I/O has been " |
| 4413 | "flushed by LLD.\n"); | 4420 | "flushed by LLD.\n"); |
| @@ -4416,7 +4423,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4416 | 4423 | ||
| 4417 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; | 4424 | lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; |
| 4418 | if (!lpfc_cmd || !lpfc_cmd->pCmd) { | 4425 | if (!lpfc_cmd || !lpfc_cmd->pCmd) { |
| 4419 | spin_unlock_irq(&phba->hbalock); | 4426 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4420 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4427 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4421 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " | 4428 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " |
| 4422 | "x%x ID %d LUN %d\n", | 4429 | "x%x ID %d LUN %d\n", |
| @@ -4427,7 +4434,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4427 | iocb = &lpfc_cmd->cur_iocbq; | 4434 | iocb = &lpfc_cmd->cur_iocbq; |
| 4428 | /* the command is in process of being cancelled */ | 4435 | /* the command is in process of being cancelled */ |
| 4429 | if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { | 4436 | if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
| 4430 | spin_unlock_irq(&phba->hbalock); | 4437 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4431 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4438 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4432 | "3169 SCSI Layer abort requested I/O has been " | 4439 | "3169 SCSI Layer abort requested I/O has been " |
| 4433 | "cancelled by LLD.\n"); | 4440 | "cancelled by LLD.\n"); |
| @@ -4484,7 +4491,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4484 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; | 4491 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; |
| 4485 | abtsiocb->vport = vport; | 4492 | abtsiocb->vport = vport; |
| 4486 | /* no longer need the lock after this point */ | 4493 | /* no longer need the lock after this point */ |
| 4487 | spin_unlock_irq(&phba->hbalock); | 4494 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4488 | 4495 | ||
| 4489 | if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == | 4496 | if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == |
| 4490 | IOCB_ERROR) { | 4497 | IOCB_ERROR) { |
| @@ -4516,7 +4523,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4516 | goto out; | 4523 | goto out; |
| 4517 | 4524 | ||
| 4518 | out_unlock: | 4525 | out_unlock: |
| 4519 | spin_unlock_irq(&phba->hbalock); | 4526 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4520 | out: | 4527 | out: |
| 4521 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4528 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4522 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " | 4529 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 219bf534ef99..d7f3313ef886 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -3964,9 +3964,9 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) | |||
| 3964 | pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & | 3964 | pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & |
| 3965 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | 3965 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
| 3966 | 3966 | ||
| 3967 | /* Perform FCoE PCI function reset */ | 3967 | /* Perform FCoE PCI function reset before freeing queue memory */ |
| 3968 | lpfc_sli4_queue_destroy(phba); | ||
| 3969 | rc = lpfc_pci_function_reset(phba); | 3968 | rc = lpfc_pci_function_reset(phba); |
| 3969 | lpfc_sli4_queue_destroy(phba); | ||
| 3970 | 3970 | ||
| 3971 | /* Restore PCI cmd register */ | 3971 | /* Restore PCI cmd register */ |
| 3972 | pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); | 3972 | pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); |
| @@ -7072,6 +7072,40 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) | |||
| 7072 | } | 7072 | } |
| 7073 | 7073 | ||
| 7074 | /** | 7074 | /** |
| 7075 | * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready | ||
| 7076 | * @phba: Pointer to HBA context object. | ||
| 7077 | * @mboxq: Pointer to mailbox object. | ||
| 7078 | * | ||
| 7079 | * The function waits for the bootstrap mailbox register ready bit from | ||
| 7080 | * the port for twice the regular mailbox command timeout value. | ||
| 7081 | * | ||
| 7082 | * 0 - no timeout on waiting for bootstrap mailbox register ready. | ||
| 7083 | * MBXERR_ERROR - wait for bootstrap mailbox register timed out. | ||
| 7084 | **/ | ||
| 7085 | static int | ||
| 7086 | lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | ||
| 7087 | { | ||
| 7088 | uint32_t db_ready; | ||
| 7089 | unsigned long timeout; | ||
| 7090 | struct lpfc_register bmbx_reg; | ||
| 7091 | |||
| 7092 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) | ||
| 7093 | * 1000) + jiffies; | ||
| 7094 | |||
| 7095 | do { | ||
| 7096 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); | ||
| 7097 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); | ||
| 7098 | if (!db_ready) | ||
| 7099 | msleep(2); | ||
| 7100 | |||
| 7101 | if (time_after(jiffies, timeout)) | ||
| 7102 | return MBXERR_ERROR; | ||
| 7103 | } while (!db_ready); | ||
| 7104 | |||
| 7105 | return 0; | ||
| 7106 | } | ||
| 7107 | |||
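The new helper folds what were three copies of the same poll loop in lpfc_sli4_post_sync_mbox() (see the following hunks) into one place. The underlying poll-until-ready-or-timeout idiom, sketched generically with a placeholder register and bit:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>

    /* Placeholder register/bit; lpfc tests the lpfc_bmbx_rdy field instead. */
    static int wait_reg_ready(void __iomem *reg, u32 ready_bit,
                              unsigned int tmo_ms)
    {
        unsigned long timeout = jiffies + msecs_to_jiffies(tmo_ms);

        do {
            if (readl(reg) & ready_bit)
                return 0;            /* the bit came up in time */
            msleep(2);               /* back off between reads */
        } while (!time_after(jiffies, timeout));

        return -ETIMEDOUT;
    }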
| 7108 | /** | ||
| 7075 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox | 7109 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox |
| 7076 | * @phba: Pointer to HBA context object. | 7110 | * @phba: Pointer to HBA context object. |
| 7077 | * @mboxq: Pointer to mailbox object. | 7111 | * @mboxq: Pointer to mailbox object. |
| @@ -7092,15 +7126,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 7092 | { | 7126 | { |
| 7093 | int rc = MBX_SUCCESS; | 7127 | int rc = MBX_SUCCESS; |
| 7094 | unsigned long iflag; | 7128 | unsigned long iflag; |
| 7095 | uint32_t db_ready; | ||
| 7096 | uint32_t mcqe_status; | 7129 | uint32_t mcqe_status; |
| 7097 | uint32_t mbx_cmnd; | 7130 | uint32_t mbx_cmnd; |
| 7098 | unsigned long timeout; | ||
| 7099 | struct lpfc_sli *psli = &phba->sli; | 7131 | struct lpfc_sli *psli = &phba->sli; |
| 7100 | struct lpfc_mqe *mb = &mboxq->u.mqe; | 7132 | struct lpfc_mqe *mb = &mboxq->u.mqe; |
| 7101 | struct lpfc_bmbx_create *mbox_rgn; | 7133 | struct lpfc_bmbx_create *mbox_rgn; |
| 7102 | struct dma_address *dma_address; | 7134 | struct dma_address *dma_address; |
| 7103 | struct lpfc_register bmbx_reg; | ||
| 7104 | 7135 | ||
| 7105 | /* | 7136 | /* |
| 7106 | * Only one mailbox can be active to the bootstrap mailbox region | 7137 | * Only one mailbox can be active to the bootstrap mailbox region |
| @@ -7124,6 +7155,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 7124 | phba->sli.mbox_active = mboxq; | 7155 | phba->sli.mbox_active = mboxq; |
| 7125 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 7156 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
| 7126 | 7157 | ||
| 7158 | /* wait for the bootstrap mbox register to become ready */ | ||
| 7159 | rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); | ||
| 7160 | if (rc) | ||
| 7161 | goto exit; | ||
| 7162 | |||
| 7127 | /* | 7163 | /* |
| 7128 | * Initialize the bootstrap memory region to avoid stale data areas | 7164 | * Initialize the bootstrap memory region to avoid stale data areas |
| 7129 | * in the mailbox post. Then copy the caller's mailbox contents to | 7165 | * in the mailbox post. Then copy the caller's mailbox contents to |
| @@ -7138,35 +7174,18 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 7138 | dma_address = &phba->sli4_hba.bmbx.dma_address; | 7174 | dma_address = &phba->sli4_hba.bmbx.dma_address; |
| 7139 | writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); | 7175 | writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); |
| 7140 | 7176 | ||
| 7141 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) | 7177 | /* wait for bootstrap mbox register for hi-address write done */ |
| 7142 | * 1000) + jiffies; | 7178 | rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); |
| 7143 | do { | 7179 | if (rc) |
| 7144 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); | 7180 | goto exit; |
| 7145 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); | ||
| 7146 | if (!db_ready) | ||
| 7147 | msleep(2); | ||
| 7148 | |||
| 7149 | if (time_after(jiffies, timeout)) { | ||
| 7150 | rc = MBXERR_ERROR; | ||
| 7151 | goto exit; | ||
| 7152 | } | ||
| 7153 | } while (!db_ready); | ||
| 7154 | 7181 | ||
| 7155 | /* Post the low mailbox dma address to the port. */ | 7182 | /* Post the low mailbox dma address to the port. */ |
| 7156 | writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); | 7183 | writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); |
| 7157 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) | ||
| 7158 | * 1000) + jiffies; | ||
| 7159 | do { | ||
| 7160 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); | ||
| 7161 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); | ||
| 7162 | if (!db_ready) | ||
| 7163 | msleep(2); | ||
| 7164 | 7184 | ||
| 7165 | if (time_after(jiffies, timeout)) { | 7185 | /* wait for bootstrap mbox register for low address write done */ |
| 7166 | rc = MBXERR_ERROR; | 7186 | rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); |
| 7167 | goto exit; | 7187 | if (rc) |
| 7168 | } | 7188 | goto exit; |
| 7169 | } while (!db_ready); | ||
| 7170 | 7189 | ||
| 7171 | /* | 7190 | /* |
| 7172 | * Read the CQ to ensure the mailbox has completed. | 7191 | * Read the CQ to ensure the mailbox has completed. |
| @@ -8090,6 +8109,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 8090 | bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, | 8109 | bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, |
| 8091 | LPFC_WQE_LENLOC_NONE); | 8110 | LPFC_WQE_LENLOC_NONE); |
| 8092 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); | 8111 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); |
| 8112 | bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, | ||
| 8113 | iocbq->iocb.ulpFCP2Rcvy); | ||
| 8093 | break; | 8114 | break; |
| 8094 | case CMD_GEN_REQUEST64_CR: | 8115 | case CMD_GEN_REQUEST64_CR: |
| 8095 | /* For this command calculate the xmit length of the | 8116 | /* For this command calculate the xmit length of the |
| @@ -12099,6 +12120,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) | |||
| 12099 | struct lpfc_queue *eq; | 12120 | struct lpfc_queue *eq; |
| 12100 | int cnt, rc, length, status = 0; | 12121 | int cnt, rc, length, status = 0; |
| 12101 | uint32_t shdr_status, shdr_add_status; | 12122 | uint32_t shdr_status, shdr_add_status; |
| 12123 | uint32_t result; | ||
| 12102 | int fcp_eqidx; | 12124 | int fcp_eqidx; |
| 12103 | union lpfc_sli4_cfg_shdr *shdr; | 12125 | union lpfc_sli4_cfg_shdr *shdr; |
| 12104 | uint16_t dmult; | 12126 | uint16_t dmult; |
| @@ -12117,8 +12139,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) | |||
| 12117 | eq_delay = &mbox->u.mqe.un.eq_delay; | 12139 | eq_delay = &mbox->u.mqe.un.eq_delay; |
| 12118 | 12140 | ||
| 12119 | /* Calculate delay multiplier from maximum interrupts per second */ | 12141 | /* Calculate delay multiplier from maximum interrupts per second */ |
| 12120 | dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; | 12142 | result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; |
| 12121 | dmult = LPFC_DMULT_CONST/dmult - 1; | 12143 | if (result > LPFC_DMULT_CONST) |
| 12144 | dmult = 0; | ||
| 12145 | else | ||
| 12146 | dmult = LPFC_DMULT_CONST/result - 1; | ||
| 12122 | 12147 | ||
| 12123 | cnt = 0; | 12148 | cnt = 0; |
| 12124 | for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; | 12149 | for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; |
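The guard fixes an unsigned underflow: when interrupts-per-second per channel exceeds LPFC_DMULT_CONST, the integer division yields 0 and the old expression LPFC_DMULT_CONST/dmult - 1 wrapped to 0xFFFF in the uint16_t dmult, programming a bogus delay multiplier. A standalone demonstration (651 is assumed here as the value of LPFC_DMULT_CONST):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dmult_const = 651;   /* assumed value of LPFC_DMULT_CONST */
        uint32_t result = 1000;       /* interrupts-per-second per channel */
        uint16_t dmult = dmult_const / result - 1;  /* 0 - 1 wraps around */
        printf("unguarded dmult = 0x%x\n", dmult);  /* prints 0xffff */
        return 0;
    }

The lpfc_eq_create() hunk below applies the same clamp to the per-EQ imax path.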
| @@ -12174,7 +12199,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) | |||
| 12174 | * fails this function will return -ENXIO. | 12199 | * fails this function will return -ENXIO. |
| 12175 | **/ | 12200 | **/ |
| 12176 | uint32_t | 12201 | uint32_t |
| 12177 | lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) | 12202 | lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) |
| 12178 | { | 12203 | { |
| 12179 | struct lpfc_mbx_eq_create *eq_create; | 12204 | struct lpfc_mbx_eq_create *eq_create; |
| 12180 | LPFC_MBOXQ_t *mbox; | 12205 | LPFC_MBOXQ_t *mbox; |
| @@ -12206,7 +12231,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) | |||
| 12206 | LPFC_EQE_SIZE); | 12231 | LPFC_EQE_SIZE); |
| 12207 | bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); | 12232 | bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); |
| 12208 | /* Calculate delay multiplier from maximum interrupts per second */ | 12233 | /* Calculate delay multiplier from maximum interrupts per second */ |
| 12209 | dmult = LPFC_DMULT_CONST/imax - 1; | 12234 | if (imax > LPFC_DMULT_CONST) |
| 12235 | dmult = 0; | ||
| 12236 | else | ||
| 12237 | dmult = LPFC_DMULT_CONST/imax - 1; | ||
| 12210 | bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, | 12238 | bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, |
| 12211 | dmult); | 12239 | dmult); |
| 12212 | switch (eq->entry_count) { | 12240 | switch (eq->entry_count) { |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index bd4bc4342ae2..f44a06a4c6e7 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ | 37 | /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ |
| 38 | #define LPFC_FCP_IO_CHAN_DEF 4 | 38 | #define LPFC_FCP_IO_CHAN_DEF 4 |
| 39 | #define LPFC_FCP_IO_CHAN_MIN 1 | 39 | #define LPFC_FCP_IO_CHAN_MIN 1 |
| 40 | #define LPFC_FCP_IO_CHAN_MAX 8 | 40 | #define LPFC_FCP_IO_CHAN_MAX 16 |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Provide the default FCF Record attributes used by the driver | 43 | * Provide the default FCF Record attributes used by the driver |
| @@ -168,7 +168,7 @@ struct lpfc_queue { | |||
| 168 | }; | 168 | }; |
| 169 | 169 | ||
| 170 | struct lpfc_sli4_link { | 170 | struct lpfc_sli4_link { |
| 171 | uint8_t speed; | 171 | uint16_t speed; |
| 172 | uint8_t duplex; | 172 | uint8_t duplex; |
| 173 | uint8_t status; | 173 | uint8_t status; |
| 174 | uint8_t type; | 174 | uint8_t type; |
| @@ -490,8 +490,6 @@ struct lpfc_sli4_hba { | |||
| 490 | struct lpfc_pc_sli4_params pc_sli4_params; | 490 | struct lpfc_pc_sli4_params pc_sli4_params; |
| 491 | struct msix_entry *msix_entries; | 491 | struct msix_entry *msix_entries; |
| 492 | uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; | 492 | uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; |
| 493 | uint32_t cfg_eqn; | ||
| 494 | uint32_t msix_vec_nr; | ||
| 495 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ | 493 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ |
| 496 | 494 | ||
| 497 | /* Pointers to the constructed SLI4 queues */ | 495 | /* Pointers to the constructed SLI4 queues */ |
| @@ -626,7 +624,7 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *); | |||
| 626 | struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, | 624 | struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, |
| 627 | uint32_t); | 625 | uint32_t); |
| 628 | void lpfc_sli4_queue_free(struct lpfc_queue *); | 626 | void lpfc_sli4_queue_free(struct lpfc_queue *); |
| 629 | uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); | 627 | uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); |
| 630 | uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); | 628 | uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); |
| 631 | uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, | 629 | uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, |
| 632 | struct lpfc_queue *, uint32_t, uint32_t); | 630 | struct lpfc_queue *, uint32_t, uint32_t); |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 04265a1c4e52..0c2149189dda 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | * included with this package. * | 18 | * included with this package. * |
| 19 | *******************************************************************/ | 19 | *******************************************************************/ |
| 20 | 20 | ||
| 21 | #define LPFC_DRIVER_VERSION "8.3.34" | 21 | #define LPFC_DRIVER_VERSION "8.3.35" |
| 22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
| 23 | 23 | ||
| 24 | /* Used for SLI 2/3 */ | 24 | /* Used for SLI 2/3 */ |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fcb005fa4bd1..16b7a72a70c4 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux MegaRAID driver for SAS based RAID controllers | 2 | * Linux MegaRAID driver for SAS based RAID controllers |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009-2011 LSI Corporation. | 4 | * Copyright (c) 2003-2012 LSI Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| @@ -33,9 +33,9 @@ | |||
| 33 | /* | 33 | /* |
| 34 | * MegaRAID SAS Driver meta data | 34 | * MegaRAID SAS Driver meta data |
| 35 | */ | 35 | */ |
| 36 | #define MEGASAS_VERSION "00.00.06.18-rc1" | 36 | #define MEGASAS_VERSION "06.504.01.00-rc1" |
| 37 | #define MEGASAS_RELDATE "Jun. 17, 2012" | 37 | #define MEGASAS_RELDATE "Oct. 1, 2012" |
| 38 | #define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012" | 38 | #define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012" |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
| 41 | * Device IDs | 41 | * Device IDs |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0393ec478cdf..d2c5366aff7f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux MegaRAID driver for SAS based RAID controllers | 2 | * Linux MegaRAID driver for SAS based RAID controllers |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009-2011 LSI Corporation. | 4 | * Copyright (c) 2003-2012 LSI Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| @@ -18,7 +18,7 @@ | |||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 19 | * | 19 | * |
| 20 | * FILE: megaraid_sas_base.c | 20 | * FILE: megaraid_sas_base.c |
| 21 | * Version : v00.00.06.18-rc1 | 21 | * Version : v06.504.01.00-rc1 |
| 22 | * | 22 | * |
| 23 | * Authors: LSI Corporation | 23 | * Authors: LSI Corporation |
| 24 | * Sreenivas Bagalkote | 24 | * Sreenivas Bagalkote |
| @@ -71,6 +71,10 @@ static int msix_disable; | |||
| 71 | module_param(msix_disable, int, S_IRUGO); | 71 | module_param(msix_disable, int, S_IRUGO); |
| 72 | MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); | 72 | MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); |
| 73 | 73 | ||
| 74 | static unsigned int msix_vectors; | ||
| 75 | module_param(msix_vectors, int, S_IRUGO); | ||
| 76 | MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); | ||
| 77 | |||
| 74 | static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; | 78 | static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; |
| 75 | module_param(throttlequeuedepth, int, S_IRUGO); | 79 | module_param(throttlequeuedepth, int, S_IRUGO); |
| 76 | MODULE_PARM_DESC(throttlequeuedepth, | 80 | MODULE_PARM_DESC(throttlequeuedepth, |
| @@ -3520,6 +3524,10 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
| 3520 | instance->msix_vectors = (readl(&instance->reg_set-> | 3524 | instance->msix_vectors = (readl(&instance->reg_set-> |
| 3521 | outbound_scratch_pad_2 | 3525 | outbound_scratch_pad_2 |
| 3522 | ) & 0x1F) + 1; | 3526 | ) & 0x1F) + 1; |
| 3527 | if (msix_vectors) | ||
| 3528 | instance->msix_vectors = | ||
| 3529 | min(msix_vectors, | ||
| 3530 | instance->msix_vectors); | ||
| 3523 | } else | 3531 | } else |
| 3524 | instance->msix_vectors = 1; | 3532 | instance->msix_vectors = 1; |
| 3525 | /* Don't bother allocating more MSI-X vectors than cpus */ | 3533 | /* Don't bother allocating more MSI-X vectors than cpus */ |
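With the new msix_vectors module parameter, the effective vector count is the minimum of what firmware advertises and what the administrator requested, with 0 (unset) leaving the firmware value untouched. The clamp reduces to a one-liner, sketched here with placeholder names:

    /* param == 0 means "msix_vectors was not set on the command line". */
    static unsigned int effective_vectors(unsigned int fw_vectors,
                                          unsigned int param)
    {
        return (param && param < fw_vectors) ? param : fw_vectors;
    }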
| @@ -5233,7 +5241,6 @@ megasas_aen_polling(struct work_struct *work) | |||
| 5233 | 5241 | ||
| 5234 | case MR_EVT_PD_REMOVED: | 5242 | case MR_EVT_PD_REMOVED: |
| 5235 | if (megasas_get_pd_list(instance) == 0) { | 5243 | if (megasas_get_pd_list(instance) == 0) { |
| 5236 | megasas_get_pd_list(instance); | ||
| 5237 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { | 5244 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { |
| 5238 | for (j = 0; | 5245 | for (j = 0; |
| 5239 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | 5246 | j < MEGASAS_MAX_DEV_PER_CHANNEL; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index e3d251a2e26a..a11df82474ef 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux MegaRAID driver for SAS based RAID controllers | 2 | * Linux MegaRAID driver for SAS based RAID controllers |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009-2011 LSI Corporation. | 4 | * Copyright (c) 2009-2012 LSI Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ddf094e7d0ac..74030aff69ad 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux MegaRAID driver for SAS based RAID controllers | 2 | * Linux MegaRAID driver for SAS based RAID controllers |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009-2011 LSI Corporation. | 4 | * Copyright (c) 2009-2012 LSI Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| @@ -1184,8 +1184,6 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
| 1184 | io_request->CDB.EEDP32.PrimaryReferenceTag = | 1184 | io_request->CDB.EEDP32.PrimaryReferenceTag = |
| 1185 | cpu_to_be32(ref_tag); | 1185 | cpu_to_be32(ref_tag); |
| 1186 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; | 1186 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; |
| 1187 | |||
| 1188 | io_request->DataLength = num_blocks * 512; | ||
| 1189 | io_request->IoFlags = 32; /* Specify 32-byte cdb */ | 1187 | io_request->IoFlags = 32; /* Specify 32-byte cdb */ |
| 1190 | 1188 | ||
| 1191 | /* Transfer length */ | 1189 | /* Transfer length */ |
| @@ -1329,7 +1327,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1329 | struct megasas_cmd_fusion *cmd) | 1327 | struct megasas_cmd_fusion *cmd) |
| 1330 | { | 1328 | { |
| 1331 | u8 fp_possible; | 1329 | u8 fp_possible; |
| 1332 | u32 start_lba_lo, start_lba_hi, device_id; | 1330 | u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; |
| 1333 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | 1331 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; |
| 1334 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | 1332 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; |
| 1335 | struct IO_REQUEST_INFO io_info; | 1333 | struct IO_REQUEST_INFO io_info; |
| @@ -1355,7 +1353,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1355 | * 6-byte READ(0x08) or WRITE(0x0A) cdb | 1353 | * 6-byte READ(0x08) or WRITE(0x0A) cdb |
| 1356 | */ | 1354 | */ |
| 1357 | if (scp->cmd_len == 6) { | 1355 | if (scp->cmd_len == 6) { |
| 1358 | io_request->DataLength = (u32) scp->cmnd[4]; | 1356 | datalength = (u32) scp->cmnd[4]; |
| 1359 | start_lba_lo = ((u32) scp->cmnd[1] << 16) | | 1357 | start_lba_lo = ((u32) scp->cmnd[1] << 16) | |
| 1360 | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; | 1358 | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; |
| 1361 | 1359 | ||
| @@ -1366,7 +1364,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1366 | * 10-byte READ(0x28) or WRITE(0x2A) cdb | 1364 | * 10-byte READ(0x28) or WRITE(0x2A) cdb |
| 1367 | */ | 1365 | */ |
| 1368 | else if (scp->cmd_len == 10) { | 1366 | else if (scp->cmd_len == 10) { |
| 1369 | io_request->DataLength = (u32) scp->cmnd[8] | | 1367 | datalength = (u32) scp->cmnd[8] | |
| 1370 | ((u32) scp->cmnd[7] << 8); | 1368 | ((u32) scp->cmnd[7] << 8); |
| 1371 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | | 1369 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | |
| 1372 | ((u32) scp->cmnd[3] << 16) | | 1370 | ((u32) scp->cmnd[3] << 16) | |
| @@ -1377,7 +1375,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1377 | * 12-byte READ(0xA8) or WRITE(0xAA) cdb | 1375 | * 12-byte READ(0xA8) or WRITE(0xAA) cdb |
| 1378 | */ | 1376 | */ |
| 1379 | else if (scp->cmd_len == 12) { | 1377 | else if (scp->cmd_len == 12) { |
| 1380 | io_request->DataLength = ((u32) scp->cmnd[6] << 24) | | 1378 | datalength = ((u32) scp->cmnd[6] << 24) | |
| 1381 | ((u32) scp->cmnd[7] << 16) | | 1379 | ((u32) scp->cmnd[7] << 16) | |
| 1382 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; | 1380 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; |
| 1383 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | | 1381 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | |
| @@ -1389,7 +1387,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1389 | * 16-byte READ(0x88) or WRITE(0x8A) cdb | 1387 | * 16-byte READ(0x88) or WRITE(0x8A) cdb |
| 1390 | */ | 1388 | */ |
| 1391 | else if (scp->cmd_len == 16) { | 1389 | else if (scp->cmd_len == 16) { |
| 1392 | io_request->DataLength = ((u32) scp->cmnd[10] << 24) | | 1390 | datalength = ((u32) scp->cmnd[10] << 24) | |
| 1393 | ((u32) scp->cmnd[11] << 16) | | 1391 | ((u32) scp->cmnd[11] << 16) | |
| 1394 | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; | 1392 | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; |
| 1395 | start_lba_lo = ((u32) scp->cmnd[6] << 24) | | 1393 | start_lba_lo = ((u32) scp->cmnd[6] << 24) | |
| @@ -1403,8 +1401,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1403 | 1401 | ||
| 1404 | memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); | 1402 | memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); |
| 1405 | io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; | 1403 | io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; |
| 1406 | io_info.numBlocks = io_request->DataLength; | 1404 | io_info.numBlocks = datalength; |
| 1407 | io_info.ldTgtId = device_id; | 1405 | io_info.ldTgtId = device_id; |
| 1406 | io_request->DataLength = scsi_bufflen(scp); | ||
| 1408 | 1407 | ||
| 1409 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | 1408 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) |
| 1410 | io_info.isRead = 1; | 1409 | io_info.isRead = 1; |
| @@ -1431,7 +1430,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 1431 | if (fp_possible) { | 1430 | if (fp_possible) { |
| 1432 | megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, | 1431 | megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, |
| 1433 | local_map_ptr, start_lba_lo); | 1432 | local_map_ptr, start_lba_lo); |
| 1434 | io_request->DataLength = scsi_bufflen(scp); | ||
| 1435 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | 1433 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; |
| 1436 | cmd->request_desc->SCSIIO.RequestFlags = | 1434 | cmd->request_desc->SCSIIO.RequestFlags = |
| 1437 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY | 1435 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY |
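Editor's note: the LDIO hunks above stop writing the CDB-derived block count into io_request->DataLength; the count now lives in a local datalength used only for the RAID map lookup (io_info.numBlocks), while DataLength is always taken from scsi_bufflen(scp), fast path or not. A sketch of the 10-byte CDB decode feeding that split (hypothetical helper, same byte layout as the hunk):

	/* Sketch: decode READ(10)/WRITE(10) into LBA and block count.
	 * The block count drives the RAID map lookup; the byte count put
	 * into the I/O frame comes from scsi_bufflen(), not the CDB.
	 */
	static void decode_rw10(const u8 *cdb, u32 *lba, u32 *num_blocks)
	{
		*lba = ((u32)cdb[2] << 24) | ((u32)cdb[3] << 16) |
		       ((u32)cdb[4] << 8)  |  (u32)cdb[5];
		*num_blocks = ((u32)cdb[7] << 8) | (u32)cdb[8];
	}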
| @@ -1510,7 +1508,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
| 1510 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; | 1508 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; |
| 1511 | 1509 | ||
| 1512 | /* Check if this is a system PD I/O */ | 1510 | /* Check if this is a system PD I/O */ |
| 1513 | if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | 1511 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && |
| 1512 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | ||
| 1514 | io_request->Function = 0; | 1513 | io_request->Function = 0; |
| 1515 | io_request->DevHandle = | 1514 | io_request->DevHandle = |
| 1516 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | 1515 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; |
| @@ -1525,6 +1524,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
| 1525 | cmd->request_desc->SCSIIO.RequestFlags = | 1524 | cmd->request_desc->SCSIIO.RequestFlags = |
| 1526 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | 1525 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << |
| 1527 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 1526 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| 1527 | cmd->request_desc->SCSIIO.DevHandle = | ||
| 1528 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | ||
| 1528 | } else { | 1529 | } else { |
| 1529 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; | 1530 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; |
| 1530 | io_request->DevHandle = device_id; | 1531 | io_request->DevHandle = device_id; |
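Editor's note: the new channel test keeps pd_index (computed earlier in the function) from indexing pd_list[] out of bounds for targets that sit on the virtual LD channels; the && short-circuit matters, since the driveState read must not happen for such devices. A minimal sketch of the guard, field names as in the hunk:

	/* Sketch: only PD-channel devices may consult the pd_list[] state. */
	static bool is_system_pd_io(struct scsi_cmnd *scmd,
				    struct megasas_instance *inst, u16 pd_index)
	{
		return scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
		       inst->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM;
	}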
| @@ -1732,8 +1733,6 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) | |||
| 1732 | if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) | 1733 | if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) |
| 1733 | return IRQ_NONE; | 1734 | return IRQ_NONE; |
| 1734 | 1735 | ||
| 1735 | d_val.word = desc->Words; | ||
| 1736 | |||
| 1737 | num_completed = 0; | 1736 | num_completed = 0; |
| 1738 | 1737 | ||
| 1739 | while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { | 1738 | while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { |
| @@ -1855,10 +1854,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) | |||
| 1855 | } | 1854 | } |
| 1856 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 1855 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
| 1857 | 1856 | ||
| 1858 | spin_lock_irqsave(&instance->completion_lock, flags); | ||
| 1859 | for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) | 1857 | for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) |
| 1860 | complete_cmd_fusion(instance, MSIxIndex); | 1858 | complete_cmd_fusion(instance, MSIxIndex); |
| 1861 | spin_unlock_irqrestore(&instance->completion_lock, flags); | ||
| 1862 | } | 1859 | } |
| 1863 | 1860 | ||
| 1864 | /** | 1861 | /** |
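Editor's note: the hunk above drops completion_lock around the per-MSI-x loop, presumably because complete_cmd_fusion(instance, i) only touches the reply queue belonging to index i, so no driver-global lock is needed. A sketch of the resulting DPC shape under that assumption:

	u32 count, i;

	count = instance->msix_vectors ? instance->msix_vectors : 1;
	for (i = 0; i < count; i++)
		complete_cmd_fusion(instance, i);  /* queue i only; no shared lock */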
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 088c9f91da95..a7c64f051996 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux MegaRAID driver for SAS based RAID controllers | 2 | * Linux MegaRAID driver for SAS based RAID controllers |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2009-2011 LSI Corporation. | 4 | * Copyright (c) 2009-2012 LSI Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 783edc7c6b98..c585a925b3cd 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c | |||
| @@ -35,10 +35,12 @@ | |||
| 35 | #include <linux/io.h> | 35 | #include <linux/io.h> |
| 36 | #include <scsi/scsi.h> | 36 | #include <scsi/scsi.h> |
| 37 | #include <scsi/scsi_cmnd.h> | 37 | #include <scsi/scsi_cmnd.h> |
| 38 | #include <scsi/scsi_device.h> | ||
| 38 | #include <scsi/scsi_host.h> | 39 | #include <scsi/scsi_host.h> |
| 39 | #include <scsi/scsi_transport.h> | 40 | #include <scsi/scsi_transport.h> |
| 40 | #include <scsi/scsi_eh.h> | 41 | #include <scsi/scsi_eh.h> |
| 41 | #include <linux/uaccess.h> | 42 | #include <linux/uaccess.h> |
| 43 | #include <linux/kthread.h> | ||
| 42 | 44 | ||
| 43 | #include "mvumi.h" | 45 | #include "mvumi.h" |
| 44 | 46 | ||
| @@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver"); | |||
| 48 | 50 | ||
| 49 | static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { | 51 | static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { |
| 50 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, | 52 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, |
| 53 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) }, | ||
| 51 | { 0 } | 54 | { 0 } |
| 52 | }; | 55 | }; |
| 53 | 56 | ||
| @@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array) | |||
| 118 | static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, | 121 | static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, |
| 119 | enum resource_type type, unsigned int size) | 122 | enum resource_type type, unsigned int size) |
| 120 | { | 123 | { |
| 121 | struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL); | 124 | struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC); |
| 122 | 125 | ||
| 123 | if (!res) { | 126 | if (!res) { |
| 124 | dev_err(&mhba->pdev->dev, | 127 | dev_err(&mhba->pdev->dev, |
| @@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, | |||
| 128 | 131 | ||
| 129 | switch (type) { | 132 | switch (type) { |
| 130 | case RESOURCE_CACHED_MEMORY: | 133 | case RESOURCE_CACHED_MEMORY: |
| 131 | res->virt_addr = kzalloc(size, GFP_KERNEL); | 134 | res->virt_addr = kzalloc(size, GFP_ATOMIC); |
| 132 | if (!res->virt_addr) { | 135 | if (!res->virt_addr) { |
| 133 | dev_err(&mhba->pdev->dev, | 136 | dev_err(&mhba->pdev->dev, |
| 134 | "unable to allocate memory,size = %d.\n", size); | 137 | "unable to allocate memory,size = %d.\n", size); |
| @@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, | |||
| 222 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); | 225 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); |
| 223 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); | 226 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); |
| 224 | m_sg->flags = 0; | 227 | m_sg->flags = 0; |
| 225 | m_sg->size = cpu_to_le32(sg_dma_len(&sg[i])); | 228 | sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i]))); |
| 226 | if ((i + 1) == *sg_count) | 229 | if ((i + 1) == *sg_count) |
| 227 | m_sg->flags |= SGD_EOT; | 230 | m_sg->flags |= 1U << mhba->eot_flag; |
| 228 | 231 | ||
| 229 | m_sg++; | 232 | sgd_inc(mhba, m_sg); |
| 230 | } | 233 | } |
| 231 | } else { | 234 | } else { |
| 232 | scmd->SCp.dma_handle = scsi_bufflen(scmd) ? | 235 | scmd->SCp.dma_handle = scsi_bufflen(scmd) ? |
| @@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, | |||
| 237 | busaddr = scmd->SCp.dma_handle; | 240 | busaddr = scmd->SCp.dma_handle; |
| 238 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); | 241 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); |
| 239 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); | 242 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); |
| 240 | m_sg->flags = SGD_EOT; | 243 | m_sg->flags = 1U << mhba->eot_flag; |
| 241 | m_sg->size = cpu_to_le32(scsi_bufflen(scmd)); | 244 | sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd))); |
| 242 | *sg_count = 1; | 245 | *sg_count = 1; |
| 243 | } | 246 | } |
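Editor's note: sgd_setsz()/sgd_inc() and the per-HBA eot_flag replace the fixed SGD_EOT bit and fixed-stride struct mvumi_sgl stepping, so one loop can emit either the classic or the 9580 compact SG descriptor format. A sketch of the idea behind such a step helper (the real ones are in mvumi.h; struct mvumi_compact_sgl is an illustrative name):

	/* Sketch: step to the next SG descriptor for either format. The
	 * compact descriptor negotiated via HS_CAPABILITY_SUPPORT_COMPACT_SG
	 * is shorter, so the stride depends on the HBA capability bits.
	 */
	static inline struct mvumi_sgl *sgd_next(struct mvumi_hba *mhba,
						 struct mvumi_sgl *sgd)
	{
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			return (struct mvumi_sgl *)
				((u8 *)sgd + sizeof(struct mvumi_compact_sgl));
		return sgd + 1;
	}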
| 244 | 247 | ||
| @@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, | |||
| 267 | 270 | ||
| 268 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); | 271 | m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); |
| 269 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); | 272 | m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); |
| 270 | m_sg->flags = SGD_EOT; | 273 | m_sg->flags = 1U << mhba->eot_flag; |
| 271 | m_sg->size = cpu_to_le32(size); | 274 | sgd_setsz(mhba, m_sg, cpu_to_le32(size)); |
| 272 | 275 | ||
| 273 | return 0; | 276 | return 0; |
| 274 | } | 277 | } |
| @@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, | |||
| 285 | } | 288 | } |
| 286 | INIT_LIST_HEAD(&cmd->queue_pointer); | 289 | INIT_LIST_HEAD(&cmd->queue_pointer); |
| 287 | 290 | ||
| 288 | cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); | 291 | cmd->frame = pci_alloc_consistent(mhba->pdev, |
| 292 | mhba->ib_max_size, &cmd->frame_phys); | ||
| 289 | if (!cmd->frame) { | 293 | if (!cmd->frame) { |
| 290 | dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" | 294 | dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" |
| 291 | " frame,size = %d.\n", mhba->ib_max_size); | 295 | " frame,size = %d.\n", mhba->ib_max_size); |
| @@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, | |||
| 297 | if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { | 301 | if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { |
| 298 | dev_err(&mhba->pdev->dev, "failed to allocate memory" | 302 | dev_err(&mhba->pdev->dev, "failed to allocate memory" |
| 299 | " for internal frame\n"); | 303 | " for internal frame\n"); |
| 300 | kfree(cmd->frame); | 304 | pci_free_consistent(mhba->pdev, mhba->ib_max_size, |
| 305 | cmd->frame, cmd->frame_phys); | ||
| 301 | kfree(cmd); | 306 | kfree(cmd); |
| 302 | return NULL; | 307 | return NULL; |
| 303 | } | 308 | } |
| @@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, | |||
| 317 | if (cmd && cmd->frame) { | 322 | if (cmd && cmd->frame) { |
| 318 | if (cmd->frame->sg_counts) { | 323 | if (cmd->frame->sg_counts) { |
| 319 | m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; | 324 | m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; |
| 320 | size = m_sg->size; | 325 | sgd_getsz(mhba, m_sg, size); |
| 321 | 326 | ||
| 322 | phy_addr = (dma_addr_t) m_sg->baseaddr_l | | 327 | phy_addr = (dma_addr_t) m_sg->baseaddr_l | |
| 323 | (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); | 328 | (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); |
| @@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, | |||
| 325 | pci_free_consistent(mhba->pdev, size, cmd->data_buf, | 330 | pci_free_consistent(mhba->pdev, size, cmd->data_buf, |
| 326 | phy_addr); | 331 | phy_addr); |
| 327 | } | 332 | } |
| 328 | kfree(cmd->frame); | 333 | pci_free_consistent(mhba->pdev, mhba->ib_max_size, |
| 334 | cmd->frame, cmd->frame_phys); | ||
| 329 | kfree(cmd); | 335 | kfree(cmd); |
| 330 | } | 336 | } |
| 331 | } | 337 | } |
| @@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba) | |||
| 374 | cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, | 380 | cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, |
| 375 | queue_pointer); | 381 | queue_pointer); |
| 376 | list_del(&cmd->queue_pointer); | 382 | list_del(&cmd->queue_pointer); |
| 377 | kfree(cmd->frame); | 383 | if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) |
| 384 | kfree(cmd->frame); | ||
| 378 | kfree(cmd); | 385 | kfree(cmd); |
| 379 | } | 386 | } |
| 380 | } | 387 | } |
| @@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba) | |||
| 396 | 403 | ||
| 397 | INIT_LIST_HEAD(&cmd->queue_pointer); | 404 | INIT_LIST_HEAD(&cmd->queue_pointer); |
| 398 | list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); | 405 | list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); |
| 399 | cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); | 406 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { |
| 407 | cmd->frame = mhba->ib_frame + i * mhba->ib_max_size; | ||
| 408 | cmd->frame_phys = mhba->ib_frame_phys | ||
| 409 | + i * mhba->ib_max_size; | ||
| 410 | } else | ||
| 411 | cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); | ||
| 400 | if (!cmd->frame) | 412 | if (!cmd->frame) |
| 401 | goto err_exit; | 413 | goto err_exit; |
| 402 | } | 414 | } |
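Editor's note: with HS_CAPABILITY_SUPPORT_DYN_SRC the command frames are no longer individual kzalloc()s but fixed-stride slices of one DMA-coherent region (ib_frame/ib_frame_phys), which is why mvumi_free_cmds() must skip kfree(cmd->frame) in that mode. A sketch of the carving arithmetic (helper names are hypothetical):

	/* Sketch: in dynamic-source mode every frame is a slice of one
	 * coherent region, so virtual and bus addresses are derived from
	 * the same index and nothing is kfree()d per command.
	 */
	static void *dyn_frame_virt(struct mvumi_hba *mhba, unsigned int i)
	{
		return mhba->ib_frame + i * mhba->ib_max_size;
	}

	static dma_addr_t dyn_frame_phys(struct mvumi_hba *mhba, unsigned int i)
	{
		return mhba->ib_frame_phys + i * mhba->ib_max_size;
	}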
| @@ -409,48 +421,71 @@ err_exit: | |||
| 409 | cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, | 421 | cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, |
| 410 | queue_pointer); | 422 | queue_pointer); |
| 411 | list_del(&cmd->queue_pointer); | 423 | list_del(&cmd->queue_pointer); |
| 412 | kfree(cmd->frame); | 424 | if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) |
| 425 | kfree(cmd->frame); | ||
| 413 | kfree(cmd); | 426 | kfree(cmd); |
| 414 | } | 427 | } |
| 415 | return -ENOMEM; | 428 | return -ENOMEM; |
| 416 | } | 429 | } |
| 417 | 430 | ||
| 418 | static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) | 431 | static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba) |
| 419 | { | 432 | { |
| 420 | unsigned int ib_rp_reg, cur_ib_entry; | 433 | unsigned int ib_rp_reg; |
| 434 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 435 | |||
| 436 | ib_rp_reg = ioread32(mhba->regs->inb_read_pointer); | ||
| 421 | 437 | ||
| 438 | if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) == | ||
| 439 | (mhba->ib_cur_slot & regs->cl_slot_num_mask)) && | ||
| 440 | ((ib_rp_reg & regs->cl_pointer_toggle) | ||
| 441 | != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) { | ||
| 442 | dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); | ||
| 443 | return 0; | ||
| 444 | } | ||
| 422 | if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { | 445 | if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { |
| 423 | dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); | 446 | dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); |
| 424 | return -1; | 447 | return 0; |
| 448 | } else { | ||
| 449 | return mhba->max_io - atomic_read(&mhba->fw_outstanding); | ||
| 425 | } | 450 | } |
| 426 | ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER); | 451 | } |
| 427 | 452 | ||
| 428 | if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) == | 453 | static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba) |
| 429 | (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) && | 454 | { |
| 430 | ((ib_rp_reg & CL_POINTER_TOGGLE) != | 455 | unsigned int count; |
| 431 | (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) { | 456 | if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1)) |
| 432 | dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); | 457 | return 0; |
| 433 | return -1; | 458 | count = ioread32(mhba->ib_shadow); |
| 434 | } | 459 | if (count == 0xffff) |
| 460 | return 0; | ||
| 461 | return count; | ||
| 462 | } | ||
| 463 | |||
| 464 | static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) | ||
| 465 | { | ||
| 466 | unsigned int cur_ib_entry; | ||
| 435 | 467 | ||
| 436 | cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK; | 468 | cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask; |
| 437 | cur_ib_entry++; | 469 | cur_ib_entry++; |
| 438 | if (cur_ib_entry >= mhba->list_num_io) { | 470 | if (cur_ib_entry >= mhba->list_num_io) { |
| 439 | cur_ib_entry -= mhba->list_num_io; | 471 | cur_ib_entry -= mhba->list_num_io; |
| 440 | mhba->ib_cur_slot ^= CL_POINTER_TOGGLE; | 472 | mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle; |
| 473 | } | ||
| 474 | mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask; | ||
| 475 | mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask); | ||
| 476 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { | ||
| 477 | *ib_entry = mhba->ib_list + cur_ib_entry * | ||
| 478 | sizeof(struct mvumi_dyn_list_entry); | ||
| 479 | } else { | ||
| 480 | *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; | ||
| 441 | } | 481 | } |
| 442 | mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK; | ||
| 443 | mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK); | ||
| 444 | *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; | ||
| 445 | atomic_inc(&mhba->fw_outstanding); | 482 | atomic_inc(&mhba->fw_outstanding); |
| 446 | |||
| 447 | return 0; | ||
| 448 | } | 483 | } |
| 449 | 484 | ||
| 450 | static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) | 485 | static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) |
| 451 | { | 486 | { |
| 452 | iowrite32(0xfff, mhba->ib_shadow); | 487 | iowrite32(0xffff, mhba->ib_shadow); |
| 453 | iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER); | 488 | iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer); |
| 454 | } | 489 | } |
| 455 | 490 | ||
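Editor's note: ib_cur_slot packs a slot index plus a wrap "toggle" bit, with the masks now coming from the per-chip mvumi_hw_regs rather than the old CL_* constants. The toggle flips on every wrap, which is what lets producer and consumer tell a full ring from an empty one when the indices are equal. A sketch of the advance step:

	/* Sketch: advance a packed (toggle | slot) cursor by one entry.
	 * When the slot index wraps past the list depth, flip the toggle
	 * bit so "same slot, different toggle" reads as "ring full".
	 */
	static u32 advance_slot(u32 cur, u32 slot_mask, u32 toggle, u32 depth)
	{
		u32 slot = (cur & slot_mask) + 1;

		if (slot >= depth) {
			slot -= depth;
			cur ^= toggle;		/* record one more wrap-around */
		}
		return (cur & ~slot_mask) | (slot & slot_mask);
	}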
| 456 | static char mvumi_check_ob_frame(struct mvumi_hba *mhba, | 491 | static char mvumi_check_ob_frame(struct mvumi_hba *mhba, |
| @@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba, | |||
| 480 | return 0; | 515 | return 0; |
| 481 | } | 516 | } |
| 482 | 517 | ||
| 483 | static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) | 518 | static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, |
| 519 | unsigned int *cur_obf, unsigned int *assign_obf_end) | ||
| 484 | { | 520 | { |
| 485 | unsigned int ob_write_reg, ob_write_shadow_reg; | 521 | unsigned int ob_write, ob_write_shadow; |
| 486 | unsigned int cur_obf, assign_obf_end, i; | 522 | struct mvumi_hw_regs *regs = mhba->regs; |
| 487 | struct mvumi_ob_data *ob_data; | ||
| 488 | struct mvumi_rsp_frame *p_outb_frame; | ||
| 489 | 523 | ||
| 490 | do { | 524 | do { |
| 491 | ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER); | 525 | ob_write = ioread32(regs->outb_copy_pointer); |
| 492 | ob_write_shadow_reg = ioread32(mhba->ob_shadow); | 526 | ob_write_shadow = ioread32(mhba->ob_shadow); |
| 493 | } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg); | 527 | } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow); |
| 494 | 528 | ||
| 495 | cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK; | 529 | *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; |
| 496 | assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK; | 530 | *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; |
| 497 | 531 | ||
| 498 | if ((ob_write_reg & CL_POINTER_TOGGLE) != | 532 | if ((ob_write & regs->cl_pointer_toggle) != |
| 499 | (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) { | 533 | (mhba->ob_cur_slot & regs->cl_pointer_toggle)) { |
| 500 | assign_obf_end += mhba->list_num_io; | 534 | *assign_obf_end += mhba->list_num_io; |
| 501 | } | 535 | } |
| 536 | return 0; | ||
| 537 | } | ||
| 538 | |||
| 539 | static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, | ||
| 540 | unsigned int *cur_obf, unsigned int *assign_obf_end) | ||
| 541 | { | ||
| 542 | unsigned int ob_write; | ||
| 543 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 544 | |||
| 545 | ob_write = ioread32(regs->outb_read_pointer); | ||
| 546 | ob_write = ioread32(regs->outb_copy_pointer); | ||
| 547 | *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; | ||
| 548 | *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; | ||
| 549 | if (*assign_obf_end < *cur_obf) | ||
| 550 | *assign_obf_end += mhba->list_num_io; | ||
| 551 | else if (*assign_obf_end == *cur_obf) | ||
| 552 | return -1; | ||
| 553 | return 0; | ||
| 554 | } | ||
| 555 | |||
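Editor's note: check_ob_list is now a per-chip hook. The 9143 variant spins until the copy-pointer register agrees with the DMA'd shadow, so it never acts on a half-updated pointer, while the 9580 variant compares the copy pointer against the driver's cursor and signals "nothing new" with -1. A sketch of calling through the hook, with the instancet layout assumed from the surrounding code:

	/* Sketch: ask the chip-specific hook for the window of newly
	 * completed entries; bail out when the outbound list is empty.
	 */
	unsigned int cur, end;

	if (mhba->instancet->check_ob_list(mhba, &cur, &end))
		return;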
| 556 | static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) | ||
| 557 | { | ||
| 558 | unsigned int cur_obf, assign_obf_end, i; | ||
| 559 | struct mvumi_ob_data *ob_data; | ||
| 560 | struct mvumi_rsp_frame *p_outb_frame; | ||
| 561 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 562 | |||
| 563 | if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end)) | ||
| 564 | return; | ||
| 502 | 565 | ||
| 503 | for (i = (assign_obf_end - cur_obf); i != 0; i--) { | 566 | for (i = (assign_obf_end - cur_obf); i != 0; i--) { |
| 504 | cur_obf++; | 567 | cur_obf++; |
| 505 | if (cur_obf >= mhba->list_num_io) { | 568 | if (cur_obf >= mhba->list_num_io) { |
| 506 | cur_obf -= mhba->list_num_io; | 569 | cur_obf -= mhba->list_num_io; |
| 507 | mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; | 570 | mhba->ob_cur_slot ^= regs->cl_pointer_toggle; |
| 508 | } | 571 | } |
| 509 | 572 | ||
| 510 | p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; | 573 | p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; |
| @@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) | |||
| 528 | ob_data = NULL; | 591 | ob_data = NULL; |
| 529 | if (cur_obf == 0) { | 592 | if (cur_obf == 0) { |
| 530 | cur_obf = mhba->list_num_io - 1; | 593 | cur_obf = mhba->list_num_io - 1; |
| 531 | mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; | 594 | mhba->ob_cur_slot ^= regs->cl_pointer_toggle; |
| 532 | } else | 595 | } else |
| 533 | cur_obf -= 1; | 596 | cur_obf -= 1; |
| 534 | break; | 597 | break; |
| @@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) | |||
| 539 | 602 | ||
| 540 | list_add_tail(&ob_data->list, &mhba->free_ob_list); | 603 | list_add_tail(&ob_data->list, &mhba->free_ob_list); |
| 541 | } | 604 | } |
| 542 | mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK; | 605 | mhba->ob_cur_slot &= ~regs->cl_slot_num_mask; |
| 543 | mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK); | 606 | mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask); |
| 544 | iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER); | 607 | iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer); |
| 545 | } | 608 | } |
| 546 | 609 | ||
| 547 | static void mvumi_reset(void *regs) | 610 | static void mvumi_reset(struct mvumi_hba *mhba) |
| 548 | { | 611 | { |
| 549 | iowrite32(0, regs + CPU_ENPOINTA_MASK_REG); | 612 | struct mvumi_hw_regs *regs = mhba->regs; |
| 550 | if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE) | 613 | |
| 614 | iowrite32(0, regs->enpointa_mask_reg); | ||
| 615 | if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE) | ||
| 551 | return; | 616 | return; |
| 552 | 617 | ||
| 553 | iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG); | 618 | iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg); |
| 554 | } | 619 | } |
| 555 | 620 | ||
| 556 | static unsigned char mvumi_start(struct mvumi_hba *mhba); | 621 | static unsigned char mvumi_start(struct mvumi_hba *mhba); |
| @@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba); | |||
| 558 | static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) | 623 | static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) |
| 559 | { | 624 | { |
| 560 | mhba->fw_state = FW_STATE_ABORT; | 625 | mhba->fw_state = FW_STATE_ABORT; |
| 561 | mvumi_reset(mhba->mmio); | 626 | mvumi_reset(mhba); |
| 562 | 627 | ||
| 563 | if (mvumi_start(mhba)) | 628 | if (mvumi_start(mhba)) |
| 564 | return FAILED; | 629 | return FAILED; |
| @@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) | |||
| 566 | return SUCCESS; | 631 | return SUCCESS; |
| 567 | } | 632 | } |
| 568 | 633 | ||
| 634 | static int mvumi_wait_for_fw(struct mvumi_hba *mhba) | ||
| 635 | { | ||
| 636 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 637 | u32 tmp; | ||
| 638 | unsigned long before; | ||
| 639 | before = jiffies; | ||
| 640 | |||
| 641 | iowrite32(0, regs->enpointa_mask_reg); | ||
| 642 | tmp = ioread32(regs->arm_to_pciea_msg1); | ||
| 643 | while (tmp != HANDSHAKE_READYSTATE) { | ||
| 644 | iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg); | ||
| 645 | if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { | ||
| 646 | dev_err(&mhba->pdev->dev, | ||
| 647 | "FW reset failed [0x%x].\n", tmp); | ||
| 648 | return FAILED; | ||
| 649 | } | ||
| 650 | |||
| 651 | msleep(500); | ||
| 652 | rmb(); | ||
| 653 | tmp = ioread32(regs->arm_to_pciea_msg1); | ||
| 654 | } | ||
| 655 | |||
| 656 | return SUCCESS; | ||
| 657 | } | ||
| 658 | |||
| 659 | static void mvumi_backup_bar_addr(struct mvumi_hba *mhba) | ||
| 660 | { | ||
| 661 | unsigned char i; | ||
| 662 | |||
| 663 | for (i = 0; i < MAX_BASE_ADDRESS; i++) { | ||
| 664 | pci_read_config_dword(mhba->pdev, 0x10 + i * 4, | ||
| 665 | &mhba->pci_base[i]); | ||
| 666 | } | ||
| 667 | } | ||
| 668 | |||
| 669 | static void mvumi_restore_bar_addr(struct mvumi_hba *mhba) | ||
| 670 | { | ||
| 671 | unsigned char i; | ||
| 672 | |||
| 673 | for (i = 0; i < MAX_BASE_ADDRESS; i++) { | ||
| 674 | if (mhba->pci_base[i]) | ||
| 675 | pci_write_config_dword(mhba->pdev, 0x10 + i * 4, | ||
| 676 | mhba->pci_base[i]); | ||
| 677 | } | ||
| 678 | } | ||
| 679 | |||
| 680 | static unsigned int mvumi_pci_set_master(struct pci_dev *pdev) | ||
| 681 | { | ||
| 682 | unsigned int ret = 0; | ||
| 683 | pci_set_master(pdev); | ||
| 684 | |||
| 685 | if (IS_DMA64) { | ||
| 686 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) | ||
| 687 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 688 | } else | ||
| 689 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 690 | |||
| 691 | return ret; | ||
| 692 | } | ||
| 693 | |||
| 694 | static int mvumi_reset_host_9580(struct mvumi_hba *mhba) | ||
| 695 | { | ||
| 696 | mhba->fw_state = FW_STATE_ABORT; | ||
| 697 | |||
| 698 | iowrite32(0, mhba->regs->reset_enable); | ||
| 699 | iowrite32(0xf, mhba->regs->reset_request); | ||
| 700 | |||
| 701 | iowrite32(0x10, mhba->regs->reset_enable); | ||
| 702 | iowrite32(0x10, mhba->regs->reset_request); | ||
| 703 | msleep(100); | ||
| 704 | pci_disable_device(mhba->pdev); | ||
| 705 | |||
| 706 | if (pci_enable_device(mhba->pdev)) { | ||
| 707 | dev_err(&mhba->pdev->dev, "enable device failed\n"); | ||
| 708 | return FAILED; | ||
| 709 | } | ||
| 710 | if (mvumi_pci_set_master(mhba->pdev)) { | ||
| 711 | dev_err(&mhba->pdev->dev, "set master failed\n"); | ||
| 712 | return FAILED; | ||
| 713 | } | ||
| 714 | mvumi_restore_bar_addr(mhba); | ||
| 715 | if (mvumi_wait_for_fw(mhba) == FAILED) | ||
| 716 | return FAILED; | ||
| 717 | |||
| 718 | return mvumi_wait_for_outstanding(mhba); | ||
| 719 | } | ||
| 720 | |||
| 721 | static int mvumi_reset_host_9143(struct mvumi_hba *mhba) | ||
| 722 | { | ||
| 723 | return mvumi_wait_for_outstanding(mhba); | ||
| 724 | } | ||
| 725 | |||
| 569 | static int mvumi_host_reset(struct scsi_cmnd *scmd) | 726 | static int mvumi_host_reset(struct scsi_cmnd *scmd) |
| 570 | { | 727 | { |
| 571 | struct mvumi_hba *mhba; | 728 | struct mvumi_hba *mhba; |
| @@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd) | |||
| 575 | scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", | 732 | scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", |
| 576 | scmd->serial_number, scmd->cmnd[0], scmd->retries); | 733 | scmd->serial_number, scmd->cmnd[0], scmd->retries); |
| 577 | 734 | ||
| 578 | return mvumi_wait_for_outstanding(mhba); | 735 | return mhba->instancet->reset_host(mhba); |
| 579 | } | 736 | } |
| 580 | 737 | ||
| 581 | static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, | 738 | static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, |
| @@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba) | |||
| 628 | mvumi_free_cmds(mhba); | 785 | mvumi_free_cmds(mhba); |
| 629 | mvumi_release_mem_resource(mhba); | 786 | mvumi_release_mem_resource(mhba); |
| 630 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); | 787 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); |
| 631 | kfree(mhba->handshake_page); | 788 | pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, |
| 789 | mhba->handshake_page, mhba->handshake_page_phys); | ||
| 790 | kfree(mhba->regs); | ||
| 632 | pci_release_regions(mhba->pdev); | 791 | pci_release_regions(mhba->pdev); |
| 633 | } | 792 | } |
| 634 | 793 | ||
| @@ -665,6 +824,7 @@ get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); | |||
| 665 | frame->cdb_length = MAX_COMMAND_SIZE; | 824 | frame->cdb_length = MAX_COMMAND_SIZE; |
| 666 | memset(frame->cdb, 0, MAX_COMMAND_SIZE); | 825 | memset(frame->cdb, 0, MAX_COMMAND_SIZE); |
| 667 | frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; | 826 | frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; |
| 827 | frame->cdb[1] = CDB_CORE_MODULE; | ||
| 668 | frame->cdb[2] = CDB_CORE_SHUTDOWN; | 828 | frame->cdb[2] = CDB_CORE_SHUTDOWN; |
| 669 | 829 | ||
| 670 | mvumi_issue_blocked_cmd(mhba, cmd); | 830 | mvumi_issue_blocked_cmd(mhba, cmd); |
| @@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header, | |||
| 695 | return ret; | 855 | return ret; |
| 696 | } | 856 | } |
| 697 | 857 | ||
| 698 | void mvumi_hs_build_page(struct mvumi_hba *mhba, | 858 | static void mvumi_hs_build_page(struct mvumi_hba *mhba, |
| 699 | struct mvumi_hs_header *hs_header) | 859 | struct mvumi_hs_header *hs_header) |
| 700 | { | 860 | { |
| 701 | struct mvumi_hs_page2 *hs_page2; | 861 | struct mvumi_hs_page2 *hs_page2; |
| @@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba, | |||
| 710 | hs_header->frame_length = sizeof(*hs_page2) - 4; | 870 | hs_header->frame_length = sizeof(*hs_page2) - 4; |
| 711 | memset(hs_header->frame_content, 0, hs_header->frame_length); | 871 | memset(hs_header->frame_content, 0, hs_header->frame_length); |
| 712 | hs_page2->host_type = 3; /* 3 means Linux */ | 872 | hs_page2->host_type = 3; /* 3 means Linux */ |

| 873 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) | ||
| 874 | hs_page2->host_cap = 0x08;/* host dynamic source mode */ | ||
| 713 | hs_page2->host_ver.ver_major = VER_MAJOR; | 875 | hs_page2->host_ver.ver_major = VER_MAJOR; |
| 714 | hs_page2->host_ver.ver_minor = VER_MINOR; | 876 | hs_page2->host_ver.ver_minor = VER_MINOR; |
| 715 | hs_page2->host_ver.ver_oem = VER_OEM; | 877 | hs_page2->host_ver.ver_oem = VER_OEM; |
| @@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba, | |||
| 745 | hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); | 907 | hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); |
| 746 | hs_page4->ib_entry_size = mhba->ib_max_size_setting; | 908 | hs_page4->ib_entry_size = mhba->ib_max_size_setting; |
| 747 | hs_page4->ob_entry_size = mhba->ob_max_size_setting; | 909 | hs_page4->ob_entry_size = mhba->ob_max_size_setting; |
| 748 | hs_page4->ob_depth = mhba->list_num_io; | 910 | if (mhba->hba_capability |
| 749 | hs_page4->ib_depth = mhba->list_num_io; | 911 | & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) { |
| 912 | hs_page4->ob_depth = find_first_bit((unsigned long *) | ||
| 913 | &mhba->list_num_io, | ||
| 914 | BITS_PER_LONG); | ||
| 915 | hs_page4->ib_depth = find_first_bit((unsigned long *) | ||
| 916 | &mhba->list_num_io, | ||
| 917 | BITS_PER_LONG); | ||
| 918 | } else { | ||
| 919 | hs_page4->ob_depth = (u8) mhba->list_num_io; | ||
| 920 | hs_page4->ib_depth = (u8) mhba->list_num_io; | ||
| 921 | } | ||
| 750 | hs_header->checksum = mvumi_calculate_checksum(hs_header, | 922 | hs_header->checksum = mvumi_calculate_checksum(hs_header, |
| 751 | hs_header->frame_length); | 923 | hs_header->frame_length); |
| 752 | break; | 924 | break; |
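Editor's note: with HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF the handshake page carries the queue depth as a power-of-two exponent rather than a raw count; since list_num_io is a power of two in that mode, find_first_bit() on the value is just its base-2 logarithm. An equivalent formulation (not the committed code) using the kernel's ilog2():

	/* Sketch: encode a power-of-two queue depth as its exponent.
	 * For list_num_io = 32 this stores 5, matching find_first_bit().
	 */
	hs_page4->ib_depth = ilog2(mhba->list_num_io);
	hs_page4->ob_depth = ilog2(mhba->list_num_io);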
| @@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba) | |||
| 774 | return 0; | 946 | return 0; |
| 775 | 947 | ||
| 776 | tmp_size = mhba->ib_max_size * mhba->max_io; | 948 | tmp_size = mhba->ib_max_size * mhba->max_io; |
| 949 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) | ||
| 950 | tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; | ||
| 951 | |||
| 777 | tmp_size += 128 + mhba->ob_max_size * mhba->max_io; | 952 | tmp_size += 128 + mhba->ob_max_size * mhba->max_io; |
| 778 | tmp_size += 8 + sizeof(u32) + 16; | 953 | tmp_size += 8 + sizeof(u32)*2 + 16; |
| 779 | 954 | ||
| 780 | res_mgnt = mvumi_alloc_mem_resource(mhba, | 955 | res_mgnt = mvumi_alloc_mem_resource(mhba, |
| 781 | RESOURCE_UNCACHED_MEMORY, tmp_size); | 956 | RESOURCE_UNCACHED_MEMORY, tmp_size); |
| @@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba) | |||
| 793 | v += offset; | 968 | v += offset; |
| 794 | mhba->ib_list = v; | 969 | mhba->ib_list = v; |
| 795 | mhba->ib_list_phys = p; | 970 | mhba->ib_list_phys = p; |
| 971 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { | ||
| 972 | v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; | ||
| 973 | p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; | ||
| 974 | mhba->ib_frame = v; | ||
| 975 | mhba->ib_frame_phys = p; | ||
| 976 | } | ||
| 796 | v += mhba->ib_max_size * mhba->max_io; | 977 | v += mhba->ib_max_size * mhba->max_io; |
| 797 | p += mhba->ib_max_size * mhba->max_io; | 978 | p += mhba->ib_max_size * mhba->max_io; |
| 979 | |||
| 798 | /* ib shadow */ | 980 | /* ib shadow */ |
| 799 | offset = round_up(p, 8) - p; | 981 | offset = round_up(p, 8) - p; |
| 800 | p += offset; | 982 | p += offset; |
| 801 | v += offset; | 983 | v += offset; |
| 802 | mhba->ib_shadow = v; | 984 | mhba->ib_shadow = v; |
| 803 | mhba->ib_shadow_phys = p; | 985 | mhba->ib_shadow_phys = p; |
| 804 | p += sizeof(u32); | 986 | p += sizeof(u32)*2; |
| 805 | v += sizeof(u32); | 987 | v += sizeof(u32)*2; |
| 806 | /* ob shadow */ | 988 | /* ob shadow */ |
| 807 | offset = round_up(p, 8) - p; | 989 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { |
| 808 | p += offset; | 990 | offset = round_up(p, 8) - p; |
| 809 | v += offset; | 991 | p += offset; |
| 810 | mhba->ob_shadow = v; | 992 | v += offset; |
| 811 | mhba->ob_shadow_phys = p; | 993 | mhba->ob_shadow = v; |
| 812 | p += 8; | 994 | mhba->ob_shadow_phys = p; |
| 813 | v += 8; | 995 | p += 8; |
| 996 | v += 8; | ||
| 997 | } else { | ||
| 998 | offset = round_up(p, 4) - p; | ||
| 999 | p += offset; | ||
| 1000 | v += offset; | ||
| 1001 | mhba->ob_shadow = v; | ||
| 1002 | mhba->ob_shadow_phys = p; | ||
| 1003 | p += 4; | ||
| 1004 | v += 4; | ||
| 1005 | } | ||
| 814 | 1006 | ||
| 815 | /* ob list */ | 1007 | /* ob list */ |
| 816 | offset = round_up(p, 128) - p; | 1008 | offset = round_up(p, 128) - p; |
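Editor's note: mvumi_init_data() carves one uncached allocation into the inbound list (plus, in dynamic-source mode, the frame pool), the ib/ob shadows, and the outbound list, bumping a virtual pointer and a bus address in lockstep and aligning each piece with round_up(). A sketch of that carving idiom (hypothetical helper):

	/* Sketch: carve an aligned sub-buffer out of one big DMA region,
	 * advancing the virtual pointer and bus address together so their
	 * offsets never diverge. align must be a power of two.
	 */
	static void *carve(u8 **v, dma_addr_t *p, size_t size, size_t align)
	{
		size_t pad = round_up(*p, align) - *p;
		u8 *out;

		*v += pad;
		*p += pad;
		out = *v;
		*v += size;
		*p += size;
		return out;
	}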
| @@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba, | |||
| 902 | dev_dbg(&mhba->pdev->dev, "FW version:%d\n", | 1094 | dev_dbg(&mhba->pdev->dev, "FW version:%d\n", |
| 903 | hs_page1->fw_ver.ver_build); | 1095 | hs_page1->fw_ver.ver_build); |
| 904 | 1096 | ||
| 1097 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) | ||
| 1098 | mhba->eot_flag = 22; | ||
| 1099 | else | ||
| 1100 | mhba->eot_flag = 27; | ||
| 1101 | if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) | ||
| 1102 | mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth; | ||
| 905 | break; | 1103 | break; |
| 906 | default: | 1104 | default: |
| 907 | dev_err(&mhba->pdev->dev, "handshake: page code error\n"); | 1105 | dev_err(&mhba->pdev->dev, "handshake: page code error\n"); |
| @@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba) | |||
| 923 | { | 1121 | { |
| 924 | unsigned int hs_state, tmp, hs_fun; | 1122 | unsigned int hs_state, tmp, hs_fun; |
| 925 | struct mvumi_hs_header *hs_header; | 1123 | struct mvumi_hs_header *hs_header; |
| 926 | void *regs = mhba->mmio; | 1124 | struct mvumi_hw_regs *regs = mhba->regs; |
| 927 | 1125 | ||
| 928 | if (mhba->fw_state == FW_STATE_STARTING) | 1126 | if (mhba->fw_state == FW_STATE_STARTING) |
| 929 | hs_state = HS_S_START; | 1127 | hs_state = HS_S_START; |
| 930 | else { | 1128 | else { |
| 931 | tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0); | 1129 | tmp = ioread32(regs->arm_to_pciea_msg0); |
| 932 | hs_state = HS_GET_STATE(tmp); | 1130 | hs_state = HS_GET_STATE(tmp); |
| 933 | dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); | 1131 | dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); |
| 934 | if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { | 1132 | if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { |
| @@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba) | |||
| 943 | mhba->fw_state = FW_STATE_HANDSHAKING; | 1141 | mhba->fw_state = FW_STATE_HANDSHAKING; |
| 944 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); | 1142 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); |
| 945 | HS_SET_STATE(hs_fun, HS_S_RESET); | 1143 | HS_SET_STATE(hs_fun, HS_S_RESET); |
| 946 | iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1); | 1144 | iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1); |
| 947 | iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); | 1145 | iowrite32(hs_fun, regs->pciea_to_arm_msg0); |
| 948 | iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); | 1146 | iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); |
| 949 | break; | 1147 | break; |
| 950 | 1148 | ||
| 951 | case HS_S_RESET: | 1149 | case HS_S_RESET: |
| 952 | iowrite32(lower_32_bits(mhba->handshake_page_phys), | 1150 | iowrite32(lower_32_bits(mhba->handshake_page_phys), |
| 953 | regs + CPU_PCIEA_TO_ARM_MSG1); | 1151 | regs->pciea_to_arm_msg1); |
| 954 | iowrite32(upper_32_bits(mhba->handshake_page_phys), | 1152 | iowrite32(upper_32_bits(mhba->handshake_page_phys), |
| 955 | regs + CPU_ARM_TO_PCIEA_MSG1); | 1153 | regs->arm_to_pciea_msg1); |
| 956 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); | 1154 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); |
| 957 | HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); | 1155 | HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); |
| 958 | iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); | 1156 | iowrite32(hs_fun, regs->pciea_to_arm_msg0); |
| 959 | iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); | 1157 | iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); |
| 960 | |||
| 961 | break; | 1158 | break; |
| 962 | 1159 | ||
| 963 | case HS_S_PAGE_ADDR: | 1160 | case HS_S_PAGE_ADDR: |
| @@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba) | |||
| 997 | HS_SET_STATE(hs_fun, HS_S_END); | 1194 | HS_SET_STATE(hs_fun, HS_S_END); |
| 998 | 1195 | ||
| 999 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); | 1196 | HS_SET_STATUS(hs_fun, HS_STATUS_OK); |
| 1000 | iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); | 1197 | iowrite32(hs_fun, regs->pciea_to_arm_msg0); |
| 1001 | iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); | 1198 | iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg); |
| 1002 | break; | 1199 | break; |
| 1003 | 1200 | ||
| 1004 | case HS_S_END: | 1201 | case HS_S_END: |
| 1005 | /* Set communication list ISR */ | 1202 | /* Set communication list ISR */ |
| 1006 | tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG); | 1203 | tmp = ioread32(regs->enpointa_mask_reg); |
| 1007 | tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR; | 1204 | tmp |= regs->int_comaout | regs->int_comaerr; |
| 1008 | iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); | 1205 | iowrite32(tmp, regs->enpointa_mask_reg); |
| 1009 | iowrite32(mhba->list_num_io, mhba->ib_shadow); | 1206 | iowrite32(mhba->list_num_io, mhba->ib_shadow); |
| 1010 | /* Set InBound List Available count shadow */ | 1207 | /* Set InBound List Available count shadow */ |
| 1011 | iowrite32(lower_32_bits(mhba->ib_shadow_phys), | 1208 | iowrite32(lower_32_bits(mhba->ib_shadow_phys), |
| 1012 | regs + CLA_INB_AVAL_COUNT_BASEL); | 1209 | regs->inb_aval_count_basel); |
| 1013 | iowrite32(upper_32_bits(mhba->ib_shadow_phys), | 1210 | iowrite32(upper_32_bits(mhba->ib_shadow_phys), |
| 1014 | regs + CLA_INB_AVAL_COUNT_BASEH); | 1211 | regs->inb_aval_count_baseh); |
| 1015 | 1212 | ||
| 1016 | /* Set OutBound List Available count shadow */ | 1213 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) { |
| 1017 | iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, | 1214 | /* Set OutBound List Available count shadow */ |
| 1018 | mhba->ob_shadow); | 1215 | iowrite32((mhba->list_num_io-1) | |
| 1019 | iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); | 1216 | regs->cl_pointer_toggle, |
| 1020 | iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4); | 1217 | mhba->ob_shadow); |
| 1218 | iowrite32(lower_32_bits(mhba->ob_shadow_phys), | ||
| 1219 | regs->outb_copy_basel); | ||
| 1220 | iowrite32(upper_32_bits(mhba->ob_shadow_phys), | ||
| 1221 | regs->outb_copy_baseh); | ||
| 1222 | } | ||
| 1021 | 1223 | ||
| 1022 | mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; | 1224 | mhba->ib_cur_slot = (mhba->list_num_io - 1) | |
| 1023 | mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; | 1225 | regs->cl_pointer_toggle; |
| 1226 | mhba->ob_cur_slot = (mhba->list_num_io - 1) | | ||
| 1227 | regs->cl_pointer_toggle; | ||
| 1024 | mhba->fw_state = FW_STATE_STARTED; | 1228 | mhba->fw_state = FW_STATE_STARTED; |
| 1025 | 1229 | ||
| 1026 | break; | 1230 | break; |
| @@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) | |||
| 1040 | before = jiffies; | 1244 | before = jiffies; |
| 1041 | mvumi_handshake(mhba); | 1245 | mvumi_handshake(mhba); |
| 1042 | do { | 1246 | do { |
| 1043 | isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio); | 1247 | isr_status = mhba->instancet->read_fw_status_reg(mhba); |
| 1044 | 1248 | ||
| 1045 | if (mhba->fw_state == FW_STATE_STARTED) | 1249 | if (mhba->fw_state == FW_STATE_STARTED) |
| 1046 | return 0; | 1250 | return 0; |
| @@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) | |||
| 1062 | 1266 | ||
| 1063 | static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) | 1267 | static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) |
| 1064 | { | 1268 | { |
| 1065 | void *regs = mhba->mmio; | ||
| 1066 | unsigned int tmp; | 1269 | unsigned int tmp; |
| 1067 | unsigned long before; | 1270 | unsigned long before; |
| 1068 | 1271 | ||
| 1069 | before = jiffies; | 1272 | before = jiffies; |
| 1070 | tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); | 1273 | tmp = ioread32(mhba->regs->arm_to_pciea_msg1); |
| 1071 | while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { | 1274 | while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { |
| 1072 | if (tmp != HANDSHAKE_READYSTATE) | 1275 | if (tmp != HANDSHAKE_READYSTATE) |
| 1073 | iowrite32(DRBL_MU_RESET, | 1276 | iowrite32(DRBL_MU_RESET, |
| 1074 | regs + CPU_PCIEA_TO_ARM_DRBL_REG); | 1277 | mhba->regs->pciea_to_arm_drbl_reg); |
| 1075 | if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { | 1278 | if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { |
| 1076 | dev_err(&mhba->pdev->dev, | 1279 | dev_err(&mhba->pdev->dev, |
| 1077 | "invalid signature [0x%x].\n", tmp); | 1280 | "invalid signature [0x%x].\n", tmp); |
| @@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) | |||
| 1079 | } | 1282 | } |
| 1080 | usleep_range(1000, 2000); | 1283 | usleep_range(1000, 2000); |
| 1081 | rmb(); | 1284 | rmb(); |
| 1082 | tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); | 1285 | tmp = ioread32(mhba->regs->arm_to_pciea_msg1); |
| 1083 | } | 1286 | } |
| 1084 | 1287 | ||
| 1085 | mhba->fw_state = FW_STATE_STARTING; | 1288 | mhba->fw_state = FW_STATE_STARTING; |
| @@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) | |||
| 1100 | 1303 | ||
| 1101 | static unsigned char mvumi_start(struct mvumi_hba *mhba) | 1304 | static unsigned char mvumi_start(struct mvumi_hba *mhba) |
| 1102 | { | 1305 | { |
| 1103 | void *regs = mhba->mmio; | ||
| 1104 | unsigned int tmp; | 1306 | unsigned int tmp; |
| 1307 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 1308 | |||
| 1105 | /* clear Door bell */ | 1309 | /* clear Door bell */ |
| 1106 | tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 1310 | tmp = ioread32(regs->arm_to_pciea_drbl_reg); |
| 1107 | iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 1311 | iowrite32(tmp, regs->arm_to_pciea_drbl_reg); |
| 1108 | 1312 | ||
| 1109 | iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); | 1313 | iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); |
| 1110 | tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA; | 1314 | tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea; |
| 1111 | iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); | 1315 | iowrite32(tmp, regs->enpointa_mask_reg); |
| 1316 | msleep(100); | ||
| 1112 | if (mvumi_check_handshake(mhba)) | 1317 | if (mvumi_check_handshake(mhba)) |
| 1113 | return -1; | 1318 | return -1; |
| 1114 | 1319 | ||
| @@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, | |||
| 1166 | cmd->scmd->scsi_done(scmd); | 1371 | cmd->scmd->scsi_done(scmd); |
| 1167 | mvumi_return_cmd(mhba, cmd); | 1372 | mvumi_return_cmd(mhba, cmd); |
| 1168 | } | 1373 | } |
| 1374 | |||
| 1169 | static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, | 1375 | static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, |
| 1170 | struct mvumi_cmd *cmd, | 1376 | struct mvumi_cmd *cmd, |
| 1171 | struct mvumi_rsp_frame *ob_frame) | 1377 | struct mvumi_rsp_frame *ob_frame) |
| @@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba, | |||
| 1210 | } | 1416 | } |
| 1211 | } | 1417 | } |
| 1212 | 1418 | ||
| 1419 | static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) | ||
| 1420 | { | ||
| 1421 | struct scsi_device *sdev; | ||
| 1422 | int ret = -1; | ||
| 1423 | |||
| 1424 | if (status == DEVICE_OFFLINE) { | ||
| 1425 | sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); | ||
| 1426 | if (sdev) { | ||
| 1427 | dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, | ||
| 1428 | sdev->id, 0); | ||
| 1429 | scsi_remove_device(sdev); | ||
| 1430 | scsi_device_put(sdev); | ||
| 1431 | ret = 0; | ||
| 1432 | } else | ||
| 1433 | dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", | ||
| 1434 | devid); | ||
| 1435 | } else if (status == DEVICE_ONLINE) { | ||
| 1436 | sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); | ||
| 1437 | if (!sdev) { | ||
| 1438 | scsi_add_device(mhba->shost, 0, devid, 0); | ||
| 1439 | dev_dbg(&mhba->pdev->dev, "add disk %d-%d-%d.\n", 0, | ||
| 1440 | devid, 0); | ||
| 1441 | ret = 0; | ||
| 1442 | } else { | ||
| 1443 | dev_err(&mhba->pdev->dev, "don't add disk %d-%d-%d.\n", | ||
| 1444 | 0, devid, 0); | ||
| 1445 | scsi_device_put(sdev); | ||
| 1446 | } | ||
| 1447 | } | ||
| 1448 | return ret; | ||
| 1449 | } | ||
| 1450 | |||
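Editor's note: mvumi_handle_hotplug() drives SCSI midlayer add/remove from firmware events. scsi_device_lookup() both tests for presence and takes a reference, so every successful lookup above is paired with scsi_device_put(). A sketch of the remove pairing:

	/* Sketch: remove a target if (and only if) the midlayer knows it.
	 * scsi_device_lookup() returns a referenced sdev or NULL.
	 */
	sdev = scsi_device_lookup(shost, 0, devid, 0);
	if (sdev) {
		scsi_remove_device(sdev);	/* tear down the device */
		scsi_device_put(sdev);		/* drop the lookup reference */
	}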
| 1451 | static u64 mvumi_inquiry(struct mvumi_hba *mhba, | ||
| 1452 | unsigned int id, struct mvumi_cmd *cmd) | ||
| 1453 | { | ||
| 1454 | struct mvumi_msg_frame *frame; | ||
| 1455 | u64 wwid = 0; | ||
| 1456 | int cmd_alloc = 0; | ||
| 1457 | int data_buf_len = 64; | ||
| 1458 | |||
| 1459 | if (!cmd) { | ||
| 1460 | cmd = mvumi_create_internal_cmd(mhba, data_buf_len); | ||
| 1461 | if (cmd) | ||
| 1462 | cmd_alloc = 1; | ||
| 1463 | else | ||
| 1464 | return 0; | ||
| 1465 | } else { | ||
| 1466 | memset(cmd->data_buf, 0, data_buf_len); | ||
| 1467 | } | ||
| 1468 | cmd->scmd = NULL; | ||
| 1469 | cmd->cmd_status = REQ_STATUS_PENDING; | ||
| 1470 | atomic_set(&cmd->sync_cmd, 0); | ||
| 1471 | frame = cmd->frame; | ||
| 1472 | frame->device_id = (u16) id; | ||
| 1473 | frame->cmd_flag = CMD_FLAG_DATA_IN; | ||
| 1474 | frame->req_function = CL_FUN_SCSI_CMD; | ||
| 1475 | frame->cdb_length = 6; | ||
| 1476 | frame->data_transfer_length = MVUMI_INQUIRY_LENGTH; | ||
| 1477 | memset(frame->cdb, 0, frame->cdb_length); | ||
| 1478 | frame->cdb[0] = INQUIRY; | ||
| 1479 | frame->cdb[4] = frame->data_transfer_length; | ||
| 1480 | |||
| 1481 | mvumi_issue_blocked_cmd(mhba, cmd); | ||
| 1482 | |||
| 1483 | if (cmd->cmd_status == SAM_STAT_GOOD) { | ||
| 1484 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) | ||
| 1485 | wwid = id + 1; | ||
| 1486 | else | ||
| 1487 | memcpy((void *)&wwid, | ||
| 1488 | (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF), | ||
| 1489 | MVUMI_INQUIRY_UUID_LEN); | ||
| 1490 | dev_dbg(&mhba->pdev->dev, | ||
| 1491 | "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid); | ||
| 1492 | } else { | ||
| 1493 | wwid = 0; | ||
| 1494 | } | ||
| 1495 | if (cmd_alloc) | ||
| 1496 | mvumi_delete_internal_cmd(mhba, cmd); | ||
| 1497 | |||
| 1498 | return wwid; | ||
| 1499 | } | ||
| 1500 | |||
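Editor's note: mvumi_inquiry() issues a plain 6-byte INQUIRY through an internal frame and, on the 9580, lifts an 8-byte unique ID out of the response at MVUMI_INQUIRY_UUID_OFF; the 9143 reports no such field, so id + 1 stands in as the WWID. A sketch of the extraction step, assuming the buffer layout implied by the constants:

	u64 wwid = 0;
	u8 *buf = cmd->data_buf;	/* INQUIRY response payload */

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		wwid = id + 1;		/* 9143: synthesize a stable id */
	else
		memcpy(&wwid, buf + MVUMI_INQUIRY_UUID_OFF,
		       MVUMI_INQUIRY_UUID_LEN);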
| 1501 | static void mvumi_detach_devices(struct mvumi_hba *mhba) | ||
| 1502 | { | ||
| 1503 | struct mvumi_device *mv_dev = NULL , *dev_next; | ||
| 1504 | struct scsi_device *sdev = NULL; | ||
| 1505 | |||
| 1506 | mutex_lock(&mhba->device_lock); | ||
| 1507 | |||
| 1508 | /* detach Hard Disk */ | ||
| 1509 | list_for_each_entry_safe(mv_dev, dev_next, | ||
| 1510 | &mhba->shost_dev_list, list) { | ||
| 1511 | mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); | ||
| 1512 | list_del_init(&mv_dev->list); | ||
| 1513 | dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", | ||
| 1514 | mv_dev->id, mv_dev->wwid); | ||
| 1515 | kfree(mv_dev); | ||
| 1516 | } | ||
| 1517 | list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { | ||
| 1518 | list_del_init(&mv_dev->list); | ||
| 1519 | dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", | ||
| 1520 | mv_dev->id, mv_dev->wwid); | ||
| 1521 | kfree(mv_dev); | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | /* detach virtual device */ | ||
| 1525 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) | ||
| 1526 | sdev = scsi_device_lookup(mhba->shost, 0, | ||
| 1527 | mhba->max_target_id - 1, 0); | ||
| 1528 | |||
| 1529 | if (sdev) { | ||
| 1530 | scsi_remove_device(sdev); | ||
| 1531 | scsi_device_put(sdev); | ||
| 1532 | } | ||
| 1533 | |||
| 1534 | mutex_unlock(&mhba->device_lock); | ||
| 1535 | } | ||
| 1536 | |||
| 1537 | static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) | ||
| 1538 | { | ||
| 1539 | struct scsi_device *sdev; | ||
| 1540 | |||
| 1541 | sdev = scsi_device_lookup(mhba->shost, 0, id, 0); | ||
| 1542 | if (sdev) { | ||
| 1543 | scsi_rescan_device(&sdev->sdev_gendev); | ||
| 1544 | scsi_device_put(sdev); | ||
| 1545 | } | ||
| 1546 | } | ||
| 1547 | |||
| 1548 | static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) | ||
| 1549 | { | ||
| 1550 | struct mvumi_device *mv_dev = NULL; | ||
| 1551 | |||
| 1552 | list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { | ||
| 1553 | if (mv_dev->wwid == wwid) { | ||
| 1554 | if (mv_dev->id != id) { | ||
| 1555 | dev_err(&mhba->pdev->dev, | ||
| 1556 | "%s has same wwid[%llx] ," | ||
| 1557 | " but different id[%d %d]\n", | ||
| 1558 | __func__, mv_dev->wwid, mv_dev->id, id); | ||
| 1559 | return -1; | ||
| 1560 | } else { | ||
| 1561 | if (mhba->pdev->device == | ||
| 1562 | PCI_DEVICE_ID_MARVELL_MV9143) | ||
| 1563 | mvumi_rescan_devices(mhba, id); | ||
| 1564 | return 1; | ||
| 1565 | } | ||
| 1566 | } | ||
| 1567 | } | ||
| 1568 | return 0; | ||
| 1569 | } | ||
| 1570 | |||
| 1571 | static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) | ||
| 1572 | { | ||
| 1573 | struct mvumi_device *mv_dev = NULL, *dev_next; | ||
| 1574 | |||
| 1575 | list_for_each_entry_safe(mv_dev, dev_next, | ||
| 1576 | &mhba->shost_dev_list, list) { | ||
| 1577 | if (mv_dev->id == id) { | ||
| 1578 | dev_dbg(&mhba->pdev->dev, | ||
| 1579 | "detach device(0:%d:0) wwid(%llx) from HOST\n", | ||
| 1580 | mv_dev->id, mv_dev->wwid); | ||
| 1581 | mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); | ||
| 1582 | list_del_init(&mv_dev->list); | ||
| 1583 | kfree(mv_dev); | ||
| 1584 | } | ||
| 1585 | } | ||
| 1586 | } | ||
| 1587 | |||
| 1588 | static int mvumi_probe_devices(struct mvumi_hba *mhba) | ||
| 1589 | { | ||
| 1590 | int id, maxid; | ||
| 1591 | u64 wwid = 0; | ||
| 1592 | struct mvumi_device *mv_dev = NULL; | ||
| 1593 | struct mvumi_cmd *cmd = NULL; | ||
| 1594 | int found = 0; | ||
| 1595 | |||
| 1596 | cmd = mvumi_create_internal_cmd(mhba, 64); | ||
| 1597 | if (!cmd) | ||
| 1598 | return -1; | ||
| 1599 | |||
| 1600 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) | ||
| 1601 | maxid = mhba->max_target_id; | ||
| 1602 | else | ||
| 1603 | maxid = mhba->max_target_id - 1; | ||
| 1604 | |||
| 1605 | for (id = 0; id < maxid; id++) { | ||
| 1606 | wwid = mvumi_inquiry(mhba, id, cmd); | ||
| 1607 | if (!wwid) { | ||
| 1608 | /* device did not respond, remove it */ | ||
| 1609 | mvumi_remove_devices(mhba, id); | ||
| 1610 | } else { | ||
| 1611 | /* device responded, add it */ | ||
| 1612 | found = mvumi_match_devices(mhba, id, wwid); | ||
| 1613 | if (!found) { | ||
| 1614 | mvumi_remove_devices(mhba, id); | ||
| 1615 | mv_dev = kzalloc(sizeof(struct mvumi_device), | ||
| 1616 | GFP_KERNEL); | ||
| 1617 | if (!mv_dev) { | ||
| 1618 | dev_err(&mhba->pdev->dev, | ||
| 1619 | "%s alloc mv_dev failed\n", | ||
| 1620 | __func__); | ||
| 1621 | continue; | ||
| 1622 | } | ||
| 1623 | mv_dev->id = id; | ||
| 1624 | mv_dev->wwid = wwid; | ||
| 1625 | mv_dev->sdev = NULL; | ||
| 1626 | INIT_LIST_HEAD(&mv_dev->list); | ||
| 1627 | list_add_tail(&mv_dev->list, | ||
| 1628 | &mhba->mhba_dev_list); | ||
| 1629 | dev_dbg(&mhba->pdev->dev, | ||
| 1630 | "probe a new device(0:%d:0)" | ||
| 1631 | " wwid(%llx)\n", id, mv_dev->wwid); | ||
| 1632 | } else if (found == -1) { | ||
| 1633 | /* id conflict for this wwid: free the internal cmd first */ | ||
| 1634 | mvumi_delete_internal_cmd(mhba, cmd); | ||
| 1635 | return -1; | ||
| 1636 | } else | ||
| 1637 | continue; | ||
| 1636 | } | ||
| 1637 | } | ||
| 1638 | |||
| 1639 | if (cmd) | ||
| 1640 | mvumi_delete_internal_cmd(mhba, cmd); | ||
| 1641 | |||
| 1642 | return 0; | ||
| 1643 | } | ||
| 1644 | |||
| 1645 | static int mvumi_rescan_bus(void *data) | ||
| 1646 | { | ||
| 1647 | int ret = 0; | ||
| 1648 | struct mvumi_hba *mhba = (struct mvumi_hba *) data; | ||
| 1649 | struct mvumi_device *mv_dev = NULL, *dev_next; | ||
| 1650 | |||
| 1651 | while (!kthread_should_stop()) { | ||
| 1652 | |||
| 1653 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1654 | if (!atomic_read(&mhba->pnp_count)) | ||
| 1655 | schedule(); | ||
| 1656 | msleep(1000); | ||
| 1657 | atomic_set(&mhba->pnp_count, 0); | ||
| 1658 | __set_current_state(TASK_RUNNING); | ||
| 1659 | |||
| 1660 | mutex_lock(&mhba->device_lock); | ||
| 1661 | ret = mvumi_probe_devices(mhba); | ||
| 1662 | if (!ret) { | ||
| 1663 | list_for_each_entry_safe(mv_dev, dev_next, | ||
| 1664 | &mhba->mhba_dev_list, list) { | ||
| 1665 | if (mvumi_handle_hotplug(mhba, mv_dev->id, | ||
| 1666 | DEVICE_ONLINE)) { | ||
| 1667 | dev_err(&mhba->pdev->dev, | ||
| 1668 | "%s add device(0:%d:0) failed, " | ||
| 1669 | "wwid(%llx) already exists\n", | ||
| 1670 | __func__, | ||
| 1671 | mv_dev->id, mv_dev->wwid); | ||
| 1672 | list_del_init(&mv_dev->list); | ||
| 1673 | kfree(mv_dev); | ||
| 1674 | } else { | ||
| 1675 | list_move_tail(&mv_dev->list, | ||
| 1676 | &mhba->shost_dev_list); | ||
| 1677 | } | ||
| 1678 | } | ||
| 1679 | } | ||
| 1680 | mutex_unlock(&mhba->device_lock); | ||
| 1681 | } | ||
| 1682 | return 0; | ||
| 1683 | } | ||
| 1684 | |||
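mvumi_rescan_bus() uses the canonical kthread sleep/wake pattern: the task state is set to TASK_INTERRUPTIBLE before pnp_count is tested, so a wake_up_process() from the interrupt path that races with the test simply makes schedule() return immediately instead of being lost. Reduced to its essentials (a sketch of the loop above):

	set_current_state(TASK_INTERRUPTIBLE);
	if (!atomic_read(&mhba->pnp_count))
		schedule();	/* sleeps until mvumi_launch_events() wakes us */
	__set_current_state(TASK_RUNNING);
	/* ... then reset pnp_count and reprobe under device_lock ... */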
| 1685 | static void mvumi_proc_msg(struct mvumi_hba *mhba, | ||
| 1686 | struct mvumi_hotplug_event *param) | ||
| 1687 | { | ||
| 1688 | u16 size = param->size; | ||
| 1689 | const unsigned long *ar_bitmap; | ||
| 1690 | const unsigned long *re_bitmap; | ||
| 1691 | int index; | ||
| 1692 | |||
| 1693 | if (mhba->fw_flag & MVUMI_FW_ATTACH) { | ||
| 1694 | index = -1; | ||
| 1695 | ar_bitmap = (const unsigned long *) param->bitmap; | ||
| 1696 | re_bitmap = (const unsigned long *) ¶m->bitmap[size >> 3]; | ||
| 1697 | |||
| 1698 | mutex_lock(&mhba->sas_discovery_mutex); | ||
| 1699 | do { | ||
| 1700 | index = find_next_zero_bit(ar_bitmap, size, index + 1); | ||
| 1701 | if (index >= size) | ||
| 1702 | break; | ||
| 1703 | mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); | ||
| 1704 | } while (1); | ||
| 1705 | |||
| 1706 | index = -1; | ||
| 1707 | do { | ||
| 1708 | index = find_next_zero_bit(re_bitmap, size, index + 1); | ||
| 1709 | if (index >= size) | ||
| 1710 | break; | ||
| 1711 | mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); | ||
| 1712 | } while (1); | ||
| 1713 | mutex_unlock(&mhba->sas_discovery_mutex); | ||
| 1714 | } | ||
| 1715 | } | ||
| 1716 | |||
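The hotplug payload carries two bitmaps back to back, each size/8 bytes long: first the attach map, then the remove map, with clear bits marking affected target ids. Kernels of this vintage already provide for_each_clear_bit(), so the two open-coded scans above could equivalently be written as (sketch only, not part of the patch):

	int bit;

	for_each_clear_bit(bit, ar_bitmap, size)
		mvumi_handle_hotplug(mhba, bit, DEVICE_ONLINE);
	for_each_clear_bit(bit, re_bitmap, size)
		mvumi_handle_hotplug(mhba, bit, DEVICE_OFFLINE);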
| 1213 | static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) | 1717 | static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) |
| 1214 | { | 1718 | { |
| 1215 | if (msg == APICDB1_EVENT_GETEVENT) { | 1719 | if (msg == APICDB1_EVENT_GETEVENT) { |
| @@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) | |||
| 1227 | param = &er->events[i]; | 1731 | param = &er->events[i]; |
| 1228 | mvumi_show_event(mhba, param); | 1732 | mvumi_show_event(mhba, param); |
| 1229 | } | 1733 | } |
| 1734 | } else if (msg == APICDB1_HOST_GETEVENT) { | ||
| 1735 | mvumi_proc_msg(mhba, buffer); | ||
| 1230 | } | 1736 | } |
| 1231 | } | 1737 | } |
| 1232 | 1738 | ||
| @@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work) | |||
| 1271 | kfree(mu_ev); | 1777 | kfree(mu_ev); |
| 1272 | } | 1778 | } |
| 1273 | 1779 | ||
| 1274 | static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg) | 1780 | static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) |
| 1275 | { | 1781 | { |
| 1276 | struct mvumi_events_wq *mu_ev; | 1782 | struct mvumi_events_wq *mu_ev; |
| 1277 | 1783 | ||
| 1278 | mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); | 1784 | while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { |
| 1279 | if (mu_ev) { | 1785 | if (isr_status & DRBL_BUS_CHANGE) { |
| 1280 | INIT_WORK(&mu_ev->work_q, mvumi_scan_events); | 1786 | atomic_inc(&mhba->pnp_count); |
| 1281 | mu_ev->mhba = mhba; | 1787 | wake_up_process(mhba->dm_thread); |
| 1282 | mu_ev->event = msg; | 1788 | isr_status &= ~(DRBL_BUS_CHANGE); |
| 1283 | mu_ev->param = NULL; | 1789 | continue; |
| 1284 | schedule_work(&mu_ev->work_q); | 1790 | } |
| 1791 | |||
| 1792 | mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); | ||
| 1793 | if (mu_ev) { | ||
| 1794 | INIT_WORK(&mu_ev->work_q, mvumi_scan_events); | ||
| 1795 | mu_ev->mhba = mhba; | ||
| 1796 | mu_ev->event = APICDB1_EVENT_GETEVENT; | ||
| 1797 | mu_ev->param = NULL; | ||
| 1798 | schedule_work(&mu_ev->work_q); | ||
| 1799 | } | ||
| 1800 | /* clear the bit even if allocation failed, or this loop spins */ | ||
| 1801 | isr_status &= ~(DRBL_EVENT_NOTIFY); | ||
| 1285 | } | 1801 | } |
| 1286 | } | 1802 | } |
| 1287 | 1803 | ||
| @@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp) | |||
| 1322 | return IRQ_NONE; | 1838 | return IRQ_NONE; |
| 1323 | } | 1839 | } |
| 1324 | 1840 | ||
| 1325 | if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) { | 1841 | if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { |
| 1842 | if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) | ||
| 1843 | mvumi_launch_events(mhba, mhba->isr_status); | ||
| 1326 | if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { | 1844 | if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { |
| 1327 | dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); | 1845 | dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); |
| 1328 | mvumi_handshake(mhba); | 1846 | mvumi_handshake(mhba); |
| 1329 | } | 1847 | } |
| 1330 | if (mhba->isr_status & DRBL_EVENT_NOTIFY) | 1848 | |
| 1331 | mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT); | ||
| 1332 | } | 1849 | } |
| 1333 | 1850 | ||
| 1334 | if (mhba->global_isr & INT_MAP_COMAOUT) | 1851 | if (mhba->global_isr & mhba->regs->int_comaout) |
| 1335 | mvumi_receive_ob_list_entry(mhba); | 1852 | mvumi_receive_ob_list_entry(mhba); |
| 1336 | 1853 | ||
| 1337 | mhba->global_isr = 0; | 1854 | mhba->global_isr = 0; |
| @@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, | |||
| 1358 | dev_dbg(&mhba->pdev->dev, "no free tag.\n"); | 1875 | dev_dbg(&mhba->pdev->dev, "no free tag.\n"); |
| 1359 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; | 1876 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; |
| 1360 | } | 1877 | } |
| 1361 | if (mvumi_get_ib_list_entry(mhba, &ib_entry)) | 1878 | mvumi_get_ib_list_entry(mhba, &ib_entry); |
| 1362 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; | ||
| 1363 | 1879 | ||
| 1364 | cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); | 1880 | cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); |
| 1365 | cmd->frame->request_id = mhba->io_seq++; | 1881 | cmd->frame->request_id = mhba->io_seq++; |
| @@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, | |||
| 1367 | mhba->tag_cmd[cmd->frame->tag] = cmd; | 1883 | mhba->tag_cmd[cmd->frame->tag] = cmd; |
| 1368 | frame_len = sizeof(*ib_frame) - 4 + | 1884 | frame_len = sizeof(*ib_frame) - 4 + |
| 1369 | ib_frame->sg_counts * sizeof(struct mvumi_sgl); | 1885 | ib_frame->sg_counts * sizeof(struct mvumi_sgl); |
| 1370 | memcpy(ib_entry, ib_frame, frame_len); | 1886 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { |
| 1887 | struct mvumi_dyn_list_entry *dle; | ||
| 1888 | dle = ib_entry; | ||
| 1889 | dle->src_low_addr = | ||
| 1890 | cpu_to_le32(lower_32_bits(cmd->frame_phys)); | ||
| 1891 | dle->src_high_addr = | ||
| 1892 | cpu_to_le32(upper_32_bits(cmd->frame_phys)); | ||
| 1893 | dle->if_length = (frame_len >> 2) & 0xFFF; | ||
| 1894 | } else { | ||
| 1895 | memcpy(ib_entry, ib_frame, frame_len); | ||
| 1896 | } | ||
| 1371 | return MV_QUEUE_COMMAND_RESULT_SENT; | 1897 | return MV_QUEUE_COMMAND_RESULT_SENT; |
| 1372 | } | 1898 | } |
| 1373 | 1899 | ||
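With HS_CAPABILITY_SUPPORT_DYN_SRC negotiated, the inbound slot no longer receives a copy of the whole frame; it receives a small descriptor carrying the frame's DMA address and its length in 32-bit words, packed into the low 12 bits of if_length. A hypothetical decode helper makes the packing explicit (name assumed, not driver code):

	/* 12 bits of dword count => at most 4095 dwords (~16KB) per frame */
	static inline u32 dle_frame_bytes(const struct mvumi_dyn_list_entry *dle)
	{
		return (dle->if_length & 0xFFF) << 2;	/* dwords back to bytes */
	}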
| 1374 | static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) | 1900 | static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) |
| 1375 | { | 1901 | { |
| 1376 | unsigned short num_of_cl_sent = 0; | 1902 | unsigned short num_of_cl_sent = 0; |
| 1903 | unsigned int count; | ||
| 1377 | enum mvumi_qc_result result; | 1904 | enum mvumi_qc_result result; |
| 1378 | 1905 | ||
| 1379 | if (cmd) | 1906 | if (cmd) |
| 1380 | list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); | 1907 | list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); |
| 1908 | count = mhba->instancet->check_ib_list(mhba); | ||
| 1909 | if (list_empty(&mhba->waiting_req_list) || !count) | ||
| 1910 | return; | ||
| 1381 | 1911 | ||
| 1382 | while (!list_empty(&mhba->waiting_req_list)) { | 1912 | do { |
| 1383 | cmd = list_first_entry(&mhba->waiting_req_list, | 1913 | cmd = list_first_entry(&mhba->waiting_req_list, |
| 1384 | struct mvumi_cmd, queue_pointer); | 1914 | struct mvumi_cmd, queue_pointer); |
| 1385 | list_del_init(&cmd->queue_pointer); | 1915 | list_del_init(&cmd->queue_pointer); |
| 1386 | result = mvumi_send_command(mhba, cmd); | 1916 | result = mvumi_send_command(mhba, cmd); |
| 1387 | switch (result) { | 1917 | switch (result) { |
| @@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) | |||
| 1395 | 1925 | ||
| 1396 | return; | 1926 | return; |
| 1397 | } | 1927 | } |
| 1398 | } | 1928 | } while (!list_empty(&mhba->waiting_req_list) && count--); |
| 1929 | |||
| 1399 | if (num_of_cl_sent > 0) | 1930 | if (num_of_cl_sent > 0) |
| 1400 | mvumi_send_ib_list_entry(mhba); | 1931 | mvumi_send_ib_list_entry(mhba); |
| 1401 | } | 1932 | } |
| 1402 | 1933 | ||
| 1403 | /** | 1934 | /** |
| 1404 | * mvumi_enable_intr - Enables interrupts | 1935 | * mvumi_enable_intr - Enables interrupts |
| 1405 | * @regs: FW register set | 1936 | * @mhba: Adapter soft state |
| 1406 | */ | 1937 | */ |
| 1407 | static void mvumi_enable_intr(void *regs) | 1938 | static void mvumi_enable_intr(struct mvumi_hba *mhba) |
| 1408 | { | 1939 | { |
| 1409 | unsigned int mask; | 1940 | unsigned int mask; |
| 1941 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 1410 | 1942 | ||
| 1411 | iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); | 1943 | iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); |
| 1412 | mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); | 1944 | mask = ioread32(regs->enpointa_mask_reg); |
| 1413 | mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR; | 1945 | mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; |
| 1414 | iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); | 1946 | iowrite32(mask, regs->enpointa_mask_reg); |
| 1415 | } | 1947 | } |
| 1416 | 1948 | ||
| 1417 | /** | 1949 | /** |
| 1418 | * mvumi_disable_intr -Disables interrupt | 1950 | * mvumi_disable_intr -Disables interrupt |
| 1419 | * @regs: FW register set | 1951 | * @mhba: Adapter soft state |
| 1420 | */ | 1952 | */ |
| 1421 | static void mvumi_disable_intr(void *regs) | 1953 | static void mvumi_disable_intr(struct mvumi_hba *mhba) |
| 1422 | { | 1954 | { |
| 1423 | unsigned int mask; | 1955 | unsigned int mask; |
| 1956 | struct mvumi_hw_regs *regs = mhba->regs; | ||
| 1424 | 1957 | ||
| 1425 | iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG); | 1958 | iowrite32(0, regs->arm_to_pciea_mask_reg); |
| 1426 | mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); | 1959 | mask = ioread32(regs->enpointa_mask_reg); |
| 1427 | mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR); | 1960 | mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | |
| 1428 | iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); | 1961 | regs->int_comaerr); |
| 1962 | iowrite32(mask, regs->enpointa_mask_reg); | ||
| 1429 | } | 1963 | } |
| 1430 | 1964 | ||
| 1431 | static int mvumi_clear_intr(void *extend) | 1965 | static int mvumi_clear_intr(void *extend) |
| 1432 | { | 1966 | { |
| 1433 | struct mvumi_hba *mhba = (struct mvumi_hba *) extend; | 1967 | struct mvumi_hba *mhba = (struct mvumi_hba *) extend; |
| 1434 | unsigned int status, isr_status = 0, tmp = 0; | 1968 | unsigned int status, isr_status = 0, tmp = 0; |
| 1435 | void *regs = mhba->mmio; | 1969 | struct mvumi_hw_regs *regs = mhba->regs; |
| 1436 | 1970 | ||
| 1437 | status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG); | 1971 | status = ioread32(regs->main_int_cause_reg); |
| 1438 | if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF) | 1972 | if (!(status & regs->int_mu) || status == 0xFFFFFFFF) |
| 1439 | return 1; | 1973 | return 1; |
| 1440 | if (unlikely(status & INT_MAP_COMAERR)) { | 1974 | if (unlikely(status & regs->int_comaerr)) { |
| 1441 | tmp = ioread32(regs + CLA_ISR_CAUSE); | 1975 | tmp = ioread32(regs->outb_isr_cause); |
| 1442 | if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ)) | 1976 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { |
| 1443 | iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ), | 1977 | if (tmp & regs->clic_out_err) { |
| 1444 | regs + CLA_ISR_CAUSE); | 1978 | iowrite32(tmp & regs->clic_out_err, |
| 1445 | status ^= INT_MAP_COMAERR; | 1979 | regs->outb_isr_cause); |
| 1980 | } | ||
| 1981 | } else { | ||
| 1982 | if (tmp & (regs->clic_in_err | regs->clic_out_err)) | ||
| 1983 | iowrite32(tmp & (regs->clic_in_err | | ||
| 1984 | regs->clic_out_err), | ||
| 1985 | regs->outb_isr_cause); | ||
| 1986 | } | ||
| 1987 | status ^= mhba->regs->int_comaerr; | ||
| 1446 | /* inbound or outbound parity error, command will time out */ | 1988 | /* inbound or outbound parity error, command will time out */ |
| 1447 | } | 1989 | } |
| 1448 | if (status & INT_MAP_COMAOUT) { | 1990 | if (status & regs->int_comaout) { |
| 1449 | tmp = ioread32(regs + CLA_ISR_CAUSE); | 1991 | tmp = ioread32(regs->outb_isr_cause); |
| 1450 | if (tmp & CLIC_OUT_IRQ) | 1992 | if (tmp & regs->clic_irq) |
| 1451 | iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE); | 1993 | iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); |
| 1452 | } | 1994 | } |
| 1453 | if (status & INT_MAP_DL_CPU2PCIEA) { | 1995 | if (status & regs->int_dl_cpu2pciea) { |
| 1454 | isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 1996 | isr_status = ioread32(regs->arm_to_pciea_drbl_reg); |
| 1455 | if (isr_status) | 1997 | if (isr_status) |
| 1456 | iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 1998 | iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); |
| 1457 | } | 1999 | } |
| 1458 | 2000 | ||
| 1459 | mhba->global_isr = status; | 2001 | mhba->global_isr = status; |
| @@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend) | |||
| 1464 | 2006 | ||
| 1465 | /** | 2007 | /** |
| 1466 | * mvumi_read_fw_status_reg - returns the current FW status value | 2008 | * mvumi_read_fw_status_reg - returns the current FW status value |
| 1467 | * @regs: FW register set | 2009 | * @mhba: Adapter soft state |
| 1468 | */ | 2010 | */ |
| 1469 | static unsigned int mvumi_read_fw_status_reg(void *regs) | 2011 | static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) |
| 1470 | { | 2012 | { |
| 1471 | unsigned int status; | 2013 | unsigned int status; |
| 1472 | 2014 | ||
| 1473 | status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 2015 | status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); |
| 1474 | if (status) | 2016 | if (status) |
| 1475 | iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); | 2017 | iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); |
| 1476 | return status; | 2018 | return status; |
| 1477 | } | 2019 | } |
| 1478 | 2020 | ||
| 1479 | static struct mvumi_instance_template mvumi_instance_template = { | 2021 | static struct mvumi_instance_template mvumi_instance_9143 = { |
| 1480 | .fire_cmd = mvumi_fire_cmd, | 2022 | .fire_cmd = mvumi_fire_cmd, |
| 1481 | .enable_intr = mvumi_enable_intr, | 2023 | .enable_intr = mvumi_enable_intr, |
| 1482 | .disable_intr = mvumi_disable_intr, | 2024 | .disable_intr = mvumi_disable_intr, |
| 1483 | .clear_intr = mvumi_clear_intr, | 2025 | .clear_intr = mvumi_clear_intr, |
| 1484 | .read_fw_status_reg = mvumi_read_fw_status_reg, | 2026 | .read_fw_status_reg = mvumi_read_fw_status_reg, |
| 2027 | .check_ib_list = mvumi_check_ib_list_9143, | ||
| 2028 | .check_ob_list = mvumi_check_ob_list_9143, | ||
| 2029 | .reset_host = mvumi_reset_host_9143, | ||
| 2030 | }; | ||
| 2031 | |||
| 2032 | static struct mvumi_instance_template mvumi_instance_9580 = { | ||
| 2033 | .fire_cmd = mvumi_fire_cmd, | ||
| 2034 | .enable_intr = mvumi_enable_intr, | ||
| 2035 | .disable_intr = mvumi_disable_intr, | ||
| 2036 | .clear_intr = mvumi_clear_intr, | ||
| 2037 | .read_fw_status_reg = mvumi_read_fw_status_reg, | ||
| 2038 | .check_ib_list = mvumi_check_ib_list_9580, | ||
| 2039 | .check_ob_list = mvumi_check_ob_list_9580, | ||
| 2040 | .reset_host = mvumi_reset_host_9580, | ||
| 1485 | }; | 2041 | }; |
| 1486 | 2042 | ||
| 1487 | static int mvumi_slave_configure(struct scsi_device *sdev) | 2043 | static int mvumi_slave_configure(struct scsi_device *sdev) |
| @@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = { | |||
| 1681 | .eh_timed_out = mvumi_timed_out, | 2237 | .eh_timed_out = mvumi_timed_out, |
| 1682 | }; | 2238 | }; |
| 1683 | 2239 | ||
| 2240 | static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) | ||
| 2241 | { | ||
| 2242 | void *base = NULL; | ||
| 2243 | struct mvumi_hw_regs *regs; | ||
| 2244 | |||
| 2245 | switch (mhba->pdev->device) { | ||
| 2246 | case PCI_DEVICE_ID_MARVELL_MV9143: | ||
| 2247 | mhba->mmio = mhba->base_addr[0]; | ||
| 2248 | base = mhba->mmio; | ||
| 2249 | if (!mhba->regs) { | ||
| 2250 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); | ||
| 2251 | if (mhba->regs == NULL) | ||
| 2252 | return -ENOMEM; | ||
| 2253 | } | ||
| 2254 | regs = mhba->regs; | ||
| 2255 | |||
| 2256 | /* For Arm */ | ||
| 2257 | regs->ctrl_sts_reg = base + 0x20104; | ||
| 2258 | regs->rstoutn_mask_reg = base + 0x20108; | ||
| 2259 | regs->sys_soft_rst_reg = base + 0x2010C; | ||
| 2260 | regs->main_int_cause_reg = base + 0x20200; | ||
| 2261 | regs->enpointa_mask_reg = base + 0x2020C; | ||
| 2262 | regs->rstoutn_en_reg = base + 0xF1400; | ||
| 2263 | /* For Doorbell */ | ||
| 2264 | regs->pciea_to_arm_drbl_reg = base + 0x20400; | ||
| 2265 | regs->arm_to_pciea_drbl_reg = base + 0x20408; | ||
| 2266 | regs->arm_to_pciea_mask_reg = base + 0x2040C; | ||
| 2267 | regs->pciea_to_arm_msg0 = base + 0x20430; | ||
| 2268 | regs->pciea_to_arm_msg1 = base + 0x20434; | ||
| 2269 | regs->arm_to_pciea_msg0 = base + 0x20438; | ||
| 2270 | regs->arm_to_pciea_msg1 = base + 0x2043C; | ||
| 2271 | |||
| 2272 | /* For Message Unit */ | ||
| 2273 | |||
| 2274 | regs->inb_aval_count_basel = base + 0x508; | ||
| 2275 | regs->inb_aval_count_baseh = base + 0x50C; | ||
| 2276 | regs->inb_write_pointer = base + 0x518; | ||
| 2277 | regs->inb_read_pointer = base + 0x51C; | ||
| 2278 | regs->outb_coal_cfg = base + 0x568; | ||
| 2279 | regs->outb_copy_basel = base + 0x5B0; | ||
| 2280 | regs->outb_copy_baseh = base + 0x5B4; | ||
| 2281 | regs->outb_copy_pointer = base + 0x544; | ||
| 2282 | regs->outb_read_pointer = base + 0x548; | ||
| 2283 | regs->outb_isr_cause = base + 0x560; | ||
| 2285 | /* Bit setting for HW */ | ||
| 2286 | regs->int_comaout = 1 << 8; | ||
| 2287 | regs->int_comaerr = 1 << 6; | ||
| 2288 | regs->int_dl_cpu2pciea = 1 << 1; | ||
| 2289 | regs->cl_pointer_toggle = 1 << 12; | ||
| 2290 | regs->clic_irq = 1 << 1; | ||
| 2291 | regs->clic_in_err = 1 << 8; | ||
| 2292 | regs->clic_out_err = 1 << 12; | ||
| 2293 | regs->cl_slot_num_mask = 0xFFF; | ||
| 2294 | regs->int_drbl_int_mask = 0x3FFFFFFF; | ||
| 2295 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | | ||
| 2296 | regs->int_comaerr; | ||
| 2297 | break; | ||
| 2298 | case PCI_DEVICE_ID_MARVELL_MV9580: | ||
| 2299 | mhba->mmio = mhba->base_addr[2]; | ||
| 2300 | base = mhba->mmio; | ||
| 2301 | if (!mhba->regs) { | ||
| 2302 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); | ||
| 2303 | if (mhba->regs == NULL) | ||
| 2304 | return -ENOMEM; | ||
| 2305 | } | ||
| 2306 | regs = mhba->regs; | ||
| 2307 | /* For Arm */ | ||
| 2308 | regs->ctrl_sts_reg = base + 0x20104; | ||
| 2309 | regs->rstoutn_mask_reg = base + 0x1010C; | ||
| 2310 | regs->sys_soft_rst_reg = base + 0x10108; | ||
| 2311 | regs->main_int_cause_reg = base + 0x10200; | ||
| 2312 | regs->enpointa_mask_reg = base + 0x1020C; | ||
| 2313 | regs->rstoutn_en_reg = base + 0xF1400; | ||
| 2314 | |||
| 2315 | /* For Doorbell */ | ||
| 2316 | regs->pciea_to_arm_drbl_reg = base + 0x10460; | ||
| 2317 | regs->arm_to_pciea_drbl_reg = base + 0x10480; | ||
| 2318 | regs->arm_to_pciea_mask_reg = base + 0x10484; | ||
| 2319 | regs->pciea_to_arm_msg0 = base + 0x10400; | ||
| 2320 | regs->pciea_to_arm_msg1 = base + 0x10404; | ||
| 2321 | regs->arm_to_pciea_msg0 = base + 0x10420; | ||
| 2322 | regs->arm_to_pciea_msg1 = base + 0x10424; | ||
| 2323 | |||
| 2324 | /* For reset*/ | ||
| 2325 | regs->reset_request = base + 0x10108; | ||
| 2326 | regs->reset_enable = base + 0x1010c; | ||
| 2327 | |||
| 2328 | /* For Message Unit */ | ||
| 2329 | regs->inb_aval_count_basel = base + 0x4008; | ||
| 2330 | regs->inb_aval_count_baseh = base + 0x400C; | ||
| 2331 | regs->inb_write_pointer = base + 0x4018; | ||
| 2332 | regs->inb_read_pointer = base + 0x401C; | ||
| 2333 | regs->outb_copy_basel = base + 0x4058; | ||
| 2334 | regs->outb_copy_baseh = base + 0x405C; | ||
| 2335 | regs->outb_copy_pointer = base + 0x406C; | ||
| 2336 | regs->outb_read_pointer = base + 0x4070; | ||
| 2337 | regs->outb_coal_cfg = base + 0x4080; | ||
| 2338 | regs->outb_isr_cause = base + 0x4088; | ||
| 2339 | /* Bit setting for HW */ | ||
| 2340 | regs->int_comaout = 1 << 4; | ||
| 2341 | regs->int_dl_cpu2pciea = 1 << 12; | ||
| 2342 | regs->int_comaerr = 1 << 29; | ||
| 2343 | regs->cl_pointer_toggle = 1 << 14; | ||
| 2344 | regs->cl_slot_num_mask = 0x3FFF; | ||
| 2345 | regs->clic_irq = 1 << 0; | ||
| 2346 | regs->clic_out_err = 1 << 1; | ||
| 2347 | regs->int_drbl_int_mask = 0x3FFFFFFF; | ||
| 2348 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; | ||
| 2349 | break; | ||
| 2350 | default: | ||
| 2351 | return -1; | ||
| 2353 | } | ||
| 2354 | |||
| 2355 | return 0; | ||
| 2356 | } | ||
| 2357 | |||
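Once mvumi_cfg_hw_reg() has filled in the per-chip map, shared code touches the hardware only through mhba->regs, so the same ISR path serves both the 9143 and the 9580. A minimal illustration (hypothetical helper, mirroring the check at the top of mvumi_clear_intr()):

	static bool mvumi_mu_intr_pending(struct mvumi_hba *mhba)
	{
		u32 cause = ioread32(mhba->regs->main_int_cause_reg);

		/* 0xFFFFFFFF means the device has fallen off the bus */
		return cause != 0xFFFFFFFF && (cause & mhba->regs->int_mu);
	}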
| 1684 | /** | 2358 | /** |
| 1685 | * mvumi_init_fw - Initializes the FW | 2359 | * mvumi_init_fw - Initializes the FW |
| 1686 | * @mhba: Adapter soft state | 2360 | * @mhba: Adapter soft state |
| @@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) | |||
| 1699 | if (ret) | 2373 | if (ret) |
| 1700 | goto fail_ioremap; | 2374 | goto fail_ioremap; |
| 1701 | 2375 | ||
| 1702 | mhba->mmio = mhba->base_addr[0]; | ||
| 1703 | |||
| 1704 | switch (mhba->pdev->device) { | 2376 | switch (mhba->pdev->device) { |
| 1705 | case PCI_DEVICE_ID_MARVELL_MV9143: | 2377 | case PCI_DEVICE_ID_MARVELL_MV9143: |
| 1706 | mhba->instancet = &mvumi_instance_template; | 2378 | mhba->instancet = &mvumi_instance_9143; |
| 1707 | mhba->io_seq = 0; | 2379 | mhba->io_seq = 0; |
| 1708 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; | 2380 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; |
| 1709 | mhba->request_id_enabled = 1; | 2381 | mhba->request_id_enabled = 1; |
| 1710 | break; | 2382 | break; |
| 2383 | case PCI_DEVICE_ID_MARVELL_MV9580: | ||
| 2384 | mhba->instancet = &mvumi_instance_9580; | ||
| 2385 | mhba->io_seq = 0; | ||
| 2386 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; | ||
| 2387 | break; | ||
| 1711 | default: | 2388 | default: |
| 1712 | dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", | 2389 | dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", |
| 1713 | mhba->pdev->device); | 2390 | mhba->pdev->device); |
| @@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) | |||
| 1717 | } | 2394 | } |
| 1718 | dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", | 2395 | dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", |
| 1719 | mhba->pdev->device); | 2396 | mhba->pdev->device); |
| 1720 | 2397 | ret = mvumi_cfg_hw_reg(mhba); | |
| 1721 | mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL); | 2398 | if (ret) { |
| 2399 | dev_err(&mhba->pdev->dev, | ||
| 2400 | "failed to allocate memory for reg\n"); | ||
| 2401 | ret = -ENOMEM; | ||
| 2402 | goto fail_alloc_mem; | ||
| 2403 | } | ||
| 2404 | mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE, | ||
| 2405 | &mhba->handshake_page_phys); | ||
| 1722 | if (!mhba->handshake_page) { | 2406 | if (!mhba->handshake_page) { |
| 1723 | dev_err(&mhba->pdev->dev, | 2407 | dev_err(&mhba->pdev->dev, |
| 1724 | "failed to allocate memory for handshake\n"); | 2408 | "failed to allocate memory for handshake\n"); |
| 1725 | ret = -ENOMEM; | 2409 | ret = -ENOMEM; |
| 1726 | goto fail_alloc_mem; | 2410 | goto fail_alloc_page; |
| 1727 | } | 2411 | } |
| 1728 | mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page); | ||
| 1729 | 2412 | ||
| 1730 | if (mvumi_start(mhba)) { | 2413 | if (mvumi_start(mhba)) { |
| 1731 | ret = -EINVAL; | 2414 | ret = -EINVAL; |
| @@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) | |||
| 1739 | 2422 | ||
| 1740 | fail_ready_state: | 2423 | fail_ready_state: |
| 1741 | mvumi_release_mem_resource(mhba); | 2424 | mvumi_release_mem_resource(mhba); |
| 1742 | kfree(mhba->handshake_page); | 2425 | pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, |
| 2426 | mhba->handshake_page, mhba->handshake_page_phys); | ||
| 2427 | fail_alloc_page: | ||
| 2428 | kfree(mhba->regs); | ||
| 1743 | fail_alloc_mem: | 2429 | fail_alloc_mem: |
| 1744 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); | 2430 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); |
| 1745 | fail_ioremap: | 2431 | fail_ioremap: |
| @@ -1755,6 +2441,7 @@ fail_ioremap: | |||
| 1755 | static int mvumi_io_attach(struct mvumi_hba *mhba) | 2441 | static int mvumi_io_attach(struct mvumi_hba *mhba) |
| 1756 | { | 2442 | { |
| 1757 | struct Scsi_Host *host = mhba->shost; | 2443 | struct Scsi_Host *host = mhba->shost; |
| 2444 | struct scsi_device *sdev = NULL; | ||
| 1758 | int ret; | 2445 | int ret; |
| 1759 | unsigned int max_sg = (mhba->ib_max_size + 4 - | 2446 | unsigned int max_sg = (mhba->ib_max_size + 4 - |
| 1760 | sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); | 2447 | sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); |
| @@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) | |||
| 1764 | host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; | 2451 | host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
| 1765 | host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; | 2452 | host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; |
| 1766 | host->max_sectors = mhba->max_transfer_size / 512; | 2453 | host->max_sectors = mhba->max_transfer_size / 512; |
| 1767 | host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; | 2454 | host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
| 1768 | host->max_id = mhba->max_target_id; | 2455 | host->max_id = mhba->max_target_id; |
| 1769 | host->max_cmd_len = MAX_COMMAND_SIZE; | 2456 | host->max_cmd_len = MAX_COMMAND_SIZE; |
| 1770 | host->transportt = &mvumi_transport_template; | 2457 | host->transportt = &mvumi_transport_template; |
| @@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) | |||
| 1775 | return ret; | 2462 | return ret; |
| 1776 | } | 2463 | } |
| 1777 | mhba->fw_flag |= MVUMI_FW_ATTACH; | 2464 | mhba->fw_flag |= MVUMI_FW_ATTACH; |
| 1778 | scsi_scan_host(host); | ||
| 1779 | 2465 | ||
| 2466 | mutex_lock(&mhba->sas_discovery_mutex); | ||
| 2467 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) | ||
| 2468 | ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); | ||
| 2469 | else | ||
| 2470 | ret = 0; | ||
| 2471 | if (ret) { | ||
| 2472 | dev_err(&mhba->pdev->dev, "add virtual device failed\n"); | ||
| 2473 | mutex_unlock(&mhba->sas_discovery_mutex); | ||
| 2474 | goto fail_add_device; | ||
| 2475 | } | ||
| 2476 | |||
| 2477 | mhba->dm_thread = kthread_create(mvumi_rescan_bus, | ||
| 2478 | mhba, "mvumi_scanthread"); | ||
| 2479 | if (IS_ERR(mhba->dm_thread)) { | ||
| 2480 | dev_err(&mhba->pdev->dev, | ||
| 2481 | "failed to create device scan thread\n"); | ||
| 2482 | mutex_unlock(&mhba->sas_discovery_mutex); | ||
| 2483 | goto fail_create_thread; | ||
| 2484 | } | ||
| 2485 | atomic_set(&mhba->pnp_count, 1); | ||
| 2486 | wake_up_process(mhba->dm_thread); | ||
| 2487 | |||
| 2488 | mutex_unlock(&mhba->sas_discovery_mutex); | ||
| 1780 | return 0; | 2489 | return 0; |
| 2490 | |||
| 2491 | fail_create_thread: | ||
| 2492 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) | ||
| 2493 | sdev = scsi_device_lookup(mhba->shost, 0, | ||
| 2494 | mhba->max_target_id - 1, 0); | ||
| 2495 | if (sdev) { | ||
| 2496 | scsi_remove_device(sdev); | ||
| 2497 | scsi_device_put(sdev); | ||
| 2498 | } | ||
| 2499 | fail_add_device: | ||
| 2500 | scsi_remove_host(mhba->shost); | ||
| 2501 | return ret; | ||
| 1781 | } | 2502 | } |
| 1782 | 2503 | ||
| 1783 | /** | 2504 | /** |
| @@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev, | |||
| 1828 | INIT_LIST_HEAD(&mhba->free_ob_list); | 2549 | INIT_LIST_HEAD(&mhba->free_ob_list); |
| 1829 | INIT_LIST_HEAD(&mhba->res_list); | 2550 | INIT_LIST_HEAD(&mhba->res_list); |
| 1830 | INIT_LIST_HEAD(&mhba->waiting_req_list); | 2551 | INIT_LIST_HEAD(&mhba->waiting_req_list); |
| 2552 | mutex_init(&mhba->device_lock); | ||
| 2553 | INIT_LIST_HEAD(&mhba->mhba_dev_list); | ||
| 2554 | INIT_LIST_HEAD(&mhba->shost_dev_list); | ||
| 1831 | atomic_set(&mhba->fw_outstanding, 0); | 2555 | atomic_set(&mhba->fw_outstanding, 0); |
| 1832 | init_waitqueue_head(&mhba->int_cmd_wait_q); | 2556 | init_waitqueue_head(&mhba->int_cmd_wait_q); |
| 2557 | mutex_init(&mhba->sas_discovery_mutex); | ||
| 1833 | 2558 | ||
| 1834 | mhba->pdev = pdev; | 2559 | mhba->pdev = pdev; |
| 1835 | mhba->shost = host; | 2560 | mhba->shost = host; |
| @@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev, | |||
| 1845 | dev_err(&pdev->dev, "failed to register IRQ\n"); | 2570 | dev_err(&pdev->dev, "failed to register IRQ\n"); |
| 1846 | goto fail_init_irq; | 2571 | goto fail_init_irq; |
| 1847 | } | 2572 | } |
| 1848 | mhba->instancet->enable_intr(mhba->mmio); | 2573 | |
| 2574 | mhba->instancet->enable_intr(mhba); | ||
| 1849 | pci_set_drvdata(pdev, mhba); | 2575 | pci_set_drvdata(pdev, mhba); |
| 1850 | 2576 | ||
| 1851 | ret = mvumi_io_attach(mhba); | 2577 | ret = mvumi_io_attach(mhba); |
| 1852 | if (ret) | 2578 | if (ret) |
| 1853 | goto fail_io_attach; | 2579 | goto fail_io_attach; |
| 2580 | |||
| 2581 | mvumi_backup_bar_addr(mhba); | ||
| 1854 | dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); | 2582 | dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); |
| 1855 | 2583 | ||
| 1856 | return 0; | 2584 | return 0; |
| 1857 | 2585 | ||
| 1858 | fail_io_attach: | 2586 | fail_io_attach: |
| 1859 | pci_set_drvdata(pdev, NULL); | 2587 | pci_set_drvdata(pdev, NULL); |
| 1860 | mhba->instancet->disable_intr(mhba->mmio); | 2588 | mhba->instancet->disable_intr(mhba); |
| 1861 | free_irq(mhba->pdev->irq, mhba); | 2589 | free_irq(mhba->pdev->irq, mhba); |
| 1862 | fail_init_irq: | 2590 | fail_init_irq: |
| 1863 | mvumi_release_fw(mhba); | 2591 | mvumi_release_fw(mhba); |
| @@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev) | |||
| 1877 | struct mvumi_hba *mhba; | 2605 | struct mvumi_hba *mhba; |
| 1878 | 2606 | ||
| 1879 | mhba = pci_get_drvdata(pdev); | 2607 | mhba = pci_get_drvdata(pdev); |
| 2608 | if (mhba->dm_thread) { | ||
| 2609 | kthread_stop(mhba->dm_thread); | ||
| 2610 | mhba->dm_thread = NULL; | ||
| 2611 | } | ||
| 2612 | |||
| 2613 | mvumi_detach_devices(mhba); | ||
| 1880 | host = mhba->shost; | 2614 | host = mhba->shost; |
| 1881 | scsi_remove_host(mhba->shost); | 2615 | scsi_remove_host(mhba->shost); |
| 1882 | mvumi_flush_cache(mhba); | 2616 | mvumi_flush_cache(mhba); |
| 1883 | 2617 | ||
| 1884 | mhba->instancet->disable_intr(mhba->mmio); | 2618 | mhba->instancet->disable_intr(mhba); |
| 1885 | free_irq(mhba->pdev->irq, mhba); | 2619 | free_irq(mhba->pdev->irq, mhba); |
| 1886 | mvumi_release_fw(mhba); | 2620 | mvumi_release_fw(mhba); |
| 1887 | scsi_host_put(host); | 2621 | scsi_host_put(host); |
| @@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 1909 | mvumi_flush_cache(mhba); | 2643 | mvumi_flush_cache(mhba); |
| 1910 | 2644 | ||
| 1911 | pci_set_drvdata(pdev, mhba); | 2645 | pci_set_drvdata(pdev, mhba); |
| 1912 | mhba->instancet->disable_intr(mhba->mmio); | 2646 | mhba->instancet->disable_intr(mhba); |
| 1913 | free_irq(mhba->pdev->irq, mhba); | 2647 | free_irq(mhba->pdev->irq, mhba); |
| 1914 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); | 2648 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); |
| 1915 | pci_release_regions(pdev); | 2649 | pci_release_regions(pdev); |
| @@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev) | |||
| 1956 | if (ret) | 2690 | if (ret) |
| 1957 | goto release_regions; | 2691 | goto release_regions; |
| 1958 | 2692 | ||
| 2693 | if (mvumi_cfg_hw_reg(mhba)) { | ||
| 2694 | ret = -EINVAL; | ||
| 2695 | goto unmap_pci_addr; | ||
| 2696 | } | ||
| 2697 | |||
| 1959 | mhba->mmio = mhba->base_addr[0]; | 2698 | mhba->mmio = mhba->base_addr[0]; |
| 1960 | mvumi_reset(mhba->mmio); | 2699 | mvumi_reset(mhba); |
| 1961 | 2700 | ||
| 1962 | if (mvumi_start(mhba)) { | 2701 | if (mvumi_start(mhba)) { |
| 1963 | ret = -EINVAL; | 2702 | ret = -EINVAL; |
| @@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev) | |||
| 1970 | dev_err(&pdev->dev, "failed to register IRQ\n"); | 2709 | dev_err(&pdev->dev, "failed to register IRQ\n"); |
| 1971 | goto unmap_pci_addr; | 2710 | goto unmap_pci_addr; |
| 1972 | } | 2711 | } |
| 1973 | mhba->instancet->enable_intr(mhba->mmio); | 2712 | mhba->instancet->enable_intr(mhba); |
| 1974 | 2713 | ||
| 1975 | return 0; | 2714 | return 0; |
| 1976 | 2715 | ||
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h index 10b9237566f0..e360135fd1bd 100644 --- a/drivers/scsi/mvumi.h +++ b/drivers/scsi/mvumi.h | |||
| @@ -34,51 +34,87 @@ | |||
| 34 | #define MV_DRIVER_NAME "mvumi" | 34 | #define MV_DRIVER_NAME "mvumi" |
| 35 | #define PCI_VENDOR_ID_MARVELL_2 0x1b4b | 35 | #define PCI_VENDOR_ID_MARVELL_2 0x1b4b |
| 36 | #define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 | 36 | #define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 |
| 37 | #define PCI_DEVICE_ID_MARVELL_MV9580 0x9580 | ||
| 37 | 38 | ||
| 38 | #define MVUMI_INTERNAL_CMD_WAIT_TIME 45 | 39 | #define MVUMI_INTERNAL_CMD_WAIT_TIME 45 |
| 40 | #define MVUMI_INQUIRY_LENGTH 44 | ||
| 41 | #define MVUMI_INQUIRY_UUID_OFF 36 | ||
| 42 | #define MVUMI_INQUIRY_UUID_LEN 8 | ||
| 39 | 43 | ||
| 40 | #define IS_DMA64 (sizeof(dma_addr_t) == 8) | 44 | #define IS_DMA64 (sizeof(dma_addr_t) == 8) |
| 41 | 45 | ||
| 42 | enum mvumi_qc_result { | 46 | enum mvumi_qc_result { |
| 43 | MV_QUEUE_COMMAND_RESULT_SENT = 0, | 47 | MV_QUEUE_COMMAND_RESULT_SENT = 0, |
| 44 | MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, | 48 | MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, |
| 45 | }; | 49 | }; |
| 46 | 50 | ||
| 47 | enum { | 51 | struct mvumi_hw_regs { |
| 48 | /*******************************************/ | 52 | /* For CPU */ |
| 49 | 53 | void *main_int_cause_reg; | |
| 50 | /* ARM Mbus Registers Map */ | 54 | void *enpointa_mask_reg; |
| 51 | 55 | void *enpointb_mask_reg; | |
| 52 | /*******************************************/ | 56 | void *rstoutn_en_reg; |
| 53 | CPU_MAIN_INT_CAUSE_REG = 0x20200, | 57 | void *ctrl_sts_reg; |
| 54 | CPU_MAIN_IRQ_MASK_REG = 0x20204, | 58 | void *rstoutn_mask_reg; |
| 55 | CPU_MAIN_FIQ_MASK_REG = 0x20208, | 59 | void *sys_soft_rst_reg; |
| 56 | CPU_ENPOINTA_MASK_REG = 0x2020C, | 60 | |
| 57 | CPU_ENPOINTB_MASK_REG = 0x20210, | 61 | /* For Doorbell */ |
| 58 | 62 | void *pciea_to_arm_drbl_reg; | |
| 59 | INT_MAP_COMAERR = 1 << 6, | 63 | void *arm_to_pciea_drbl_reg; |
| 60 | INT_MAP_COMAIN = 1 << 7, | 64 | void *arm_to_pciea_mask_reg; |
| 61 | INT_MAP_COMAOUT = 1 << 8, | 65 | void *pciea_to_arm_msg0; |
| 62 | INT_MAP_COMBERR = 1 << 9, | 66 | void *pciea_to_arm_msg1; |
| 63 | INT_MAP_COMBIN = 1 << 10, | 67 | void *arm_to_pciea_msg0; |
| 64 | INT_MAP_COMBOUT = 1 << 11, | 68 | void *arm_to_pciea_msg1; |
| 65 | 69 | ||
| 66 | INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR), | 70 | /* reset register */ |
| 67 | INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR), | 71 | void *reset_request; |
| 68 | 72 | void *reset_enable; | |
| 69 | INT_MAP_DL_PCIEA2CPU = 1 << 0, | 73 | |
| 70 | INT_MAP_DL_CPU2PCIEA = 1 << 1, | 74 | /* For Message Unit */ |
| 71 | 75 | void *inb_list_basel; | |
| 72 | /***************************************/ | 76 | void *inb_list_baseh; |
| 77 | void *inb_aval_count_basel; | ||
| 78 | void *inb_aval_count_baseh; | ||
| 79 | void *inb_write_pointer; | ||
| 80 | void *inb_read_pointer; | ||
| 81 | void *outb_list_basel; | ||
| 82 | void *outb_list_baseh; | ||
| 83 | void *outb_copy_basel; | ||
| 84 | void *outb_copy_baseh; | ||
| 85 | void *outb_copy_pointer; | ||
| 86 | void *outb_read_pointer; | ||
| 87 | void *inb_isr_cause; | ||
| 88 | void *outb_isr_cause; | ||
| 89 | void *outb_coal_cfg; | ||
| 90 | void *outb_coal_timeout; | ||
| 91 | |||
| 92 | /* Bit setting for HW */ | ||
| 93 | u32 int_comaout; | ||
| 94 | u32 int_comaerr; | ||
| 95 | u32 int_dl_cpu2pciea; | ||
| 96 | u32 int_mu; | ||
| 97 | u32 int_drbl_int_mask; | ||
| 98 | u32 int_main_int_mask; | ||
| 99 | u32 cl_pointer_toggle; | ||
| 100 | u32 cl_slot_num_mask; | ||
| 101 | u32 clic_irq; | ||
| 102 | u32 clic_in_err; | ||
| 103 | u32 clic_out_err; | ||
| 104 | }; | ||
| 73 | 105 | ||
| 74 | /* ARM Doorbell Registers Map */ | 106 | struct mvumi_dyn_list_entry { |
| 107 | u32 src_low_addr; | ||
| 108 | u32 src_high_addr; | ||
| 109 | u32 if_length; | ||
| 110 | u32 reserve; | ||
| 111 | }; | ||
| 75 | 112 | ||
| 76 | /***************************************/ | 113 | #define SCSI_CMD_MARVELL_SPECIFIC 0xE1 |
| 77 | CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400, | 114 | #define CDB_CORE_MODULE 0x1 |
| 78 | CPU_PCIEA_TO_ARM_MASK_REG = 0x20404, | 115 | #define CDB_CORE_SHUTDOWN 0xB |
| 79 | CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408, | ||
| 80 | CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C, | ||
| 81 | 116 | ||
| 117 | enum { | ||
| 82 | DRBL_HANDSHAKE = 1 << 0, | 118 | DRBL_HANDSHAKE = 1 << 0, |
| 83 | DRBL_SOFT_RESET = 1 << 1, | 119 | DRBL_SOFT_RESET = 1 << 1, |
| 84 | DRBL_BUS_CHANGE = 1 << 2, | 120 | DRBL_BUS_CHANGE = 1 << 2, |
| @@ -86,46 +122,6 @@ enum { | |||
| 86 | DRBL_MU_RESET = 1 << 4, | 122 | DRBL_MU_RESET = 1 << 4, |
| 87 | DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, | 123 | DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, |
| 88 | 124 | ||
| 89 | CPU_PCIEA_TO_ARM_MSG0 = 0x20430, | ||
| 90 | CPU_PCIEA_TO_ARM_MSG1 = 0x20434, | ||
| 91 | CPU_ARM_TO_PCIEA_MSG0 = 0x20438, | ||
| 92 | CPU_ARM_TO_PCIEA_MSG1 = 0x2043C, | ||
| 93 | |||
| 94 | /*******************************************/ | ||
| 95 | |||
| 96 | /* ARM Communication List Registers Map */ | ||
| 97 | |||
| 98 | /*******************************************/ | ||
| 99 | CLA_INB_LIST_BASEL = 0x500, | ||
| 100 | CLA_INB_LIST_BASEH = 0x504, | ||
| 101 | CLA_INB_AVAL_COUNT_BASEL = 0x508, | ||
| 102 | CLA_INB_AVAL_COUNT_BASEH = 0x50C, | ||
| 103 | CLA_INB_DESTI_LIST_BASEL = 0x510, | ||
| 104 | CLA_INB_DESTI_LIST_BASEH = 0x514, | ||
| 105 | CLA_INB_WRITE_POINTER = 0x518, | ||
| 106 | CLA_INB_READ_POINTER = 0x51C, | ||
| 107 | |||
| 108 | CLA_OUTB_LIST_BASEL = 0x530, | ||
| 109 | CLA_OUTB_LIST_BASEH = 0x534, | ||
| 110 | CLA_OUTB_SOURCE_LIST_BASEL = 0x538, | ||
| 111 | CLA_OUTB_SOURCE_LIST_BASEH = 0x53C, | ||
| 112 | CLA_OUTB_COPY_POINTER = 0x544, | ||
| 113 | CLA_OUTB_READ_POINTER = 0x548, | ||
| 114 | |||
| 115 | CLA_ISR_CAUSE = 0x560, | ||
| 116 | CLA_ISR_MASK = 0x564, | ||
| 117 | |||
| 118 | INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT), | ||
| 119 | |||
| 120 | CL_POINTER_TOGGLE = 1 << 12, | ||
| 121 | |||
| 122 | CLIC_IN_IRQ = 1 << 0, | ||
| 123 | CLIC_OUT_IRQ = 1 << 1, | ||
| 124 | CLIC_IN_ERR_IRQ = 1 << 8, | ||
| 125 | CLIC_OUT_ERR_IRQ = 1 << 12, | ||
| 126 | |||
| 127 | CL_SLOT_NUM_MASK = 0xFFF, | ||
| 128 | |||
| 129 | /* | 125 | /* |
| 130 | * Command flag is the flag for the CDB command itself | 126 | * Command flag is the flag for the CDB command itself |
| 131 | */ | 127 | */ |
| @@ -137,15 +133,23 @@ enum { | |||
| 137 | CMD_FLAG_DATA_IN = 1 << 3, | 133 | CMD_FLAG_DATA_IN = 1 << 3, |
| 138 | /* 1-host write data */ | 134 | /* 1-host write data */ |
| 139 | CMD_FLAG_DATA_OUT = 1 << 4, | 135 | CMD_FLAG_DATA_OUT = 1 << 4, |
| 140 | 136 | CMD_FLAG_PRDT_IN_HOST = 1 << 5, | |
| 141 | SCSI_CMD_MARVELL_SPECIFIC = 0xE1, | ||
| 142 | CDB_CORE_SHUTDOWN = 0xB, | ||
| 143 | }; | 137 | }; |
| 144 | 138 | ||
| 145 | #define APICDB0_EVENT 0xF4 | 139 | #define APICDB0_EVENT 0xF4 |
| 146 | #define APICDB1_EVENT_GETEVENT 0 | 140 | #define APICDB1_EVENT_GETEVENT 0 |
| 141 | #define APICDB1_HOST_GETEVENT 1 | ||
| 147 | #define MAX_EVENTS_RETURNED 6 | 142 | #define MAX_EVENTS_RETURNED 6 |
| 148 | 143 | ||
| 144 | #define DEVICE_OFFLINE 0 | ||
| 145 | #define DEVICE_ONLINE 1 | ||
| 146 | |||
| 147 | struct mvumi_hotplug_event { | ||
| 148 | u16 size; | ||
| 149 | u8 dummy[2]; | ||
| 150 | u8 bitmap[0]; | ||
| 151 | }; | ||
| 152 | |||
| 149 | struct mvumi_driver_event { | 153 | struct mvumi_driver_event { |
| 150 | u32 time_stamp; | 154 | u32 time_stamp; |
| 151 | u32 sequence_no; | 155 | u32 sequence_no; |
| @@ -172,8 +176,14 @@ struct mvumi_events_wq { | |||
| 172 | void *param; | 176 | void *param; |
| 173 | }; | 177 | }; |
| 174 | 178 | ||
| 179 | #define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4) | ||
| 180 | #define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5) | ||
| 181 | #define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6) | ||
| 182 | #define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14) | ||
| 183 | |||
| 175 | #define MVUMI_MAX_SG_ENTRY 32 | 184 | #define MVUMI_MAX_SG_ENTRY 32 |
| 176 | #define SGD_EOT (1L << 27) | 185 | #define SGD_EOT (1L << 27) |
| 186 | #define SGD_EOT_CP (1L << 22) | ||
| 177 | 187 | ||
| 178 | struct mvumi_sgl { | 188 | struct mvumi_sgl { |
| 179 | u32 baseaddr_l; | 189 | u32 baseaddr_l; |
| @@ -181,6 +191,39 @@ struct mvumi_sgl { | |||
| 181 | u32 flags; | 191 | u32 flags; |
| 182 | u32 size; | 192 | u32 size; |
| 183 | }; | 193 | }; |
| 194 | struct mvumi_compact_sgl { | ||
| 195 | u32 baseaddr_l; | ||
| 196 | u32 baseaddr_h; | ||
| 197 | u32 flags; | ||
| 198 | }; | ||
| 199 | |||
| 200 | #define GET_COMPACT_SGD_SIZE(sgd) \ | ||
| 201 | ((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL) | ||
| 202 | |||
| 203 | #define SET_COMPACT_SGD_SIZE(sgd, sz) do { \ | ||
| 204 | (((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \ | ||
| 205 | (((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \ | ||
| 206 | } while (0) | ||
| 207 | #define sgd_getsz(_mhba, sgd, sz) do { \ | ||
| 208 | if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ | ||
| 209 | (sz) = GET_COMPACT_SGD_SIZE(sgd); \ | ||
| 210 | else \ | ||
| 211 | (sz) = (sgd)->size; \ | ||
| 212 | } while (0) | ||
| 213 | |||
| 214 | #define sgd_setsz(_mhba, sgd, sz) do { \ | ||
| 215 | if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ | ||
| 216 | SET_COMPACT_SGD_SIZE(sgd, sz); \ | ||
| 217 | else \ | ||
| 218 | (sgd)->size = (sz); \ | ||
| 219 | } while (0) | ||
| 220 | |||
| 221 | #define sgd_inc(_mhba, sgd) do { \ | ||
| 222 | if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \ | ||
| 223 | sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \ | ||
| 224 | else \ | ||
| 225 | sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \ | ||
| 226 | } while (0) | ||
| 184 | 227 | ||
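The accessor macros above let a single loop walk either layout: the 16-byte full SGD or the 12-byte compact SGD negotiated via HS_CAPABILITY_SUPPORT_COMPACT_SG. A hypothetical debug walk shows the intended usage:

	static void mvumi_dump_sgl(struct mvumi_hba *mhba,
				   struct mvumi_sgl *sgd, unsigned int count)
	{
		unsigned int i, sz;

		for (i = 0; i < count; i++) {
			sgd_getsz(mhba, sgd, sz);	/* size field or low flag bits */
			dev_dbg(&mhba->pdev->dev, "sg[%u]: %u bytes\n", i, sz);
			sgd_inc(mhba, sgd);		/* advance 12 or 16 bytes */
		}
	}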
| 185 | struct mvumi_res { | 228 | struct mvumi_res { |
| 186 | struct list_head entry; | 229 | struct list_head entry; |
| @@ -197,7 +240,7 @@ enum resource_type { | |||
| 197 | }; | 240 | }; |
| 198 | 241 | ||
| 199 | struct mvumi_sense_data { | 242 | struct mvumi_sense_data { |
| 200 | u8 error_eode:7; | 243 | u8 error_code:7; |
| 201 | u8 valid:1; | 244 | u8 valid:1; |
| 202 | u8 segment_number; | 245 | u8 segment_number; |
| 203 | u8 sense_key:4; | 246 | u8 sense_key:4; |
| @@ -220,6 +263,7 @@ struct mvumi_sense_data { | |||
| 220 | struct mvumi_cmd { | 263 | struct mvumi_cmd { |
| 221 | struct list_head queue_pointer; | 264 | struct list_head queue_pointer; |
| 222 | struct mvumi_msg_frame *frame; | 265 | struct mvumi_msg_frame *frame; |
| 266 | dma_addr_t frame_phys; | ||
| 223 | struct scsi_cmnd *scmd; | 267 | struct scsi_cmnd *scmd; |
| 224 | atomic_t sync_cmd; | 268 | atomic_t sync_cmd; |
| 225 | void *data_buf; | 269 | void *data_buf; |
| @@ -393,7 +437,8 @@ struct mvumi_hs_page2 { | |||
| 393 | u16 frame_length; | 437 | u16 frame_length; |
| 394 | 438 | ||
| 395 | u8 host_type; | 439 | u8 host_type; |
| 396 | u8 reserved[3]; | 440 | u8 host_cap; |
| 441 | u8 reserved[2]; | ||
| 397 | struct version_info host_ver; | 442 | struct version_info host_ver; |
| 398 | u32 system_io_bus; | 443 | u32 system_io_bus; |
| 399 | u32 slot_number; | 444 | u32 slot_number; |
| @@ -435,8 +480,17 @@ struct mvumi_tag { | |||
| 435 | unsigned short size; | 480 | unsigned short size; |
| 436 | }; | 481 | }; |
| 437 | 482 | ||
| 483 | struct mvumi_device { | ||
| 484 | struct list_head list; | ||
| 485 | struct scsi_device *sdev; | ||
| 486 | u64 wwid; | ||
| 487 | u8 dev_type; | ||
| 488 | int id; | ||
| 489 | }; | ||
| 490 | |||
| 438 | struct mvumi_hba { | 491 | struct mvumi_hba { |
| 439 | void *base_addr[MAX_BASE_ADDRESS]; | 492 | void *base_addr[MAX_BASE_ADDRESS]; |
| 493 | u32 pci_base[MAX_BASE_ADDRESS]; | ||
| 440 | void *mmio; | 494 | void *mmio; |
| 441 | struct list_head cmd_pool; | 495 | struct list_head cmd_pool; |
| 442 | struct Scsi_Host *shost; | 496 | struct Scsi_Host *shost; |
| @@ -449,6 +503,9 @@ struct mvumi_hba { | |||
| 449 | void *ib_list; | 503 | void *ib_list; |
| 450 | dma_addr_t ib_list_phys; | 504 | dma_addr_t ib_list_phys; |
| 451 | 505 | ||
| 506 | void *ib_frame; | ||
| 507 | dma_addr_t ib_frame_phys; | ||
| 508 | |||
| 452 | void *ob_list; | 509 | void *ob_list; |
| 453 | dma_addr_t ob_list_phys; | 510 | dma_addr_t ob_list_phys; |
| 454 | 511 | ||
| @@ -477,12 +534,14 @@ struct mvumi_hba { | |||
| 477 | unsigned char hba_total_pages; | 534 | unsigned char hba_total_pages; |
| 478 | unsigned char fw_flag; | 535 | unsigned char fw_flag; |
| 479 | unsigned char request_id_enabled; | 536 | unsigned char request_id_enabled; |
| 537 | unsigned char eot_flag; | ||
| 480 | unsigned short hba_capability; | 538 | unsigned short hba_capability; |
| 481 | unsigned short io_seq; | 539 | unsigned short io_seq; |
| 482 | 540 | ||
| 483 | unsigned int ib_cur_slot; | 541 | unsigned int ib_cur_slot; |
| 484 | unsigned int ob_cur_slot; | 542 | unsigned int ob_cur_slot; |
| 485 | unsigned int fw_state; | 543 | unsigned int fw_state; |
| 544 | struct mutex sas_discovery_mutex; | ||
| 486 | 545 | ||
| 487 | struct list_head ob_data_list; | 546 | struct list_head ob_data_list; |
| 488 | struct list_head free_ob_list; | 547 | struct list_head free_ob_list; |
| @@ -491,14 +550,24 @@ struct mvumi_hba { | |||
| 491 | 550 | ||
| 492 | struct mvumi_tag tag_pool; | 551 | struct mvumi_tag tag_pool; |
| 493 | struct mvumi_cmd **tag_cmd; | 552 | struct mvumi_cmd **tag_cmd; |
| 553 | struct mvumi_hw_regs *regs; | ||
| 554 | struct mutex device_lock; | ||
| 555 | struct list_head mhba_dev_list; | ||
| 556 | struct list_head shost_dev_list; | ||
| 557 | struct task_struct *dm_thread; | ||
| 558 | atomic_t pnp_count; | ||
| 494 | }; | 559 | }; |
| 495 | 560 | ||
| 496 | struct mvumi_instance_template { | 561 | struct mvumi_instance_template { |
| 497 | void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); | 562 | void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *); |
| 498 | void (*enable_intr)(void *) ; | 563 | void (*enable_intr) (struct mvumi_hba *); |
| 499 | void (*disable_intr)(void *); | 564 | void (*disable_intr) (struct mvumi_hba *); |
| 500 | int (*clear_intr)(void *); | 565 | int (*clear_intr) (void *); |
| 501 | unsigned int (*read_fw_status_reg)(void *); | 566 | unsigned int (*read_fw_status_reg) (struct mvumi_hba *); |
| 567 | unsigned int (*check_ib_list) (struct mvumi_hba *); | ||
| 568 | int (*check_ob_list) (struct mvumi_hba *, unsigned int *, | ||
| 569 | unsigned int *); | ||
| 570 | int (*reset_host) (struct mvumi_hba *); | ||
| 502 | }; | 571 | }; |
| 503 | 572 | ||
| 504 | extern struct timezone sys_tz; | 573 | extern struct timezone sys_tz; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 799a58bb9859..48fca47384b7 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -2080,6 +2080,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
| 2080 | uint8_t domain; | 2080 | uint8_t domain; |
| 2081 | char connect_type[22]; | 2081 | char connect_type[22]; |
| 2082 | struct qla_hw_data *ha = vha->hw; | 2082 | struct qla_hw_data *ha = vha->hw; |
| 2083 | unsigned long flags; | ||
| 2083 | 2084 | ||
| 2084 | /* Get host addresses. */ | 2085 | /* Get host addresses. */ |
| 2085 | rval = qla2x00_get_adapter_id(vha, | 2086 | rval = qla2x00_get_adapter_id(vha, |
| @@ -2154,9 +2155,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
| 2154 | vha->d_id.b.area = area; | 2155 | vha->d_id.b.area = area; |
| 2155 | vha->d_id.b.al_pa = al_pa; | 2156 | vha->d_id.b.al_pa = al_pa; |
| 2156 | 2157 | ||
| 2157 | spin_lock(&ha->vport_slock); | 2158 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 2158 | qlt_update_vp_map(vha, SET_AL_PA); | 2159 | qlt_update_vp_map(vha, SET_AL_PA); |
| 2159 | spin_unlock(&ha->vport_slock); | 2160 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
| 2160 | 2161 | ||
| 2161 | if (!vha->flags.init_done) | 2162 | if (!vha->flags.init_done) |
| 2162 | ql_log(ql_log_info, vha, 0x2010, | 2163 | ql_log(ql_log_info, vha, 0x2010, |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 57fbd5a3d4e2..5cda11c07c68 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -2055,7 +2055,7 @@ static void unmap_region(sector_t lba, unsigned int len) | |||
| 2055 | block = lba + alignment; | 2055 | block = lba + alignment; |
| 2056 | rem = do_div(block, granularity); | 2056 | rem = do_div(block, granularity); |
| 2057 | 2057 | ||
| 2058 | if (rem == 0 && lba + granularity <= end && block < map_size) { | 2058 | if (rem == 0 && lba + granularity < end && block < map_size) { |
| 2059 | clear_bit(block, map_storep); | 2059 | clear_bit(block, map_storep); |
| 2060 | if (scsi_debug_lbprz) | 2060 | if (scsi_debug_lbprz) |
| 2061 | memset(fake_storep + | 2061 | memset(fake_storep + |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index de2337f255a7..c1b05a83d403 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -789,7 +789,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, | |||
| 789 | int cmnd_size, int timeout, unsigned sense_bytes) | 789 | int cmnd_size, int timeout, unsigned sense_bytes) |
| 790 | { | 790 | { |
| 791 | struct scsi_device *sdev = scmd->device; | 791 | struct scsi_device *sdev = scmd->device; |
| 792 | struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); | ||
| 793 | struct Scsi_Host *shost = sdev->host; | 792 | struct Scsi_Host *shost = sdev->host; |
| 794 | DECLARE_COMPLETION_ONSTACK(done); | 793 | DECLARE_COMPLETION_ONSTACK(done); |
| 795 | unsigned long timeleft; | 794 | unsigned long timeleft; |
| @@ -845,8 +844,11 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, | |||
| 845 | 844 | ||
| 846 | scsi_eh_restore_cmnd(scmd, &ses); | 845 | scsi_eh_restore_cmnd(scmd, &ses); |
| 847 | 846 | ||
| 848 | if (sdrv && sdrv->eh_action) | 847 | if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
| 849 | rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); | 848 | struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); |
| 849 | if (sdrv->eh_action) | ||
| 850 | rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); | ||
| 851 | } | ||
| 850 | 852 | ||
| 851 | return rtn; | 853 | return rtn; |
| 852 | } | 854 | } |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 528d52beaa1c..01440782feb2 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) | |||
| 1221 | /* | 1221 | /* |
| 1222 | * At this point, all outstanding requests in the adapter | 1222 | * At this point, all outstanding requests in the adapter |
| 1223 | * should have been flushed out and returned to us. | 1223 | * should have been flushed out and returned to us. |
| 1224 | * There is a potential race here where the host may still be | ||
| 1225 | * in the process of responding when we return from here. | ||
| 1226 | * Just wait for all in-transit packets to be accounted for | ||
| 1227 | * before we return. | ||
| 1224 | */ | 1228 | */ |
| 1229 | storvsc_wait_to_drain(stor_device); | ||
| 1225 | 1230 | ||
| 1226 | return SUCCESS; | 1231 | return SUCCESS; |
| 1227 | } | 1232 | } |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 3e79a2f00042..595af1ae4421 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, | |||
| 219 | struct scatterlist sg; | 219 | struct scatterlist sg; |
| 220 | unsigned long flags; | 220 | unsigned long flags; |
| 221 | 221 | ||
| 222 | sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); | 222 | sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); |
| 223 | 223 | ||
| 224 | spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); | 224 | spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); |
| 225 | 225 | ||
| @@ -279,6 +279,31 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, | |||
| 279 | } | 279 | } |
| 280 | } | 280 | } |
| 281 | 281 | ||
| 282 | static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, | ||
| 283 | struct virtio_scsi_event *event) | ||
| 284 | { | ||
| 285 | struct scsi_device *sdev; | ||
| 286 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | ||
| 287 | unsigned int target = event->lun[1]; | ||
| 288 | unsigned int lun = (event->lun[2] << 8) | event->lun[3]; | ||
| 289 | u8 asc = event->reason & 255; | ||
| 290 | u8 ascq = event->reason >> 8; | ||
| 291 | |||
| 292 | sdev = scsi_device_lookup(shost, 0, target, lun); | ||
| 293 | if (!sdev) { | ||
| 294 | pr_err("SCSI device %d 0 %d %d not found\n", | ||
| 295 | shost->host_no, target, lun); | ||
| 296 | return; | ||
| 297 | } | ||
| 298 | |||
| 299 | /* Handle "Parameters changed", "Mode parameters changed", and | ||
| 300 | "Capacity data has changed". */ | ||
| 301 | if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) | ||
| 302 | scsi_rescan_device(&sdev->sdev_gendev); | ||
| 303 | |||
| 304 | scsi_device_put(sdev); | ||
| 305 | } | ||
| 306 | |||
| 282 | static void virtscsi_handle_event(struct work_struct *work) | 307 | static void virtscsi_handle_event(struct work_struct *work) |
| 283 | { | 308 | { |
| 284 | struct virtio_scsi_event_node *event_node = | 309 | struct virtio_scsi_event_node *event_node = |
| @@ -297,6 +322,9 @@ static void virtscsi_handle_event(struct work_struct *work) | |||
| 297 | case VIRTIO_SCSI_T_TRANSPORT_RESET: | 322 | case VIRTIO_SCSI_T_TRANSPORT_RESET: |
| 298 | virtscsi_handle_transport_reset(vscsi, event); | 323 | virtscsi_handle_transport_reset(vscsi, event); |
| 299 | break; | 324 | break; |
| 325 | case VIRTIO_SCSI_T_PARAM_CHANGE: | ||
| 326 | virtscsi_handle_param_change(vscsi, event); | ||
| 327 | break; | ||
| 300 | default: | 328 | default: |
| 301 | pr_err("Unsupport virtio scsi event %x\n", event->event); | 329 | pr_err("Unsupport virtio scsi event %x\n", event->event); |
| 302 | } | 330 | } |
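virtscsi_handle_param_change() decodes the additional sense pair that the host packs into event->reason: the ASC in the low byte, the ASCQ in the next byte. For example, a "capacity data has changed" unit attention would arrive as:

    u32 reason = 0x092a;    /* hypothetical event->reason value */
    u8 asc  = reason & 255; /* 0x2a */
    u8 ascq = reason >> 8;  /* 0x09: triggers scsi_rescan_device() */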
| @@ -677,7 +705,11 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev) | |||
| 677 | cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; | 705 | cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; |
| 678 | shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); | 706 | shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); |
| 679 | shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; | 707 | shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; |
| 680 | shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1; | 708 | |
| 709 | /* LUNs > 256 are reported with format 1, so they go in the range | ||
| 710 | * 16640-32767. | ||
| 711 | */ | ||
| 712 | shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; | ||
| 681 | shost->max_id = num_targets; | 713 | shost->max_id = num_targets; |
| 682 | shost->max_channel = 0; | 714 | shost->max_channel = 0; |
| 683 | shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; | 715 | shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; |
| @@ -733,7 +765,8 @@ static struct virtio_device_id id_table[] = { | |||
| 733 | }; | 765 | }; |
| 734 | 766 | ||
| 735 | static unsigned int features[] = { | 767 | static unsigned int features[] = { |
| 736 | VIRTIO_SCSI_F_HOTPLUG | 768 | VIRTIO_SCSI_F_HOTPLUG, |
| 769 | VIRTIO_SCSI_F_CHANGE, | ||
| 737 | }; | 770 | }; |
| 738 | 771 | ||
| 739 | static struct virtio_driver virtio_scsi_driver = { | 772 | static struct virtio_driver virtio_scsi_driver = { |
diff --git a/drivers/staging/omap-thermal/omap-thermal-common.c b/drivers/staging/omap-thermal/omap-thermal-common.c index 46ee0a9f49d9..5c0c203b887f 100644 --- a/drivers/staging/omap-thermal/omap-thermal-common.c +++ b/drivers/staging/omap-thermal/omap-thermal-common.c | |||
| @@ -126,7 +126,9 @@ static int omap_thermal_bind(struct thermal_zone_device *thermal, | |||
| 126 | 126 | ||
| 127 | /* TODO: bind with min and max states */ | 127 | /* TODO: bind with min and max states */ |
| 128 | /* Simple thing, two trips, one passive another critical */ | 128 | /* Simple thing, two trips, one passive another critical */ |
| 129 | return thermal_zone_bind_cooling_device(thermal, 0, cdev); | 129 | return thermal_zone_bind_cooling_device(thermal, 0, cdev, |
| 130 | THERMAL_NO_LIMIT, | ||
| 131 | THERMAL_NO_LIMIT); | ||
| 130 | } | 132 | } |
| 131 | 133 | ||
| 132 | /* Unbind callback functions for thermal zone */ | 134 | /* Unbind callback functions for thermal zone */ |
| @@ -268,7 +270,6 @@ int omap_thermal_expose_sensor(struct omap_bandgap *bg_ptr, int id, | |||
| 268 | /* Create thermal zone */ | 270 | /* Create thermal zone */ |
| 269 | data->omap_thermal = thermal_zone_device_register(domain, | 271 | data->omap_thermal = thermal_zone_device_register(domain, |
| 270 | OMAP_TRIP_NUMBER, 0, data, &omap_thermal_ops, | 272 | OMAP_TRIP_NUMBER, 0, data, &omap_thermal_ops, |
| 271 | 1, 2, /*TODO: remove this when FW allows */ | ||
| 272 | FAST_TEMP_MONITORING_RATE, | 273 | FAST_TEMP_MONITORING_RATE, |
| 273 | FAST_TEMP_MONITORING_RATE); | 274 | FAST_TEMP_MONITORING_RATE); |
| 274 | if (IS_ERR_OR_NULL(data->omap_thermal)) { | 275 | if (IS_ERR_OR_NULL(data->omap_thermal)) { |
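thermal_zone_bind_cooling_device() now also takes the highest and lowest cooling states the zone may use for that trip; passing THERMAL_NO_LIMIT for both leaves the whole state range to the thermal governor, which is what this driver wants. A minimal call sketch (the zone, trip index and cdev here are illustrative):

    ret = thermal_zone_bind_cooling_device(tz, 0 /* trip */, cdev,
                                           THERMAL_NO_LIMIT,  /* upper state */
                                           THERMAL_NO_LIMIT); /* lower state */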
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 3ab2bd540b54..edfd67d25013 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -19,6 +19,17 @@ config THERMAL_HWMON | |||
| 19 | depends on HWMON=y || HWMON=THERMAL | 19 | depends on HWMON=y || HWMON=THERMAL |
| 20 | default y | 20 | default y |
| 21 | 21 | ||
| 22 | config CPU_THERMAL | ||
| 23 | bool "generic cpu cooling support" | ||
| 24 | depends on THERMAL && CPU_FREQ | ||
| 25 | help | ||
| 26 | This implements the generic cpu cooling mechanism through frequency | ||
| 27 | reduction, cpu hotplug and any other ways of reducing temperature. An | ||
| 28 | ACPI version of this already exists (drivers/acpi/processor_thermal.c). | ||
| 29 | This will be useful for platforms using the generic thermal interface | ||
| 30 | and not the ACPI interface. | ||
| 31 | If you want this support, you should say Y here. | ||
| 32 | |||
| 22 | config SPEAR_THERMAL | 33 | config SPEAR_THERMAL |
| 23 | bool "SPEAr thermal sensor driver" | 34 | bool "SPEAr thermal sensor driver" |
| 24 | depends on THERMAL | 35 | depends on THERMAL |
| @@ -27,3 +38,18 @@ config SPEAR_THERMAL | |||
| 27 | help | 38 | help |
| 28 | Enable this to plug the SPEAr thermal sensor driver into the Linux | 39 | Enable this to plug the SPEAr thermal sensor driver into the Linux |
| 29 | thermal framework | 40 | thermal framework |
| 41 | |||
| 42 | config RCAR_THERMAL | ||
| 43 | tristate "Renesas R-Car thermal driver" | ||
| 44 | depends on THERMAL | ||
| 45 | depends on ARCH_SHMOBILE | ||
| 46 | help | ||
| 47 | Enable this to plug the R-Car thermal sensor driver into the Linux | ||
| 48 | thermal framework | ||
| 49 | |||
| 50 | config EXYNOS_THERMAL | ||
| 51 | tristate "Temperature sensor on Samsung EXYNOS" | ||
| 52 | depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5) && THERMAL | ||
| 53 | help | ||
| 54 | If you say yes here you get support for the TMU (Thermal Management | ||
| 55 | Unit) on the Samsung EXYNOS series of SoCs. | ||
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index a9fff0bf4b14..885550dc64b7 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile | |||
| @@ -3,4 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_THERMAL) += thermal_sys.o | 5 | obj-$(CONFIG_THERMAL) += thermal_sys.o |
| 6 | obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o \ No newline at end of file | 6 | obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o |
| 7 | obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o | ||
| 8 | obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o | ||
| 9 | obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o | ||
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c new file mode 100644 index 000000000000..cc1c930a90e4 --- /dev/null +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -0,0 +1,449 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/thermal/cpu_cooling.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) | ||
| 5 | * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> | ||
| 6 | * | ||
| 7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; version 2 of the License. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License along | ||
| 18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 20 | * | ||
| 21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 22 | */ | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/thermal.h> | ||
| 26 | #include <linux/platform_device.h> | ||
| 27 | #include <linux/cpufreq.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/cpu.h> | ||
| 31 | #include <linux/cpu_cooling.h> | ||
| 32 | |||
| 33 | /** | ||
| 34 | * struct cpufreq_cooling_device | ||
| 35 | * @id: unique integer value corresponding to each cpufreq_cooling_device | ||
| 36 | * registered. | ||
| 37 | * @cool_dev: thermal_cooling_device pointer to keep track of the | ||
| 38 | * registered cooling device. | ||
| 39 | * @cpufreq_state: integer value representing the current state of cpufreq | ||
| 40 | * cooling devices. | ||
| 41 | * @cpufreq_val: integer value representing the absolute value of the clipped | ||
| 42 | * frequency. | ||
| 43 | * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. | ||
| 44 | * @node: list_head to link all cpufreq_cooling_device together. | ||
| 45 | * | ||
| 46 | * This structure is required for keeping information of each | ||
| 47 | * cpufreq_cooling_device registered as a list whose head is represented by | ||
| 48 | * cooling_cpufreq_list. In order to prevent corruption of this list a | ||
| 49 | * mutex lock cooling_cpufreq_lock is used. | ||
| 50 | */ | ||
| 51 | struct cpufreq_cooling_device { | ||
| 52 | int id; | ||
| 53 | struct thermal_cooling_device *cool_dev; | ||
| 54 | unsigned int cpufreq_state; | ||
| 55 | unsigned int cpufreq_val; | ||
| 56 | struct cpumask allowed_cpus; | ||
| 57 | struct list_head node; | ||
| 58 | }; | ||
| 59 | static LIST_HEAD(cooling_cpufreq_list); | ||
| 60 | static DEFINE_IDR(cpufreq_idr); | ||
| 61 | |||
| 62 | static DEFINE_MUTEX(cooling_cpufreq_lock); | ||
| 63 | |||
| 64 | /* notify_device passes the clip info to the CPUFREQ_ADJUST callback. */ | ||
| 65 | #define NOTIFY_INVALID NULL | ||
| 66 | static struct cpufreq_cooling_device *notify_device; | ||
| 67 | |||
| 68 | /** | ||
| 69 | * get_idr - function to get a unique id. | ||
| 70 | * @idr: struct idr * handle used to create a id. | ||
| 71 | * @id: int * value generated by this function. | ||
| 72 | */ | ||
| 73 | static int get_idr(struct idr *idr, int *id) | ||
| 74 | { | ||
| 75 | int err; | ||
| 76 | again: | ||
| 77 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) | ||
| 78 | return -ENOMEM; | ||
| 79 | |||
| 80 | mutex_lock(&cooling_cpufreq_lock); | ||
| 81 | err = idr_get_new(idr, NULL, id); | ||
| 82 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 83 | |||
| 84 | if (unlikely(err == -EAGAIN)) | ||
| 85 | goto again; | ||
| 86 | else if (unlikely(err)) | ||
| 87 | return err; | ||
| 88 | |||
| 89 | *id = *id & MAX_IDR_MASK; | ||
| 90 | return 0; | ||
| 91 | } | ||
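get_idr() follows the two-step idr API of this era: idr_pre_get() preallocates memory, but idr_get_new() can still return -EAGAIN if a concurrent allocation consumed the preallocation, hence the retry loop around both calls.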
| 92 | |||
| 93 | /** | ||
| 94 | * release_idr - function to free the unique id. | ||
| 95 | * @idr: struct idr * handle used for creating the id. | ||
| 96 | * @id: int value representing the unique id. | ||
| 97 | */ | ||
| 98 | static void release_idr(struct idr *idr, int id) | ||
| 99 | { | ||
| 100 | mutex_lock(&cooling_cpufreq_lock); | ||
| 101 | idr_remove(idr, id); | ||
| 102 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 103 | } | ||
| 104 | |||
| 105 | /* The code below defines the callbacks used for cpufreq as a cooling device */ | ||
| 106 | |||
| 107 | /** | ||
| 108 | * is_cpufreq_valid - function to check if a cpu has a frequency transition policy. | ||
| 109 | * @cpu: cpu for which check is needed. | ||
| 110 | */ | ||
| 111 | static int is_cpufreq_valid(int cpu) | ||
| 112 | { | ||
| 113 | struct cpufreq_policy policy; | ||
| 114 | return !cpufreq_get_policy(&policy, cpu); | ||
| 115 | } | ||
| 116 | |||
| 117 | /** | ||
| 118 | * get_cpu_frequency - get the absolute value of frequency from level. | ||
| 119 | * @cpu: cpu for which frequency is fetched. | ||
| 120 | * @level: level of frequency of the CPU | ||
| 121 | * e.g. level=0 --> 1st max freq, level=1 --> 2nd max freq, ... etc. | ||
| 122 | */ | ||
| 123 | static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) | ||
| 124 | { | ||
| 125 | int ret = 0, i = 0; | ||
| 126 | unsigned long level_index; | ||
| 127 | bool descend = false; | ||
| 128 | struct cpufreq_frequency_table *table = | ||
| 129 | cpufreq_frequency_get_table(cpu); | ||
| 130 | if (!table) | ||
| 131 | return ret; | ||
| 132 | |||
| 133 | for (; table[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
| 134 | /* the for-header also advances past invalid entries */ | ||
| 135 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | ||
| 136 | continue; | ||
| 137 | |||
| 138 | /* check if table is in ascending or descending order */ | ||
| 139 | if ((table[i + 1].frequency != CPUFREQ_TABLE_END) && | ||
| 140 | (table[i + 1].frequency < table[i].frequency) | ||
| 141 | && !descend) { | ||
| 142 | descend = true; | ||
| 143 | } | ||
| 144 | |||
| 145 | /* return if level matched and table is in descending order */ | ||
| 146 | if (descend && i == level) | ||
| 147 | return table[i].frequency; | ||
| 148 | } | ||
| 149 | i--; | ||
| 150 | |||
| 151 | if (level > i || descend) | ||
| 152 | return ret; | ||
| 153 | level_index = i - level; | ||
| 154 | |||
| 155 | /* scan the table in reverse order and match the level */ | ||
| 156 | for (; i >= 0; i--) { | ||
| 157 | /* the for-header decrements past invalid entries too */ | ||
| 158 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | ||
| 159 | continue; | ||
| 160 | /* return if level matched */ | ||
| 161 | if (i == level_index) | ||
| 162 | return table[i].frequency; | ||
| 163 | } | ||
| 164 | return ret; | ||
| 165 | } | ||
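A worked example of the level-to-frequency mapping above, for a hypothetical descending table { 1000000, 800000, 600000, 400000, CPUFREQ_TABLE_END } (kHz):

    /* level 0 -> 1000000 (1st max), level 1 -> 800000,
     * level 3 -> 400000, level 4 -> 0 (no such level).
     * For an ascending table the reverse scan maps level N to the
     * (N+1)-th highest frequency via level_index = i - level. */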
| 166 | |||
| 167 | /** | ||
| 168 | * cpufreq_apply_cooling - function to apply frequency clipping. | ||
| 169 | * @cpufreq_device: cpufreq_cooling_device pointer containing frequency | ||
| 170 | * clipping data. | ||
| 171 | * @cooling_state: value of the cooling state. | ||
| 172 | */ | ||
| 173 | static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, | ||
| 174 | unsigned long cooling_state) | ||
| 175 | { | ||
| 176 | unsigned int cpuid, clip_freq; | ||
| 177 | struct cpumask *maskPtr = &cpufreq_device->allowed_cpus; | ||
| 178 | unsigned int cpu = cpumask_any(maskPtr); | ||
| 179 | |||
| 180 | |||
| 181 | /* Check if the old cooling action is same as new cooling action */ | ||
| 182 | if (cpufreq_device->cpufreq_state == cooling_state) | ||
| 183 | return 0; | ||
| 184 | |||
| 185 | clip_freq = get_cpu_frequency(cpu, cooling_state); | ||
| 186 | if (!clip_freq) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | cpufreq_device->cpufreq_state = cooling_state; | ||
| 190 | cpufreq_device->cpufreq_val = clip_freq; | ||
| 191 | notify_device = cpufreq_device; | ||
| 192 | |||
| 193 | for_each_cpu(cpuid, maskPtr) { | ||
| 194 | if (is_cpufreq_valid(cpuid)) | ||
| 195 | cpufreq_update_policy(cpuid); | ||
| 196 | } | ||
| 197 | |||
| 198 | notify_device = NOTIFY_INVALID; | ||
| 199 | |||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | |||
| 203 | /** | ||
| 204 | * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. | ||
| 205 | * @nb: struct notifier_block * with callback info. | ||
| 206 | * @event: value showing cpufreq event for which this function invoked. | ||
| 207 | * @data: callback-specific data | ||
| 208 | */ | ||
| 209 | static int cpufreq_thermal_notifier(struct notifier_block *nb, | ||
| 210 | unsigned long event, void *data) | ||
| 211 | { | ||
| 212 | struct cpufreq_policy *policy = data; | ||
| 213 | unsigned long max_freq = 0; | ||
| 214 | |||
| 215 | if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) | ||
| 216 | return 0; | ||
| 217 | |||
| 218 | if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus)) | ||
| 219 | max_freq = notify_device->cpufreq_val; | ||
| 220 | |||
| 221 | /* Never exceed user_policy.max*/ | ||
| 222 | if (max_freq > policy->user_policy.max) | ||
| 223 | max_freq = policy->user_policy.max; | ||
| 224 | |||
| 225 | if (policy->max != max_freq) | ||
| 226 | cpufreq_verify_within_limits(policy, 0, max_freq); | ||
| 227 | |||
| 228 | return 0; | ||
| 229 | } | ||
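In effect the notifier clamps policy->max to the clipped frequency during the CPUFREQ_ADJUST pass, but never above the user's own limit: with user_policy.max = 1200000 kHz, a clip of 800000 kHz ends with policy->max = 800000, while a (hypothetical) clip of 1400000 kHz would first be cut back to 1200000.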
| 230 | |||
| 231 | /* | ||
| 232 | * cpufreq cooling device callback functions are defined below | ||
| 233 | */ | ||
| 234 | |||
| 235 | /** | ||
| 236 | * cpufreq_get_max_state - callback function to get the max cooling state. | ||
| 237 | * @cdev: thermal cooling device pointer. | ||
| 238 | * @state: fill this variable with the max cooling state. | ||
| 239 | */ | ||
| 240 | static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, | ||
| 241 | unsigned long *state) | ||
| 242 | { | ||
| 243 | int ret = -EINVAL, i = 0; | ||
| 244 | struct cpufreq_cooling_device *cpufreq_device; | ||
| 245 | struct cpumask *maskPtr; | ||
| 246 | unsigned int cpu; | ||
| 247 | struct cpufreq_frequency_table *table; | ||
| 248 | |||
| 249 | mutex_lock(&cooling_cpufreq_lock); | ||
| 250 | list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) { | ||
| 251 | if (cpufreq_device && cpufreq_device->cool_dev == cdev) | ||
| 252 | break; | ||
| 253 | } | ||
| 254 | if (&cpufreq_device->node == &cooling_cpufreq_list) /* no match */ | ||
| 255 | goto return_get_max_state; | ||
| 256 | |||
| 257 | maskPtr = &cpufreq_device->allowed_cpus; | ||
| 258 | cpu = cpumask_any(maskPtr); | ||
| 259 | table = cpufreq_frequency_get_table(cpu); | ||
| 260 | if (!table) { | ||
| 261 | *state = 0; | ||
| 262 | ret = 0; | ||
| 263 | goto return_get_max_state; | ||
| 264 | } | ||
| 265 | |||
| 266 | /* i indexes the table, so it must advance even past an | ||
| 267 | * invalid entry; a bare continue would never terminate */ | ||
| 268 | while (table[i].frequency != CPUFREQ_TABLE_END) | ||
| 269 | i++; | ||
| 270 | |||
| 271 | if (i > 0) { | ||
| 272 | *state = --i; | ||
| 273 | ret = 0; | ||
| 274 | } | ||
| 275 | |||
| 276 | return_get_max_state: | ||
| 277 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 278 | return ret; | ||
| 279 | } | ||
| 280 | |||
| 281 | /** | ||
| 282 | * cpufreq_get_cur_state - callback function to get the current cooling state. | ||
| 283 | * @cdev: thermal cooling device pointer. | ||
| 284 | * @state: fill this variable with the current cooling state. | ||
| 285 | */ | ||
| 286 | static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, | ||
| 287 | unsigned long *state) | ||
| 288 | { | ||
| 289 | int ret = -EINVAL; | ||
| 290 | struct cpufreq_cooling_device *cpufreq_device; | ||
| 291 | |||
| 292 | mutex_lock(&cooling_cpufreq_lock); | ||
| 293 | list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) { | ||
| 294 | if (cpufreq_device && cpufreq_device->cool_dev == cdev) { | ||
| 295 | *state = cpufreq_device->cpufreq_state; | ||
| 296 | ret = 0; | ||
| 297 | break; | ||
| 298 | } | ||
| 299 | } | ||
| 300 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 301 | |||
| 302 | return ret; | ||
| 303 | } | ||
| 304 | |||
| 305 | /** | ||
| 306 | * cpufreq_set_cur_state - callback function to set the current cooling state. | ||
| 307 | * @cdev: thermal cooling device pointer. | ||
| 308 | * @state: set this variable to the current cooling state. | ||
| 309 | */ | ||
| 310 | static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, | ||
| 311 | unsigned long state) | ||
| 312 | { | ||
| 313 | int ret = -EINVAL; | ||
| 314 | struct cpufreq_cooling_device *cpufreq_device; | ||
| 315 | |||
| 316 | mutex_lock(&cooling_cpufreq_lock); | ||
| 317 | list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) { | ||
| 318 | if (cpufreq_device && cpufreq_device->cool_dev == cdev) { | ||
| 319 | ret = 0; | ||
| 320 | break; | ||
| 321 | } | ||
| 322 | } | ||
| 323 | if (!ret) | ||
| 324 | ret = cpufreq_apply_cooling(cpufreq_device, state); | ||
| 325 | |||
| 326 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 327 | |||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* Bind cpufreq callbacks to thermal cooling device ops */ | ||
| 332 | static struct thermal_cooling_device_ops const cpufreq_cooling_ops = { | ||
| 333 | .get_max_state = cpufreq_get_max_state, | ||
| 334 | .get_cur_state = cpufreq_get_cur_state, | ||
| 335 | .set_cur_state = cpufreq_set_cur_state, | ||
| 336 | }; | ||
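Once registered, the cooling device also shows up under the generic thermal sysfs tree, so the states these callbacks implement can be read and forced by hand for testing via cur_state and max_state under /sys/class/thermal/cooling_deviceX/ (the instance number depends on probe order).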
| 337 | |||
| 338 | /* Notifier for cpufreq policy change */ | ||
| 339 | static struct notifier_block thermal_cpufreq_notifier_block = { | ||
| 340 | .notifier_call = cpufreq_thermal_notifier, | ||
| 341 | }; | ||
| 342 | |||
| 343 | /** | ||
| 344 | * cpufreq_cooling_register - function to create cpufreq cooling device. | ||
| 345 | * @clip_cpus: cpumask of cpus where the frequency constraints will happen. | ||
| 346 | */ | ||
| 347 | struct thermal_cooling_device *cpufreq_cooling_register( | ||
| 348 | struct cpumask *clip_cpus) | ||
| 349 | { | ||
| 350 | struct thermal_cooling_device *cool_dev; | ||
| 351 | struct cpufreq_cooling_device *cpufreq_dev = NULL; | ||
| 352 | unsigned int cpufreq_dev_count = 0, min = 0, max = 0; | ||
| 353 | char dev_name[THERMAL_NAME_LENGTH]; | ||
| 354 | int ret = 0, i; | ||
| 355 | struct cpufreq_policy policy; | ||
| 356 | |||
| 357 | list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) | ||
| 358 | cpufreq_dev_count++; | ||
| 359 | |||
| 360 | /* Verify that all the clip cpus have the same freq_min and freq_max */ | ||
| 361 | for_each_cpu(i, clip_cpus) { | ||
| 362 | /* skip (rather than fail) a cpu whose policy is not found */ | ||
| 363 | if (cpufreq_get_policy(&policy, i)) | ||
| 364 | continue; | ||
| 365 | if (min == 0 && max == 0) { | ||
| 366 | min = policy.cpuinfo.min_freq; | ||
| 367 | max = policy.cpuinfo.max_freq; | ||
| 368 | } else { | ||
| 369 | if (min != policy.cpuinfo.min_freq || | ||
| 370 | max != policy.cpuinfo.max_freq) | ||
| 371 | return ERR_PTR(-EINVAL); | ||
| 372 | } | ||
| 373 | } | ||
| 374 | cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device), | ||
| 375 | GFP_KERNEL); | ||
| 376 | if (!cpufreq_dev) | ||
| 377 | return ERR_PTR(-ENOMEM); | ||
| 378 | |||
| 379 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); | ||
| 380 | |||
| 384 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); | ||
| 385 | if (ret) { | ||
| 386 | kfree(cpufreq_dev); | ||
| 387 | return ERR_PTR(-EINVAL); | ||
| 388 | } | ||
| 389 | |||
| 390 | sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id); | ||
| 391 | |||
| 392 | cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev, | ||
| 393 | &cpufreq_cooling_ops); | ||
| 394 | if (!cool_dev) { | ||
| 395 | release_idr(&cpufreq_idr, cpufreq_dev->id); | ||
| 396 | kfree(cpufreq_dev); | ||
| 397 | return ERR_PTR(-EINVAL); | ||
| 398 | } | ||
| 399 | cpufreq_dev->cool_dev = cool_dev; | ||
| 400 | cpufreq_dev->cpufreq_state = 0; | ||
| 401 | mutex_lock(&cooling_cpufreq_lock); | ||
| 402 | list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list); | ||
| 403 | |||
| 404 | /* Register the notifier for first cpufreq cooling device */ | ||
| 405 | if (cpufreq_dev_count == 0) | ||
| 406 | cpufreq_register_notifier(&thermal_cpufreq_notifier_block, | ||
| 407 | CPUFREQ_POLICY_NOTIFIER); | ||
| 408 | |||
| 409 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 410 | return cool_dev; | ||
| 411 | } | ||
| 412 | EXPORT_SYMBOL(cpufreq_cooling_register); | ||
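A minimal usage sketch from a platform driver's point of view, mirroring what exynos_register_thermal() does later in this series (error handling trimmed):

    struct cpumask mask;
    struct thermal_cooling_device *cdev;

    cpumask_set_cpu(0, &mask);  /* clip the policy that covers CPU0 */
    cdev = cpufreq_cooling_register(&mask);
    if (IS_ERR(cdev))
        return PTR_ERR(cdev);
    /* ... on teardown ... */
    cpufreq_cooling_unregister(cdev);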
| 413 | |||
| 414 | /** | ||
| 415 | * cpufreq_cooling_unregister - function to remove cpufreq cooling device. | ||
| 416 | * @cdev: thermal cooling device pointer. | ||
| 417 | */ | ||
| 418 | void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) | ||
| 419 | { | ||
| 420 | struct cpufreq_cooling_device *cpufreq_dev = NULL; | ||
| 421 | unsigned int cpufreq_dev_count = 0; | ||
| 422 | |||
| 423 | mutex_lock(&cooling_cpufreq_lock); | ||
| 424 | list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) { | ||
| 425 | if (cpufreq_dev && cpufreq_dev->cool_dev == cdev) | ||
| 426 | break; | ||
| 427 | cpufreq_dev_count++; | ||
| 428 | } | ||
| 429 | |||
| 430 | if (!cpufreq_dev || cpufreq_dev->cool_dev != cdev) { | ||
| 431 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 432 | return; | ||
| 433 | } | ||
| 434 | |||
| 435 | list_del(&cpufreq_dev->node); | ||
| 436 | |||
| 437 | /* Unregister the notifier for the last cpufreq cooling device */ | ||
| 438 | if (cpufreq_dev_count == 1) { | ||
| 439 | cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, | ||
| 440 | CPUFREQ_POLICY_NOTIFIER); | ||
| 441 | } | ||
| 442 | mutex_unlock(&cooling_cpufreq_lock); | ||
| 443 | thermal_cooling_device_unregister(cpufreq_dev->cool_dev); | ||
| 444 | release_idr(&cpufreq_idr, cpufreq_dev->id); | ||
| 447 | kfree(cpufreq_dev); | ||
| 448 | } | ||
| 449 | EXPORT_SYMBOL(cpufreq_cooling_unregister); | ||
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c new file mode 100644 index 000000000000..fd03e8581afc --- /dev/null +++ b/drivers/thermal/exynos_thermal.c | |||
| @@ -0,0 +1,997 @@ | |||
| 1 | /* | ||
| 2 | * exynos_thermal.c - Samsung EXYNOS TMU (Thermal Management Unit) | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Samsung Electronics | ||
| 5 | * Donggeun Kim <dg77.kim@samsung.com> | ||
| 6 | * Amit Daniel Kachhap <amit.kachhap@linaro.org> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program; if not, write to the Free Software | ||
| 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/err.h> | ||
| 26 | #include <linux/kernel.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/interrupt.h> | ||
| 30 | #include <linux/clk.h> | ||
| 31 | #include <linux/workqueue.h> | ||
| 32 | #include <linux/sysfs.h> | ||
| 33 | #include <linux/kobject.h> | ||
| 34 | #include <linux/io.h> | ||
| 35 | #include <linux/mutex.h> | ||
| 36 | #include <linux/platform_data/exynos_thermal.h> | ||
| 37 | #include <linux/thermal.h> | ||
| 38 | #include <linux/cpufreq.h> | ||
| 39 | #include <linux/cpu_cooling.h> | ||
| 40 | #include <linux/of.h> | ||
| 41 | |||
| 42 | #include <plat/cpu.h> | ||
| 43 | |||
| 44 | /* Exynos generic registers */ | ||
| 45 | #define EXYNOS_TMU_REG_TRIMINFO 0x0 | ||
| 46 | #define EXYNOS_TMU_REG_CONTROL 0x20 | ||
| 47 | #define EXYNOS_TMU_REG_STATUS 0x28 | ||
| 48 | #define EXYNOS_TMU_REG_CURRENT_TEMP 0x40 | ||
| 49 | #define EXYNOS_TMU_REG_INTEN 0x70 | ||
| 50 | #define EXYNOS_TMU_REG_INTSTAT 0x74 | ||
| 51 | #define EXYNOS_TMU_REG_INTCLEAR 0x78 | ||
| 52 | |||
| 53 | #define EXYNOS_TMU_TRIM_TEMP_MASK 0xff | ||
| 54 | #define EXYNOS_TMU_GAIN_SHIFT 8 | ||
| 55 | #define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24 | ||
| 56 | #define EXYNOS_TMU_CORE_ON 3 | ||
| 57 | #define EXYNOS_TMU_CORE_OFF 2 | ||
| 58 | #define EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET 50 | ||
| 59 | |||
| 60 | /* Exynos4210 specific registers */ | ||
| 61 | #define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44 | ||
| 62 | #define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50 | ||
| 63 | #define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54 | ||
| 64 | #define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58 | ||
| 65 | #define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C | ||
| 66 | #define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60 | ||
| 67 | #define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64 | ||
| 68 | #define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68 | ||
| 69 | #define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C | ||
| 70 | |||
| 71 | #define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1 | ||
| 72 | #define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10 | ||
| 73 | #define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100 | ||
| 74 | #define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000 | ||
| 75 | #define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111 | ||
| 76 | |||
| 77 | /* Exynos5250 and Exynos4412 specific registers */ | ||
| 78 | #define EXYNOS_TMU_TRIMINFO_CON 0x14 | ||
| 79 | #define EXYNOS_THD_TEMP_RISE 0x50 | ||
| 80 | #define EXYNOS_THD_TEMP_FALL 0x54 | ||
| 81 | #define EXYNOS_EMUL_CON 0x80 | ||
| 82 | |||
| 83 | #define EXYNOS_TRIMINFO_RELOAD 0x1 | ||
| 84 | #define EXYNOS_TMU_CLEAR_RISE_INT 0x111 | ||
| 85 | #define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16) | ||
| 86 | #define EXYNOS_MUX_ADDR_VALUE 6 | ||
| 87 | #define EXYNOS_MUX_ADDR_SHIFT 20 | ||
| 88 | #define EXYNOS_TMU_TRIP_MODE_SHIFT 13 | ||
| 89 | |||
| 90 | #define EFUSE_MIN_VALUE 40 | ||
| 91 | #define EFUSE_MAX_VALUE 100 | ||
| 92 | |||
| 93 | /* In-kernel thermal framework related macros & definitions */ | ||
| 94 | #define SENSOR_NAME_LEN 16 | ||
| 95 | #define MAX_TRIP_COUNT 8 | ||
| 96 | #define MAX_COOLING_DEVICE 4 | ||
| 97 | |||
| 98 | #define ACTIVE_INTERVAL 500 | ||
| 99 | #define IDLE_INTERVAL 10000 | ||
| 100 | #define MCELSIUS 1000 | ||
| 101 | |||
| 102 | /* CPU Zone information */ | ||
| 103 | #define PANIC_ZONE 4 | ||
| 104 | #define WARN_ZONE 3 | ||
| 105 | #define MONITOR_ZONE 2 | ||
| 106 | #define SAFE_ZONE 1 | ||
| 107 | |||
| 108 | #define GET_ZONE(trip) (trip + 2) | ||
| 109 | #define GET_TRIP(zone) (zone - 2) | ||
| 110 | |||
| 111 | #define EXYNOS_ZONE_COUNT 3 | ||
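GET_ZONE()/GET_TRIP() simply offset between zero-based trip indices and the zone ids above: trip 0 maps to MONITOR_ZONE (2), trip 1 to WARN_ZONE (3) and trip 2 to PANIC_ZONE (4), which is why EXYNOS_ZONE_COUNT is 3.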
| 112 | |||
| 113 | struct exynos_tmu_data { | ||
| 114 | struct exynos_tmu_platform_data *pdata; | ||
| 115 | struct resource *mem; | ||
| 116 | void __iomem *base; | ||
| 117 | int irq; | ||
| 118 | enum soc_type soc; | ||
| 119 | struct work_struct irq_work; | ||
| 120 | struct mutex lock; | ||
| 121 | struct clk *clk; | ||
| 122 | u8 temp_error1, temp_error2; | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct thermal_trip_point_conf { | ||
| 126 | int trip_val[MAX_TRIP_COUNT]; | ||
| 127 | int trip_count; | ||
| 128 | }; | ||
| 129 | |||
| 130 | struct thermal_cooling_conf { | ||
| 131 | struct freq_clip_table freq_data[MAX_TRIP_COUNT]; | ||
| 132 | int freq_clip_count; | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct thermal_sensor_conf { | ||
| 136 | char name[SENSOR_NAME_LEN]; | ||
| 137 | int (*read_temperature)(void *data); | ||
| 138 | struct thermal_trip_point_conf trip_data; | ||
| 139 | struct thermal_cooling_conf cooling_data; | ||
| 140 | void *private_data; | ||
| 141 | }; | ||
| 142 | |||
| 143 | struct exynos_thermal_zone { | ||
| 144 | enum thermal_device_mode mode; | ||
| 145 | struct thermal_zone_device *therm_dev; | ||
| 146 | struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE]; | ||
| 147 | unsigned int cool_dev_size; | ||
| 148 | struct platform_device *exynos4_dev; | ||
| 149 | struct thermal_sensor_conf *sensor_conf; | ||
| 150 | bool bind; | ||
| 151 | }; | ||
| 152 | |||
| 153 | static struct exynos_thermal_zone *th_zone; | ||
| 154 | static void exynos_unregister_thermal(void); | ||
| 155 | static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf); | ||
| 156 | |||
| 157 | /* Get mode callback functions for thermal zone */ | ||
| 158 | static int exynos_get_mode(struct thermal_zone_device *thermal, | ||
| 159 | enum thermal_device_mode *mode) | ||
| 160 | { | ||
| 161 | if (th_zone) | ||
| 162 | *mode = th_zone->mode; | ||
| 163 | return 0; | ||
| 164 | } | ||
| 165 | |||
| 166 | /* Set mode callback functions for thermal zone */ | ||
| 167 | static int exynos_set_mode(struct thermal_zone_device *thermal, | ||
| 168 | enum thermal_device_mode mode) | ||
| 169 | { | ||
| 170 | if (!th_zone->therm_dev) { | ||
| 171 | pr_notice("thermal zone not registered\n"); | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | mutex_lock(&th_zone->therm_dev->lock); | ||
| 176 | |||
| 177 | if (mode == THERMAL_DEVICE_ENABLED) | ||
| 178 | th_zone->therm_dev->polling_delay = IDLE_INTERVAL; | ||
| 179 | else | ||
| 180 | th_zone->therm_dev->polling_delay = 0; | ||
| 181 | |||
| 182 | mutex_unlock(&th_zone->therm_dev->lock); | ||
| 183 | |||
| 184 | th_zone->mode = mode; | ||
| 185 | thermal_zone_device_update(th_zone->therm_dev); | ||
| 186 | pr_info("thermal polling set for duration=%d msec\n", | ||
| 187 | th_zone->therm_dev->polling_delay); | ||
| 188 | return 0; | ||
| 189 | } | ||
| 190 | |||
| 191 | |||
| 192 | /* Get trip type callback functions for thermal zone */ | ||
| 193 | static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip, | ||
| 194 | enum thermal_trip_type *type) | ||
| 195 | { | ||
| 196 | switch (GET_ZONE(trip)) { | ||
| 197 | case MONITOR_ZONE: | ||
| 198 | case WARN_ZONE: | ||
| 199 | *type = THERMAL_TRIP_ACTIVE; | ||
| 200 | break; | ||
| 201 | case PANIC_ZONE: | ||
| 202 | *type = THERMAL_TRIP_CRITICAL; | ||
| 203 | break; | ||
| 204 | default: | ||
| 205 | return -EINVAL; | ||
| 206 | } | ||
| 207 | return 0; | ||
| 208 | } | ||
| 209 | |||
| 210 | /* Get trip temperature callback functions for thermal zone */ | ||
| 211 | static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip, | ||
| 212 | unsigned long *temp) | ||
| 213 | { | ||
| 214 | if (trip < GET_TRIP(MONITOR_ZONE) || trip > GET_TRIP(PANIC_ZONE)) | ||
| 215 | return -EINVAL; | ||
| 216 | |||
| 217 | *temp = th_zone->sensor_conf->trip_data.trip_val[trip]; | ||
| 218 | /* convert the temperature into millicelsius */ | ||
| 219 | *temp = *temp * MCELSIUS; | ||
| 220 | |||
| 221 | return 0; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* Get critical temperature callback functions for thermal zone */ | ||
| 225 | static int exynos_get_crit_temp(struct thermal_zone_device *thermal, | ||
| 226 | unsigned long *temp) | ||
| 227 | { | ||
| 228 | int ret; | ||
| 229 | /* Panic zone */ | ||
| 230 | ret = exynos_get_trip_temp(thermal, GET_TRIP(PANIC_ZONE), temp); | ||
| 231 | return ret; | ||
| 232 | } | ||
| 233 | |||
| 234 | static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq) | ||
| 235 | { | ||
| 236 | int i = 0, ret = -EINVAL; | ||
| 237 | struct cpufreq_frequency_table *table = NULL; | ||
| 238 | #ifdef CONFIG_CPU_FREQ | ||
| 239 | table = cpufreq_frequency_get_table(cpu); | ||
| 240 | #endif | ||
| 241 | if (!table) | ||
| 242 | return ret; | ||
| 243 | |||
| 244 | for (; table[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
| 245 | /* the for-header advances past invalid entries too */ | ||
| 246 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | ||
| 247 | continue; | ||
| 248 | if (table[i].frequency == freq) | ||
| 249 | return i; | ||
| 250 | } | ||
| 251 | return ret; | ||
| 252 | } | ||
| 253 | |||
| 254 | /* Bind callback functions for thermal zone */ | ||
| 255 | static int exynos_bind(struct thermal_zone_device *thermal, | ||
| 256 | struct thermal_cooling_device *cdev) | ||
| 257 | { | ||
| 258 | int ret = 0, i, tab_size, level; | ||
| 259 | struct freq_clip_table *tab_ptr, *clip_data; | ||
| 260 | struct thermal_sensor_conf *data = th_zone->sensor_conf; | ||
| 261 | |||
| 262 | tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data; | ||
| 263 | tab_size = data->cooling_data.freq_clip_count; | ||
| 264 | |||
| 265 | if (tab_ptr == NULL || tab_size == 0) | ||
| 266 | return -EINVAL; | ||
| 267 | |||
| 268 | /* find the registered cooling device */ | ||
| 269 | for (i = 0; i < th_zone->cool_dev_size; i++) | ||
| 270 | if (cdev == th_zone->cool_dev[i]) | ||
| 271 | break; | ||
| 272 | |||
| 273 | /* No matching cooling device */ | ||
| 274 | if (i == th_zone->cool_dev_size) | ||
| 275 | return 0; | ||
| 276 | |||
| 277 | /* Bind the thermal zone to the cpufreq cooling device */ | ||
| 278 | for (i = 0; i < tab_size; i++) { | ||
| 279 | clip_data = (struct freq_clip_table *)&(tab_ptr[i]); | ||
| 280 | level = exynos_get_frequency_level(0, clip_data->freq_clip_max); | ||
| 281 | if (level < 0) | ||
| 282 | return 0; | ||
| 283 | switch (GET_ZONE(i)) { | ||
| 284 | case MONITOR_ZONE: | ||
| 285 | case WARN_ZONE: | ||
| 286 | if (thermal_zone_bind_cooling_device(thermal, i, cdev, | ||
| 287 | level, level)) { | ||
| 288 | pr_err("error binding cdev inst %d\n", i); | ||
| 289 | ret = -EINVAL; | ||
| 290 | } | ||
| 291 | th_zone->bind = true; | ||
| 292 | break; | ||
| 293 | default: | ||
| 294 | ret = -EINVAL; | ||
| 295 | } | ||
| 296 | } | ||
| 297 | |||
| 298 | return ret; | ||
| 299 | } | ||
| 300 | |||
| 301 | /* Unbind callback functions for thermal zone */ | ||
| 302 | static int exynos_unbind(struct thermal_zone_device *thermal, | ||
| 303 | struct thermal_cooling_device *cdev) | ||
| 304 | { | ||
| 305 | int ret = 0, i, tab_size; | ||
| 306 | struct thermal_sensor_conf *data = th_zone->sensor_conf; | ||
| 307 | |||
| 308 | if (th_zone->bind == false) | ||
| 309 | return 0; | ||
| 310 | |||
| 311 | tab_size = data->cooling_data.freq_clip_count; | ||
| 312 | |||
| 313 | if (tab_size == 0) | ||
| 314 | return -EINVAL; | ||
| 315 | |||
| 316 | /* find the registered cooling device */ | ||
| 317 | for (i = 0; i < th_zone->cool_dev_size; i++) | ||
| 318 | if (cdev == th_zone->cool_dev[i]) | ||
| 319 | break; | ||
| 320 | |||
| 321 | /* No matching cooling device */ | ||
| 322 | if (i == th_zone->cool_dev_size) | ||
| 323 | return 0; | ||
| 324 | |||
| 325 | /* Unbind the thermal zone from the cpufreq cooling device */ | ||
| 326 | for (i = 0; i < tab_size; i++) { | ||
| 327 | switch (GET_ZONE(i)) { | ||
| 328 | case MONITOR_ZONE: | ||
| 329 | case WARN_ZONE: | ||
| 330 | if (thermal_zone_unbind_cooling_device(thermal, i, | ||
| 331 | cdev)) { | ||
| 332 | pr_err("error unbinding cdev inst=%d\n", i); | ||
| 333 | ret = -EINVAL; | ||
| 334 | } | ||
| 335 | th_zone->bind = false; | ||
| 336 | break; | ||
| 337 | default: | ||
| 338 | ret = -EINVAL; | ||
| 339 | } | ||
| 340 | } | ||
| 341 | return ret; | ||
| 342 | } | ||
| 343 | |||
| 344 | /* Get temperature callback functions for thermal zone */ | ||
| 345 | static int exynos_get_temp(struct thermal_zone_device *thermal, | ||
| 346 | unsigned long *temp) | ||
| 347 | { | ||
| 348 | void *data; | ||
| 349 | |||
| 350 | if (!th_zone->sensor_conf) { | ||
| 351 | pr_info("Temperature sensor not initialised\n"); | ||
| 352 | return -EINVAL; | ||
| 353 | } | ||
| 354 | data = th_zone->sensor_conf->private_data; | ||
| 355 | *temp = th_zone->sensor_conf->read_temperature(data); | ||
| 356 | /* convert the temperature into millicelsius */ | ||
| 357 | *temp = *temp * MCELSIUS; | ||
| 358 | return 0; | ||
| 359 | } | ||
| 360 | |||
| 361 | /* Get the temperature trend */ | ||
| 362 | static int exynos_get_trend(struct thermal_zone_device *thermal, | ||
| 363 | int trip, enum thermal_trend *trend) | ||
| 364 | { | ||
| 365 | if (thermal->temperature >= trip) | ||
| 366 | *trend = THERMAL_TREND_RAISING; | ||
| 367 | else | ||
| 368 | *trend = THERMAL_TREND_DROPPING; | ||
| 369 | |||
| 370 | return 0; | ||
| 371 | } | ||
| 372 | /* Operation callback functions for thermal zone */ | ||
| 373 | static struct thermal_zone_device_ops const exynos_dev_ops = { | ||
| 374 | .bind = exynos_bind, | ||
| 375 | .unbind = exynos_unbind, | ||
| 376 | .get_temp = exynos_get_temp, | ||
| 377 | .get_trend = exynos_get_trend, | ||
| 378 | .get_mode = exynos_get_mode, | ||
| 379 | .set_mode = exynos_set_mode, | ||
| 380 | .get_trip_type = exynos_get_trip_type, | ||
| 381 | .get_trip_temp = exynos_get_trip_temp, | ||
| 382 | .get_crit_temp = exynos_get_crit_temp, | ||
| 383 | }; | ||
| 384 | |||
| 385 | /* | ||
| 386 | * This function may be called from interrupt based temperature sensor | ||
| 387 | * when threshold is changed. | ||
| 388 | */ | ||
| 389 | static void exynos_report_trigger(void) | ||
| 390 | { | ||
| 391 | unsigned int i; | ||
| 392 | char data[10]; | ||
| 393 | char *envp[] = { data, NULL }; | ||
| 394 | |||
| 395 | if (!th_zone || !th_zone->therm_dev) | ||
| 396 | return; | ||
| 397 | if (th_zone->bind == false) { | ||
| 398 | for (i = 0; i < th_zone->cool_dev_size; i++) { | ||
| 399 | if (!th_zone->cool_dev[i]) | ||
| 400 | continue; | ||
| 401 | exynos_bind(th_zone->therm_dev, | ||
| 402 | th_zone->cool_dev[i]); | ||
| 403 | } | ||
| 404 | } | ||
| 405 | |||
| 406 | thermal_zone_device_update(th_zone->therm_dev); | ||
| 407 | |||
| 408 | mutex_lock(&th_zone->therm_dev->lock); | ||
| 409 | /* Find the level for which trip happened */ | ||
| 410 | for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) { | ||
| 411 | if (th_zone->therm_dev->last_temperature < | ||
| 412 | th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS) | ||
| 413 | break; | ||
| 414 | } | ||
| 415 | |||
| 416 | if (th_zone->mode == THERMAL_DEVICE_ENABLED) { | ||
| 417 | if (i > 0) | ||
| 418 | th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL; | ||
| 419 | else | ||
| 420 | th_zone->therm_dev->polling_delay = IDLE_INTERVAL; | ||
| 421 | } | ||
| 422 | |||
| 423 | snprintf(data, sizeof(data), "%u", i); | ||
| 424 | kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp); | ||
| 425 | mutex_unlock(&th_zone->therm_dev->lock); | ||
| 426 | } | ||
| 427 | |||
| 428 | /* Register with the in-kernel thermal management */ | ||
| 429 | static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) | ||
| 430 | { | ||
| 431 | int ret; | ||
| 432 | struct cpumask mask_val; | ||
| 433 | |||
| 434 | if (!sensor_conf || !sensor_conf->read_temperature) { | ||
| 435 | pr_err("Temperature sensor not initialised\n"); | ||
| 436 | return -EINVAL; | ||
| 437 | } | ||
| 438 | |||
| 439 | th_zone = kzalloc(sizeof(struct exynos_thermal_zone), GFP_KERNEL); | ||
| 440 | if (!th_zone) | ||
| 441 | return -ENOMEM; | ||
| 442 | |||
| 443 | th_zone->sensor_conf = sensor_conf; | ||
| 444 | cpumask_set_cpu(0, &mask_val); | ||
| 445 | th_zone->cool_dev[0] = cpufreq_cooling_register(&mask_val); | ||
| 446 | if (IS_ERR(th_zone->cool_dev[0])) { | ||
| 447 | pr_err("Failed to register cpufreq cooling device\n"); | ||
| 448 | ret = -EINVAL; | ||
| 449 | goto err_unregister; | ||
| 450 | } | ||
| 451 | th_zone->cool_dev_size++; | ||
| 452 | |||
| 453 | th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name, | ||
| 454 | EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, 0, | ||
| 455 | IDLE_INTERVAL); | ||
| 456 | |||
| 457 | if (IS_ERR(th_zone->therm_dev)) { | ||
| 458 | pr_err("Failed to register thermal zone device\n"); | ||
| 459 | ret = -EINVAL; | ||
| 460 | goto err_unregister; | ||
| 461 | } | ||
| 462 | th_zone->mode = THERMAL_DEVICE_ENABLED; | ||
| 463 | |||
| 464 | pr_info("Exynos: Kernel Thermal management registered\n"); | ||
| 465 | |||
| 466 | return 0; | ||
| 467 | |||
| 468 | err_unregister: | ||
| 469 | exynos_unregister_thermal(); | ||
| 470 | return ret; | ||
| 471 | } | ||
| 472 | |||
| 473 | /* Un-Register with the in-kernel thermal management */ | ||
| 474 | static void exynos_unregister_thermal(void) | ||
| 475 | { | ||
| 476 | int i; | ||
| 477 | |||
| 478 | if (!th_zone) | ||
| 479 | return; | ||
| 480 | |||
| 481 | if (th_zone->therm_dev) | ||
| 482 | thermal_zone_device_unregister(th_zone->therm_dev); | ||
| 483 | |||
| 484 | for (i = 0; i < th_zone->cool_dev_size; i++) { | ||
| 485 | if (th_zone->cool_dev[i]) | ||
| 486 | cpufreq_cooling_unregister(th_zone->cool_dev[i]); | ||
| 487 | } | ||
| 488 | |||
| 489 | kfree(th_zone); | ||
| 490 | pr_info("Exynos: Kernel Thermal management unregistered\n"); | ||
| 491 | } | ||
| 492 | |||
| 493 | /* | ||
| 494 | * TMU treats temperature as a mapped temperature code. | ||
| 495 | * The temperature is converted differently depending on the calibration type. | ||
| 496 | */ | ||
| 497 | static int temp_to_code(struct exynos_tmu_data *data, u8 temp) | ||
| 498 | { | ||
| 499 | struct exynos_tmu_platform_data *pdata = data->pdata; | ||
| 500 | int temp_code; | ||
| 501 | |||
| 502 | if (data->soc == SOC_ARCH_EXYNOS4210) | ||
| 503 | /* temp should range between 25 and 125 */ | ||
| 504 | if (temp < 25 || temp > 125) { | ||
| 505 | temp_code = -EINVAL; | ||
| 506 | goto out; | ||
| 507 | } | ||
| 508 | |||
| 509 | switch (pdata->cal_type) { | ||
| 510 | case TYPE_TWO_POINT_TRIMMING: | ||
| 511 | temp_code = (temp - 25) * | ||
| 512 | (data->temp_error2 - data->temp_error1) / | ||
| 513 | (85 - 25) + data->temp_error1; | ||
| 514 | break; | ||
| 515 | case TYPE_ONE_POINT_TRIMMING: | ||
| 516 | temp_code = temp + data->temp_error1 - 25; | ||
| 517 | break; | ||
| 518 | default: | ||
| 519 | temp_code = temp + EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET; | ||
| 520 | break; | ||
| 521 | } | ||
| 522 | out: | ||
| 523 | return temp_code; | ||
| 524 | } | ||
| 525 | |||
| 526 | /* | ||
| 527 | * Calculate a temperature value from a temperature code. | ||
| 528 | * The unit of the temperature is degree Celsius. | ||
| 529 | */ | ||
| 530 | static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) | ||
| 531 | { | ||
| 532 | struct exynos_tmu_platform_data *pdata = data->pdata; | ||
| 533 | int temp; | ||
| 534 | |||
| 535 | if (data->soc == SOC_ARCH_EXYNOS4210) | ||
| 536 | /* temp_code should range between 75 and 175 */ | ||
| 537 | if (temp_code < 75 || temp_code > 175) { | ||
| 538 | temp = -ENODATA; | ||
| 539 | goto out; | ||
| 540 | } | ||
| 541 | |||
| 542 | switch (pdata->cal_type) { | ||
| 543 | case TYPE_TWO_POINT_TRIMMING: | ||
| 544 | temp = (temp_code - data->temp_error1) * (85 - 25) / | ||
| 545 | (data->temp_error2 - data->temp_error1) + 25; | ||
| 546 | break; | ||
| 547 | case TYPE_ONE_POINT_TRIMMING: | ||
| 548 | temp = temp_code - data->temp_error1 + 25; | ||
| 549 | break; | ||
| 550 | default: | ||
| 551 | temp = temp_code - EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET; | ||
| 552 | break; | ||
| 553 | } | ||
| 554 | out: | ||
| 555 | return temp; | ||
| 556 | } | ||
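A worked example of the two conversions for TYPE_ONE_POINT_TRIMMING with temp_error1 = 55 (the efuse_value fallback used by the default platform data further down):

    temp_to_code(85) = 85 + 55 - 25 = 115
    code_to_temp(115) = 115 - 55 + 25 = 85

Two-point trimming instead interpolates between the codes fused at 25 C and 85 C.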
| 557 | |||
| 558 | static int exynos_tmu_initialize(struct platform_device *pdev) | ||
| 559 | { | ||
| 560 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | ||
| 561 | struct exynos_tmu_platform_data *pdata = data->pdata; | ||
| 562 | unsigned int status, trim_info, rising_threshold; | ||
| 563 | int ret = 0, threshold_code; | ||
| 564 | |||
| 565 | mutex_lock(&data->lock); | ||
| 566 | clk_enable(data->clk); | ||
| 567 | |||
| 568 | status = readb(data->base + EXYNOS_TMU_REG_STATUS); | ||
| 569 | if (!status) { | ||
| 570 | ret = -EBUSY; | ||
| 571 | goto out; | ||
| 572 | } | ||
| 573 | |||
| 574 | if (data->soc == SOC_ARCH_EXYNOS) { | ||
| 575 | __raw_writel(EXYNOS_TRIMINFO_RELOAD, | ||
| 576 | data->base + EXYNOS_TMU_TRIMINFO_CON); | ||
| 577 | } | ||
| 578 | /* Save trimming info in order to perform calibration */ | ||
| 579 | trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO); | ||
| 580 | data->temp_error1 = trim_info & EXYNOS_TMU_TRIM_TEMP_MASK; | ||
| 581 | data->temp_error2 = ((trim_info >> 8) & EXYNOS_TMU_TRIM_TEMP_MASK); | ||
| 582 | |||
| 583 | if ((EFUSE_MIN_VALUE > data->temp_error1) || | ||
| 584 | (data->temp_error1 > EFUSE_MAX_VALUE) || | ||
| 585 | (data->temp_error2 != 0)) | ||
| 586 | data->temp_error1 = pdata->efuse_value; | ||
| 587 | |||
| 588 | if (data->soc == SOC_ARCH_EXYNOS4210) { | ||
| 589 | /* Write temperature code for threshold */ | ||
| 590 | threshold_code = temp_to_code(data, pdata->threshold); | ||
| 591 | if (threshold_code < 0) { | ||
| 592 | ret = threshold_code; | ||
| 593 | goto out; | ||
| 594 | } | ||
| 595 | writeb(threshold_code, | ||
| 596 | data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); | ||
| 597 | |||
| 598 | writeb(pdata->trigger_levels[0], | ||
| 599 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0); | ||
| 600 | writeb(pdata->trigger_levels[1], | ||
| 601 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1); | ||
| 602 | writeb(pdata->trigger_levels[2], | ||
| 603 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2); | ||
| 604 | writeb(pdata->trigger_levels[3], | ||
| 605 | data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3); | ||
| 606 | |||
| 607 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, | ||
| 608 | data->base + EXYNOS_TMU_REG_INTCLEAR); | ||
| 609 | } else if (data->soc == SOC_ARCH_EXYNOS) { | ||
| 610 | /* Write temperature code for threshold */ | ||
| 611 | threshold_code = temp_to_code(data, pdata->trigger_levels[0]); | ||
| 612 | if (threshold_code < 0) { | ||
| 613 | ret = threshold_code; | ||
| 614 | goto out; | ||
| 615 | } | ||
| 616 | rising_threshold = threshold_code; | ||
| 617 | threshold_code = temp_to_code(data, pdata->trigger_levels[1]); | ||
| 618 | if (threshold_code < 0) { | ||
| 619 | ret = threshold_code; | ||
| 620 | goto out; | ||
| 621 | } | ||
| 622 | rising_threshold |= (threshold_code << 8); | ||
| 623 | threshold_code = temp_to_code(data, pdata->trigger_levels[2]); | ||
| 624 | if (threshold_code < 0) { | ||
| 625 | ret = threshold_code; | ||
| 626 | goto out; | ||
| 627 | } | ||
| 628 | rising_threshold |= (threshold_code << 16); | ||
| 629 | |||
| 630 | writel(rising_threshold, | ||
| 631 | data->base + EXYNOS_THD_TEMP_RISE); | ||
| 632 | writel(0, data->base + EXYNOS_THD_TEMP_FALL); | ||
| 633 | |||
| 634 | writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT, | ||
| 635 | data->base + EXYNOS_TMU_REG_INTCLEAR); | ||
| 636 | } | ||
| 637 | out: | ||
| 638 | clk_disable(data->clk); | ||
| 639 | mutex_unlock(&data->lock); | ||
| 640 | |||
| 641 | return ret; | ||
| 642 | } | ||
| 643 | |||
| 644 | static void exynos_tmu_control(struct platform_device *pdev, bool on) | ||
| 645 | { | ||
| 646 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | ||
| 647 | struct exynos_tmu_platform_data *pdata = data->pdata; | ||
| 648 | unsigned int con, interrupt_en; | ||
| 649 | |||
| 650 | mutex_lock(&data->lock); | ||
| 651 | clk_enable(data->clk); | ||
| 652 | |||
| 653 | con = pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT | | ||
| 654 | pdata->gain << EXYNOS_TMU_GAIN_SHIFT; | ||
| 655 | |||
| 656 | if (data->soc == SOC_ARCH_EXYNOS) { | ||
| 657 | con |= pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT; | ||
| 658 | con |= (EXYNOS_MUX_ADDR_VALUE << EXYNOS_MUX_ADDR_SHIFT); | ||
| 659 | } | ||
| 660 | |||
| 661 | if (on) { | ||
| 662 | con |= EXYNOS_TMU_CORE_ON; | ||
| 663 | interrupt_en = pdata->trigger_level3_en << 12 | | ||
| 664 | pdata->trigger_level2_en << 8 | | ||
| 665 | pdata->trigger_level1_en << 4 | | ||
| 666 | pdata->trigger_level0_en; | ||
| 667 | } else { | ||
| 668 | con |= EXYNOS_TMU_CORE_OFF; | ||
| 669 | interrupt_en = 0; /* Disable all interrupts */ | ||
| 670 | } | ||
| 671 | writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN); | ||
| 672 | writel(con, data->base + EXYNOS_TMU_REG_CONTROL); | ||
| 673 | |||
| 674 | clk_disable(data->clk); | ||
| 675 | mutex_unlock(&data->lock); | ||
| 676 | } | ||
| 677 | |||
| 678 | static int exynos_tmu_read(struct exynos_tmu_data *data) | ||
| 679 | { | ||
| 680 | u8 temp_code; | ||
| 681 | int temp; | ||
| 682 | |||
| 683 | mutex_lock(&data->lock); | ||
| 684 | clk_enable(data->clk); | ||
| 685 | |||
| 686 | temp_code = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP); | ||
| 687 | temp = code_to_temp(data, temp_code); | ||
| 688 | |||
| 689 | clk_disable(data->clk); | ||
| 690 | mutex_unlock(&data->lock); | ||
| 691 | |||
| 692 | return temp; | ||
| 693 | } | ||
| 694 | |||
| 695 | static void exynos_tmu_work(struct work_struct *work) | ||
| 696 | { | ||
| 697 | struct exynos_tmu_data *data = container_of(work, | ||
| 698 | struct exynos_tmu_data, irq_work); | ||
| 699 | |||
| 700 | mutex_lock(&data->lock); | ||
| 701 | clk_enable(data->clk); | ||
| 702 | |||
| 703 | |||
| 704 | if (data->soc == SOC_ARCH_EXYNOS) | ||
| 705 | writel(EXYNOS_TMU_CLEAR_RISE_INT, | ||
| 706 | data->base + EXYNOS_TMU_REG_INTCLEAR); | ||
| 707 | else | ||
| 708 | writel(EXYNOS4210_TMU_INTCLEAR_VAL, | ||
| 709 | data->base + EXYNOS_TMU_REG_INTCLEAR); | ||
| 710 | |||
| 711 | clk_disable(data->clk); | ||
| 712 | mutex_unlock(&data->lock); | ||
| 713 | exynos_report_trigger(); | ||
| 714 | enable_irq(data->irq); | ||
| 715 | } | ||
| 716 | |||
| 717 | static irqreturn_t exynos_tmu_irq(int irq, void *id) | ||
| 718 | { | ||
| 719 | struct exynos_tmu_data *data = id; | ||
| 720 | |||
| 721 | disable_irq_nosync(irq); | ||
| 722 | schedule_work(&data->irq_work); | ||
| 723 | |||
| 724 | return IRQ_HANDLED; | ||
| 725 | } | ||
| 726 | static struct thermal_sensor_conf exynos_sensor_conf = { | ||
| 727 | .name = "exynos-therm", | ||
| 728 | .read_temperature = (int (*)(void *))exynos_tmu_read, | ||
| 729 | }; | ||
| 730 | |||
| 731 | #if defined(CONFIG_CPU_EXYNOS4210) | ||
| 732 | static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = { | ||
| 733 | .threshold = 80, | ||
| 734 | .trigger_levels[0] = 5, | ||
| 735 | .trigger_levels[1] = 20, | ||
| 736 | .trigger_levels[2] = 30, | ||
| 737 | .trigger_level0_en = 1, | ||
| 738 | .trigger_level1_en = 1, | ||
| 739 | .trigger_level2_en = 1, | ||
| 740 | .trigger_level3_en = 0, | ||
| 741 | .gain = 15, | ||
| 742 | .reference_voltage = 7, | ||
| 743 | .cal_type = TYPE_ONE_POINT_TRIMMING, | ||
| 744 | .freq_tab[0] = { | ||
| 745 | .freq_clip_max = 800 * 1000, | ||
| 746 | .temp_level = 85, | ||
| 747 | }, | ||
| 748 | .freq_tab[1] = { | ||
| 749 | .freq_clip_max = 200 * 1000, | ||
| 750 | .temp_level = 100, | ||
| 751 | }, | ||
| 752 | .freq_tab_count = 2, | ||
| 753 | .type = SOC_ARCH_EXYNOS4210, | ||
| 754 | }; | ||
| 755 | #define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data) | ||
| 756 | #else | ||
| 757 | #define EXYNOS4210_TMU_DRV_DATA (NULL) | ||
| 758 | #endif | ||
| 759 | |||
| 760 | #if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) | ||
| 761 | static struct exynos_tmu_platform_data const exynos_default_tmu_data = { | ||
| 762 | .trigger_levels[0] = 85, | ||
| 763 | .trigger_levels[1] = 103, | ||
| 764 | .trigger_levels[2] = 110, | ||
| 765 | .trigger_level0_en = 1, | ||
| 766 | .trigger_level1_en = 1, | ||
| 767 | .trigger_level2_en = 1, | ||
| 768 | .trigger_level3_en = 0, | ||
| 769 | .gain = 8, | ||
| 770 | .reference_voltage = 16, | ||
| 771 | .noise_cancel_mode = 4, | ||
| 772 | .cal_type = TYPE_ONE_POINT_TRIMMING, | ||
| 773 | .efuse_value = 55, | ||
| 774 | .freq_tab[0] = { | ||
| 775 | .freq_clip_max = 800 * 1000, | ||
| 776 | .temp_level = 85, | ||
| 777 | }, | ||
| 778 | .freq_tab[1] = { | ||
| 779 | .freq_clip_max = 200 * 1000, | ||
| 780 | .temp_level = 103, | ||
| 781 | }, | ||
| 782 | .freq_tab_count = 2, | ||
| 783 | .type = SOC_ARCH_EXYNOS, | ||
| 784 | }; | ||
| 785 | #define EXYNOS_TMU_DRV_DATA (&exynos_default_tmu_data) | ||
| 786 | #else | ||
| 787 | #define EXYNOS_TMU_DRV_DATA (NULL) | ||
| 788 | #endif | ||
| 789 | |||
| 790 | #ifdef CONFIG_OF | ||
| 791 | static const struct of_device_id exynos_tmu_match[] = { | ||
| 792 | { | ||
| 793 | .compatible = "samsung,exynos4210-tmu", | ||
| 794 | .data = (void *)EXYNOS4210_TMU_DRV_DATA, | ||
| 795 | }, | ||
| 796 | { | ||
| 797 | .compatible = "samsung,exynos5250-tmu", | ||
| 798 | .data = (void *)EXYNOS_TMU_DRV_DATA, | ||
| 799 | }, | ||
| 800 | {}, | ||
| 801 | }; | ||
| 802 | MODULE_DEVICE_TABLE(of, exynos_tmu_match); | ||
| 803 | #else | ||
| 804 | #define exynos_tmu_match NULL | ||
| 805 | #endif | ||
| 806 | |||
| 807 | static struct platform_device_id exynos_tmu_driver_ids[] = { | ||
| 808 | { | ||
| 809 | .name = "exynos4210-tmu", | ||
| 810 | .driver_data = (kernel_ulong_t)EXYNOS4210_TMU_DRV_DATA, | ||
| 811 | }, | ||
| 812 | { | ||
| 813 | .name = "exynos5250-tmu", | ||
| 814 | .driver_data = (kernel_ulong_t)EXYNOS_TMU_DRV_DATA, | ||
| 815 | }, | ||
| 816 | { }, | ||
| 817 | }; | ||
| 818 | MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids); | ||
| 819 | |||
| 820 | static inline struct exynos_tmu_platform_data *exynos_get_driver_data( | ||
| 821 | struct platform_device *pdev) | ||
| 822 | { | ||
| 823 | #ifdef CONFIG_OF | ||
| 824 | if (pdev->dev.of_node) { | ||
| 825 | const struct of_device_id *match; | ||
| 826 | match = of_match_node(exynos_tmu_match, pdev->dev.of_node); | ||
| 827 | if (!match) | ||
| 828 | return NULL; | ||
| 829 | return (struct exynos_tmu_platform_data *) match->data; | ||
| 830 | } | ||
| 831 | #endif | ||
| 832 | return (struct exynos_tmu_platform_data *) | ||
| 833 | platform_get_device_id(pdev)->driver_data; | ||
| 834 | } | ||
| 835 | static int __devinit exynos_tmu_probe(struct platform_device *pdev) | ||
| 836 | { | ||
| 837 | struct exynos_tmu_data *data; | ||
| 838 | struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data; | ||
| 839 | int ret, i; | ||
| 840 | |||
| 841 | if (!pdata) | ||
| 842 | pdata = exynos_get_driver_data(pdev); | ||
| 843 | |||
| 844 | if (!pdata) { | ||
| 845 | dev_err(&pdev->dev, "No platform init data supplied.\n"); | ||
| 846 | return -ENODEV; | ||
| 847 | } | ||
| 848 | data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data), | ||
| 849 | GFP_KERNEL); | ||
| 850 | if (!data) { | ||
| 851 | dev_err(&pdev->dev, "Failed to allocate driver structure\n"); | ||
| 852 | return -ENOMEM; | ||
| 853 | } | ||
| 854 | |||
| 855 | data->irq = platform_get_irq(pdev, 0); | ||
| 856 | if (data->irq < 0) { | ||
| 857 | dev_err(&pdev->dev, "Failed to get platform irq\n"); | ||
| 858 | return data->irq; | ||
| 859 | } | ||
| 860 | |||
| 861 | INIT_WORK(&data->irq_work, exynos_tmu_work); | ||
| 862 | |||
| 863 | data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 864 | if (!data->mem) { | ||
| 865 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | ||
| 866 | return -ENOENT; | ||
| 867 | } | ||
| 868 | |||
| 869 | data->base = devm_request_and_ioremap(&pdev->dev, data->mem); | ||
| 870 | if (!data->base) { | ||
| 871 | dev_err(&pdev->dev, "Failed to ioremap memory\n"); | ||
| 872 | return -ENODEV; | ||
| 873 | } | ||
| 874 | |||
| 875 | ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, | ||
| 876 | IRQF_TRIGGER_RISING, "exynos-tmu", data); | ||
| 877 | if (ret) { | ||
| 878 | dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); | ||
| 879 | return ret; | ||
| 880 | } | ||
| 881 | |||
| 882 | data->clk = clk_get(NULL, "tmu_apbif"); | ||
| 883 | if (IS_ERR(data->clk)) { | ||
| 884 | dev_err(&pdev->dev, "Failed to get clock\n"); | ||
| 885 | return PTR_ERR(data->clk); | ||
| 886 | } | ||
| 887 | |||
| 888 | if (pdata->type == SOC_ARCH_EXYNOS || | ||
| 889 | pdata->type == SOC_ARCH_EXYNOS4210) | ||
| 890 | data->soc = pdata->type; | ||
| 891 | else { | ||
| 892 | ret = -EINVAL; | ||
| 893 | dev_err(&pdev->dev, "Platform not supported\n"); | ||
| 894 | goto err_clk; | ||
| 895 | } | ||
| 896 | |||
| 897 | data->pdata = pdata; | ||
| 898 | platform_set_drvdata(pdev, data); | ||
| 899 | mutex_init(&data->lock); | ||
| 900 | |||
| 901 | ret = exynos_tmu_initialize(pdev); | ||
| 902 | if (ret) { | ||
| 903 | dev_err(&pdev->dev, "Failed to initialize TMU\n"); | ||
| 904 | goto err_clk; | ||
| 905 | } | ||
| 906 | |||
| 907 | exynos_tmu_control(pdev, true); | ||
| 908 | |||
| 909 | /* Register the sensor with thermal management interface */ | ||
| 910 | exynos_sensor_conf.private_data = data; | ||
| 911 | exynos_sensor_conf.trip_data.trip_count = pdata->trigger_level0_en + | ||
| 912 | pdata->trigger_level1_en + pdata->trigger_level2_en + | ||
| 913 | pdata->trigger_level3_en; | ||
| 914 | |||
| 915 | for (i = 0; i < exynos_sensor_conf.trip_data.trip_count; i++) | ||
| 916 | exynos_sensor_conf.trip_data.trip_val[i] = | ||
| 917 | pdata->threshold + pdata->trigger_levels[i]; | ||
| 918 | |||
| 919 | exynos_sensor_conf.cooling_data.freq_clip_count = | ||
| 920 | pdata->freq_tab_count; | ||
| 921 | for (i = 0; i < pdata->freq_tab_count; i++) { | ||
| 922 | exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max = | ||
| 923 | pdata->freq_tab[i].freq_clip_max; | ||
| 924 | exynos_sensor_conf.cooling_data.freq_data[i].temp_level = | ||
| 925 | pdata->freq_tab[i].temp_level; | ||
| 926 | } | ||
| 927 | |||
| 928 | ret = exynos_register_thermal(&exynos_sensor_conf); | ||
| 929 | if (ret) { | ||
| 930 | dev_err(&pdev->dev, "Failed to register thermal interface\n"); | ||
| 931 | goto err_clk; | ||
| 932 | } | ||
| 933 | return 0; | ||
| 934 | err_clk: | ||
| 935 | platform_set_drvdata(pdev, NULL); | ||
| 936 | clk_put(data->clk); | ||
| 937 | return ret; | ||
| 938 | } | ||
| 939 | |||
| 940 | static int __devexit exynos_tmu_remove(struct platform_device *pdev) | ||
| 941 | { | ||
| 942 | struct exynos_tmu_data *data = platform_get_drvdata(pdev); | ||
| 943 | |||
| 944 | exynos_tmu_control(pdev, false); | ||
| 945 | |||
| 946 | exynos_unregister_thermal(); | ||
| 947 | |||
| 948 | clk_put(data->clk); | ||
| 949 | |||
| 950 | platform_set_drvdata(pdev, NULL); | ||
| 951 | |||
| 952 | return 0; | ||
| 953 | } | ||
| 954 | |||
| 955 | #ifdef CONFIG_PM_SLEEP | ||
| 956 | static int exynos_tmu_suspend(struct device *dev) | ||
| 957 | { | ||
| 958 | exynos_tmu_control(to_platform_device(dev), false); | ||
| 959 | |||
| 960 | return 0; | ||
| 961 | } | ||
| 962 | |||
| 963 | static int exynos_tmu_resume(struct device *dev) | ||
| 964 | { | ||
| 965 | struct platform_device *pdev = to_platform_device(dev); | ||
| 966 | |||
| 967 | exynos_tmu_initialize(pdev); | ||
| 968 | exynos_tmu_control(pdev, true); | ||
| 969 | |||
| 970 | return 0; | ||
| 971 | } | ||
| 972 | |||
| 973 | static SIMPLE_DEV_PM_OPS(exynos_tmu_pm, | ||
| 974 | exynos_tmu_suspend, exynos_tmu_resume); | ||
| 975 | #define EXYNOS_TMU_PM (&exynos_tmu_pm) | ||
| 976 | #else | ||
| 977 | #define EXYNOS_TMU_PM NULL | ||
| 978 | #endif | ||
| 979 | |||
| 980 | static struct platform_driver exynos_tmu_driver = { | ||
| 981 | .driver = { | ||
| 982 | .name = "exynos-tmu", | ||
| 983 | .owner = THIS_MODULE, | ||
| 984 | .pm = EXYNOS_TMU_PM, | ||
| 985 | .of_match_table = exynos_tmu_match, | ||
| 986 | }, | ||
| 987 | .probe = exynos_tmu_probe, | ||
| 988 | .remove = __devexit_p(exynos_tmu_remove), | ||
| 989 | .id_table = exynos_tmu_driver_ids, | ||
| 990 | }; | ||
| 991 | |||
| 992 | module_platform_driver(exynos_tmu_driver); | ||
| 993 | |||
| 994 | MODULE_DESCRIPTION("EXYNOS TMU Driver"); | ||
| 995 | MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); | ||
| 996 | MODULE_LICENSE("GPL"); | ||
| 997 | MODULE_ALIAS("platform:exynos-tmu"); | ||
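For reference, the trip points registered by exynos_tmu_probe() above are computed as pdata->threshold plus the per-level trigger offsets, so the Exynos4210 defaults (threshold 80, levels 5/20/30) come out at 85, 100 and 110 degrees C; the Exynos5250/4412 table leaves .threshold at zero, so its trigger levels (85/103/110) are the trip temperatures directly. A minimal standalone sketch of that arithmetic, not driver code:

    /* Worked example of the trip computation in exynos_tmu_probe(),
     * using the Exynos4210 defaults shown above. */
    #include <stdio.h>

    int main(void)
    {
            int threshold = 80;
            int trigger_levels[] = { 5, 20, 30 };
            int i;

            for (i = 0; i < 3; i++) /* prints 85, 100, 110 */
                    printf("trip%d = %d C\n", i, threshold + trigger_levels[i]);
            return 0;
    }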
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c new file mode 100644 index 000000000000..d4452716aaab --- /dev/null +++ b/drivers/thermal/rcar_thermal.c | |||
| @@ -0,0 +1,260 @@ | |||
| 1 | /* | ||
| 2 | * R-Car THS/TSC thermal sensor driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Renesas Solutions Corp. | ||
| 5 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; version 2 of the License. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License along | ||
| 17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 19 | */ | ||
| 20 | #include <linux/delay.h> | ||
| 21 | #include <linux/err.h> | ||
| 22 | #include <linux/io.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/slab.h> | ||
| 26 | #include <linux/spinlock.h> | ||
| 27 | #include <linux/thermal.h> | ||
| 28 | |||
| 29 | #define THSCR 0x2c | ||
| 30 | #define THSSR 0x30 | ||
| 31 | |||
| 32 | /* THSCR */ | ||
| 33 | #define CPTAP 0xf | ||
| 34 | |||
| 35 | /* THSSR */ | ||
| 36 | #define CTEMP 0x3f | ||
| 37 | |||
| 38 | |||
| 39 | struct rcar_thermal_priv { | ||
| 40 | void __iomem *base; | ||
| 41 | struct device *dev; | ||
| 42 | spinlock_t lock; | ||
| 43 | u32 comp; | ||
| 44 | }; | ||
| 45 | |||
| 46 | /* | ||
| 47 | * basic functions | ||
| 48 | */ | ||
| 49 | static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg) | ||
| 50 | { | ||
| 51 | unsigned long flags; | ||
| 52 | u32 ret; | ||
| 53 | |||
| 54 | spin_lock_irqsave(&priv->lock, flags); | ||
| 55 | |||
| 56 | ret = ioread32(priv->base + reg); | ||
| 57 | |||
| 58 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 59 | |||
| 60 | return ret; | ||
| 61 | } | ||
| 62 | |||
| 63 | #if 0 /* no user at this point */ | ||
| 64 | static void rcar_thermal_write(struct rcar_thermal_priv *priv, | ||
| 65 | u32 reg, u32 data) | ||
| 66 | { | ||
| 67 | unsigned long flags; | ||
| 68 | |||
| 69 | spin_lock_irqsave(&priv->lock, flags); | ||
| 70 | |||
| 71 | iowrite32(data, priv->base + reg); | ||
| 72 | |||
| 73 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 74 | } | ||
| 75 | #endif | ||
| 76 | |||
| 77 | static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg, | ||
| 78 | u32 mask, u32 data) | ||
| 79 | { | ||
| 80 | unsigned long flags; | ||
| 81 | u32 val; | ||
| 82 | |||
| 83 | spin_lock_irqsave(&priv->lock, flags); | ||
| 84 | |||
| 85 | val = ioread32(priv->base + reg); | ||
| 86 | val &= ~mask; | ||
| 87 | val |= (data & mask); | ||
| 88 | iowrite32(val, priv->base + reg); | ||
| 89 | |||
| 90 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 91 | } | ||
| 92 | |||
| 93 | /* | ||
| 94 | * zone device functions | ||
| 95 | */ | ||
| 96 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, | ||
| 97 | unsigned long *temp) | ||
| 98 | { | ||
| 99 | struct rcar_thermal_priv *priv = zone->devdata; | ||
| 100 | int val, min, max, tmp; | ||
| 101 | |||
| 102 | tmp = -200; /* default */ | ||
| 103 | while (1) { | ||
| 104 | if (priv->comp < 1 || priv->comp > 12) { | ||
| 105 | dev_err(priv->dev, | ||
| 106 | "THSSR invalid data (%d)\n", priv->comp); | ||
| 107 | priv->comp = 4; /* for next thermal */ | ||
| 108 | return -EINVAL; | ||
| 109 | } | ||
| 110 | |||
| 111 | /* | ||
| 112 | * THS comparator offset and the reference temperature | ||
| 113 | * | ||
| 114 | * Comparator | reference | Temperature field | ||
| 115 | * offset | temperature | measurement | ||
| 116 | * | (degrees C) | (degrees C) | ||
| 117 | * -------------+---------------+------------------- | ||
| 118 | * 1 | -45 | -45 to -30 | ||
| 119 | * 2 | -30 | -30 to -15 | ||
| 120 | * 3 | -15 | -15 to 0 | ||
| 121 | * 4 | 0 | 0 to +15 | ||
| 122 | * 5 | +15 | +15 to +30 | ||
| 123 | * 6 | +30 | +30 to +45 | ||
| 124 | * 7 | +45 | +45 to +60 | ||
| 125 | * 8 | +60 | +60 to +75 | ||
| 126 | * 9 | +75 | +75 to +90 | ||
| 127 | * 10 | +90 | +90 to +105 | ||
| 128 | * 11 | +105 | +105 to +120 | ||
| 129 | * 12 | +120 | +120 to +135 | ||
| 130 | */ | ||
| 131 | |||
| 132 | /* calculate thermal limitation */ | ||
| 133 | min = (priv->comp * 15) - 60; | ||
| 134 | max = min + 15; | ||
| 135 | |||
| 136 | /* | ||
| 137 | * We need to wait 300us after changing the comparator offset | ||
| 138 | * to get a stable temperature reading; | ||
| 139 | * see "Usage Notes" in the datasheet. | ||
| 140 | */ | ||
| 141 | rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp); | ||
| 142 | udelay(300); | ||
| 143 | |||
| 144 | /* calculate current temperature */ | ||
| 145 | val = rcar_thermal_read(priv, THSSR) & CTEMP; | ||
| 146 | val = (val * 5) - 65; | ||
| 147 | |||
| 148 | dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n", | ||
| 149 | priv->comp, min, max, val); | ||
| 150 | |||
| 151 | /* | ||
| 152 | * If val hits min or max, retry with the next | ||
| 153 | * comparator offset, since the reading is clipped. | ||
| 154 | * But val might already be the correct temperature; | ||
| 155 | * keep it in "tmp" and compare it with the next reading. | ||
| 156 | */ | ||
| 157 | if (tmp == val) | ||
| 158 | break; | ||
| 159 | |||
| 160 | if (val <= min) { | ||
| 161 | tmp = min; | ||
| 162 | priv->comp--; /* try again */ | ||
| 163 | } else if (val >= max) { | ||
| 164 | tmp = max; | ||
| 165 | priv->comp++; /* try again */ | ||
| 166 | } else { | ||
| 167 | tmp = val; | ||
| 168 | break; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 172 | *temp = tmp; | ||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | static struct thermal_zone_device_ops rcar_thermal_zone_ops = { | ||
| 177 | .get_temp = rcar_thermal_get_temp, | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * platform functions | ||
| 182 | */ | ||
| 183 | static int rcar_thermal_probe(struct platform_device *pdev) | ||
| 184 | { | ||
| 185 | struct thermal_zone_device *zone; | ||
| 186 | struct rcar_thermal_priv *priv; | ||
| 187 | struct resource *res; | ||
| 188 | int ret; | ||
| 189 | |||
| 190 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 191 | if (!res) { | ||
| 192 | dev_err(&pdev->dev, "Could not get platform resource\n"); | ||
| 193 | return -ENODEV; | ||
| 194 | } | ||
| 195 | |||
| 196 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
| 197 | if (!priv) { | ||
| 198 | dev_err(&pdev->dev, "Could not allocate priv\n"); | ||
| 199 | return -ENOMEM; | ||
| 200 | } | ||
| 201 | |||
| 202 | priv->comp = 4; /* basic setup */ | ||
| 203 | priv->dev = &pdev->dev; | ||
| 204 | spin_lock_init(&priv->lock); | ||
| 205 | priv->base = devm_ioremap_nocache(&pdev->dev, | ||
| 206 | res->start, resource_size(res)); | ||
| 207 | if (!priv->base) { | ||
| 208 | dev_err(&pdev->dev, "Unable to ioremap thermal register\n"); | ||
| 209 | ret = -ENOMEM; | ||
| 210 | goto error_free_priv; | ||
| 211 | } | ||
| 212 | |||
| 213 | zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv, | ||
| 214 | &rcar_thermal_zone_ops, 0, 0); | ||
| 215 | if (IS_ERR(zone)) { | ||
| 216 | dev_err(&pdev->dev, "Failed to register thermal zone device\n"); | ||
| 217 | ret = PTR_ERR(zone); | ||
| 218 | goto error_iounmap; | ||
| 219 | } | ||
| 220 | |||
| 221 | platform_set_drvdata(pdev, zone); | ||
| 222 | |||
| 223 | dev_info(&pdev->dev, "probed\n"); | ||
| 224 | |||
| 225 | return 0; | ||
| 226 | |||
| 227 | error_iounmap: | ||
| 228 | devm_iounmap(&pdev->dev, priv->base); | ||
| 229 | error_free_priv: | ||
| 230 | devm_kfree(&pdev->dev, priv); | ||
| 231 | |||
| 232 | return ret; | ||
| 233 | } | ||
| 234 | |||
| 235 | static int rcar_thermal_remove(struct platform_device *pdev) | ||
| 236 | { | ||
| 237 | struct thermal_zone_device *zone = platform_get_drvdata(pdev); | ||
| 238 | struct rcar_thermal_priv *priv = zone->devdata; | ||
| 239 | |||
| 240 | thermal_zone_device_unregister(zone); | ||
| 241 | platform_set_drvdata(pdev, NULL); | ||
| 242 | |||
| 243 | devm_iounmap(&pdev->dev, priv->base); | ||
| 244 | devm_kfree(&pdev->dev, priv); | ||
| 245 | |||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | static struct platform_driver rcar_thermal_driver = { | ||
| 250 | .driver = { | ||
| 251 | .name = "rcar_thermal", | ||
| 252 | }, | ||
| 253 | .probe = rcar_thermal_probe, | ||
| 254 | .remove = rcar_thermal_remove, | ||
| 255 | }; | ||
| 256 | module_platform_driver(rcar_thermal_driver); | ||
| 257 | |||
| 258 | MODULE_LICENSE("GPL"); | ||
| 259 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); | ||
| 260 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | ||
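The lookup loop in rcar_thermal_get_temp() above narrows in on the right comparator window: each offset comp (1 to 12) covers a 15-degree band whose bounds follow directly from the offset, and the 6-bit CTEMP field converts to degrees C as raw * 5 - 65. A standalone sketch of that math; the raw value is a made-up reading:

    /* Comparator window bounds and CTEMP conversion, as in the R-Car driver. */
    #include <stdio.h>

    int main(void)
    {
            int comp, raw;

            for (comp = 1; comp <= 12; comp++) {
                    int min = (comp * 15) - 60;     /* lower bound of the band */
                    int max = min + 15;             /* upper bound of the band */
                    printf("comp %2d: %4d to %d C\n", comp, min, max);
            }

            raw = 0x15;     /* hypothetical CTEMP reading */
            printf("raw 0x%02x -> %d C\n", raw, (raw * 5) - 65);    /* 40 C */
            return 0;
    }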
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 5f8ee39f2000..9bc969261d01 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c | |||
| @@ -147,7 +147,7 @@ static int spear_thermal_probe(struct platform_device *pdev) | |||
| 147 | writel_relaxed(stdev->flags, stdev->thermal_base); | 147 | writel_relaxed(stdev->flags, stdev->thermal_base); |
| 148 | 148 | ||
| 149 | spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0, | 149 | spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0, |
| 150 | stdev, &ops, 0, 0, 0, 0); | 150 | stdev, &ops, 0, 0); |
| 151 | if (IS_ERR(spear_thermal)) { | 151 | if (IS_ERR(spear_thermal)) { |
| 152 | dev_err(&pdev->dev, "thermal zone device is NULL\n"); | 152 | dev_err(&pdev->dev, "thermal zone device is NULL\n"); |
| 153 | ret = PTR_ERR(spear_thermal); | 153 | ret = PTR_ERR(spear_thermal); |
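The spear change above is purely mechanical: with the tc1/tc2 coefficients gone, thermal_zone_device_register() takes seven arguments (type, trips, mask, devdata, ops, passive_delay, polling_delay), so call sites drop two of the trailing zeros. A sketch of a registration under the new signature; the demo_* names are illustrative, not from any driver:

    #include <linux/err.h>
    #include <linux/thermal.h>

    /* Hypothetical call site against the reworked registration API. */
    static int demo_register(void *devdata,
                             const struct thermal_zone_device_ops *ops)
    {
            struct thermal_zone_device *tz;

            /* type, trips, mask, devdata, ops, passive_delay, polling_delay */
            tz = thermal_zone_device_register("demo_thermal", 0, 0,
                                              devdata, ops, 0, 0);
            return IS_ERR(tz) ? PTR_ERR(tz) : 0;
    }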
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index efd81bb25e01..9ee42ca4d289 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
| @@ -41,15 +41,25 @@ MODULE_AUTHOR("Zhang Rui"); | |||
| 41 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); | 41 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); |
| 42 | MODULE_LICENSE("GPL"); | 42 | MODULE_LICENSE("GPL"); |
| 43 | 43 | ||
| 44 | struct thermal_cooling_device_instance { | 44 | #define THERMAL_NO_TARGET -1UL |
| 45 | /* | ||
| 46 | * This structure is used to describe the behavior of | ||
| 47 | * a certain cooling device on a certain trip point | ||
| 48 | * in a certain thermal zone | ||
| 49 | */ | ||
| 50 | struct thermal_instance { | ||
| 45 | int id; | 51 | int id; |
| 46 | char name[THERMAL_NAME_LENGTH]; | 52 | char name[THERMAL_NAME_LENGTH]; |
| 47 | struct thermal_zone_device *tz; | 53 | struct thermal_zone_device *tz; |
| 48 | struct thermal_cooling_device *cdev; | 54 | struct thermal_cooling_device *cdev; |
| 49 | int trip; | 55 | int trip; |
| 56 | unsigned long upper; /* Highest cooling state for this trip point */ | ||
| 57 | unsigned long lower; /* Lowest cooling state for this trip point */ | ||
| 58 | unsigned long target; /* expected cooling state */ | ||
| 50 | char attr_name[THERMAL_NAME_LENGTH]; | 59 | char attr_name[THERMAL_NAME_LENGTH]; |
| 51 | struct device_attribute attr; | 60 | struct device_attribute attr; |
| 52 | struct list_head node; | 61 | struct list_head tz_node; /* node in tz->thermal_instances */ |
| 62 | struct list_head cdev_node; /* node in cdev->thermal_instances */ | ||
| 53 | }; | 63 | }; |
| 54 | 64 | ||
| 55 | static DEFINE_IDR(thermal_tz_idr); | 65 | static DEFINE_IDR(thermal_tz_idr); |
| @@ -308,8 +318,9 @@ passive_store(struct device *dev, struct device_attribute *attr, | |||
| 308 | if (!strncmp("Processor", cdev->type, | 318 | if (!strncmp("Processor", cdev->type, |
| 309 | sizeof("Processor"))) | 319 | sizeof("Processor"))) |
| 310 | thermal_zone_bind_cooling_device(tz, | 320 | thermal_zone_bind_cooling_device(tz, |
| 311 | THERMAL_TRIPS_NONE, | 321 | THERMAL_TRIPS_NONE, cdev, |
| 312 | cdev); | 322 | THERMAL_NO_LIMIT, |
| 323 | THERMAL_NO_LIMIT); | ||
| 313 | } | 324 | } |
| 314 | mutex_unlock(&thermal_list_lock); | 325 | mutex_unlock(&thermal_list_lock); |
| 315 | if (!tz->passive_delay) | 326 | if (!tz->passive_delay) |
| @@ -327,9 +338,6 @@ passive_store(struct device *dev, struct device_attribute *attr, | |||
| 327 | tz->passive_delay = 0; | 338 | tz->passive_delay = 0; |
| 328 | } | 339 | } |
| 329 | 340 | ||
| 330 | tz->tc1 = 1; | ||
| 331 | tz->tc2 = 1; | ||
| 332 | |||
| 333 | tz->forced_passive = state; | 341 | tz->forced_passive = state; |
| 334 | 342 | ||
| 335 | thermal_zone_device_update(tz); | 343 | thermal_zone_device_update(tz); |
| @@ -425,10 +433,10 @@ static ssize_t | |||
| 425 | thermal_cooling_device_trip_point_show(struct device *dev, | 433 | thermal_cooling_device_trip_point_show(struct device *dev, |
| 426 | struct device_attribute *attr, char *buf) | 434 | struct device_attribute *attr, char *buf) |
| 427 | { | 435 | { |
| 428 | struct thermal_cooling_device_instance *instance; | 436 | struct thermal_instance *instance; |
| 429 | 437 | ||
| 430 | instance = | 438 | instance = |
| 431 | container_of(attr, struct thermal_cooling_device_instance, attr); | 439 | container_of(attr, struct thermal_instance, attr); |
| 432 | 440 | ||
| 433 | if (instance->trip == THERMAL_TRIPS_NONE) | 441 | if (instance->trip == THERMAL_TRIPS_NONE) |
| 434 | return sprintf(buf, "-1\n"); | 442 | return sprintf(buf, "-1\n"); |
| @@ -590,7 +598,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
| 590 | temp->tz = tz; | 598 | temp->tz = tz; |
| 591 | hwmon->count++; | 599 | hwmon->count++; |
| 592 | 600 | ||
| 593 | snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH, | 601 | snprintf(temp->temp_input.name, sizeof(temp->temp_input.name), |
| 594 | "temp%d_input", hwmon->count); | 602 | "temp%d_input", hwmon->count); |
| 595 | temp->temp_input.attr.attr.name = temp->temp_input.name; | 603 | temp->temp_input.attr.attr.name = temp->temp_input.name; |
| 596 | temp->temp_input.attr.attr.mode = 0444; | 604 | temp->temp_input.attr.attr.mode = 0444; |
| @@ -603,7 +611,8 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
| 603 | if (tz->ops->get_crit_temp) { | 611 | if (tz->ops->get_crit_temp) { |
| 604 | unsigned long temperature; | 612 | unsigned long temperature; |
| 605 | if (!tz->ops->get_crit_temp(tz, &temperature)) { | 613 | if (!tz->ops->get_crit_temp(tz, &temperature)) { |
| 606 | snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH, | 614 | snprintf(temp->temp_crit.name, |
| 615 | sizeof(temp->temp_crit.name), | ||
| 607 | "temp%d_crit", hwmon->count); | 616 | "temp%d_crit", hwmon->count); |
| 608 | temp->temp_crit.attr.attr.name = temp->temp_crit.name; | 617 | temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
| 609 | temp->temp_crit.attr.attr.mode = 0444; | 618 | temp->temp_crit.attr.attr.mode = 0444; |
| @@ -704,74 +713,6 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, | |||
| 704 | cancel_delayed_work(&tz->poll_queue); | 713 | cancel_delayed_work(&tz->poll_queue); |
| 705 | } | 714 | } |
| 706 | 715 | ||
| 707 | static void thermal_zone_device_passive(struct thermal_zone_device *tz, | ||
| 708 | int temp, int trip_temp, int trip) | ||
| 709 | { | ||
| 710 | int trend = 0; | ||
| 711 | struct thermal_cooling_device_instance *instance; | ||
| 712 | struct thermal_cooling_device *cdev; | ||
| 713 | long state, max_state; | ||
| 714 | |||
| 715 | /* | ||
| 716 | * Above Trip? | ||
| 717 | * ----------- | ||
| 718 | * Calculate the thermal trend (using the passive cooling equation) | ||
| 719 | * and modify the performance limit for all passive cooling devices | ||
| 720 | * accordingly. Note that we assume symmetry. | ||
| 721 | */ | ||
| 722 | if (temp >= trip_temp) { | ||
| 723 | tz->passive = true; | ||
| 724 | |||
| 725 | trend = (tz->tc1 * (temp - tz->last_temperature)) + | ||
| 726 | (tz->tc2 * (temp - trip_temp)); | ||
| 727 | |||
| 728 | /* Heating up? */ | ||
| 729 | if (trend > 0) { | ||
| 730 | list_for_each_entry(instance, &tz->cooling_devices, | ||
| 731 | node) { | ||
| 732 | if (instance->trip != trip) | ||
| 733 | continue; | ||
| 734 | cdev = instance->cdev; | ||
| 735 | cdev->ops->get_cur_state(cdev, &state); | ||
| 736 | cdev->ops->get_max_state(cdev, &max_state); | ||
| 737 | if (state++ < max_state) | ||
| 738 | cdev->ops->set_cur_state(cdev, state); | ||
| 739 | } | ||
| 740 | } else if (trend < 0) { /* Cooling off? */ | ||
| 741 | list_for_each_entry(instance, &tz->cooling_devices, | ||
| 742 | node) { | ||
| 743 | if (instance->trip != trip) | ||
| 744 | continue; | ||
| 745 | cdev = instance->cdev; | ||
| 746 | cdev->ops->get_cur_state(cdev, &state); | ||
| 747 | cdev->ops->get_max_state(cdev, &max_state); | ||
| 748 | if (state > 0) | ||
| 749 | cdev->ops->set_cur_state(cdev, --state); | ||
| 750 | } | ||
| 751 | } | ||
| 752 | return; | ||
| 753 | } | ||
| 754 | |||
| 755 | /* | ||
| 756 | * Below Trip? | ||
| 757 | * ----------- | ||
| 758 | * Implement passive cooling hysteresis to slowly increase performance | ||
| 759 | * and avoid thrashing around the passive trip point. Note that we | ||
| 760 | * assume symmetry. | ||
| 761 | */ | ||
| 762 | list_for_each_entry(instance, &tz->cooling_devices, node) { | ||
| 763 | if (instance->trip != trip) | ||
| 764 | continue; | ||
| 765 | cdev = instance->cdev; | ||
| 766 | cdev->ops->get_cur_state(cdev, &state); | ||
| 767 | cdev->ops->get_max_state(cdev, &max_state); | ||
| 768 | if (state > 0) | ||
| 769 | cdev->ops->set_cur_state(cdev, --state); | ||
| 770 | if (state == 0) | ||
| 771 | tz->passive = false; | ||
| 772 | } | ||
| 773 | } | ||
| 774 | |||
| 775 | static void thermal_zone_device_check(struct work_struct *work) | 716 | static void thermal_zone_device_check(struct work_struct *work) |
| 776 | { | 717 | { |
| 777 | struct thermal_zone_device *tz = container_of(work, struct | 718 | struct thermal_zone_device *tz = container_of(work, struct |
| @@ -791,12 +732,14 @@ static void thermal_zone_device_check(struct work_struct *work) | |||
| 791 | */ | 732 | */ |
| 792 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | 733 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, |
| 793 | int trip, | 734 | int trip, |
| 794 | struct thermal_cooling_device *cdev) | 735 | struct thermal_cooling_device *cdev, |
| 736 | unsigned long upper, unsigned long lower) | ||
| 795 | { | 737 | { |
| 796 | struct thermal_cooling_device_instance *dev; | 738 | struct thermal_instance *dev; |
| 797 | struct thermal_cooling_device_instance *pos; | 739 | struct thermal_instance *pos; |
| 798 | struct thermal_zone_device *pos1; | 740 | struct thermal_zone_device *pos1; |
| 799 | struct thermal_cooling_device *pos2; | 741 | struct thermal_cooling_device *pos2; |
| 742 | unsigned long max_state; | ||
| 800 | int result; | 743 | int result; |
| 801 | 744 | ||
| 802 | if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) | 745 | if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) |
| @@ -814,13 +757,26 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | |||
| 814 | if (tz != pos1 || cdev != pos2) | 757 | if (tz != pos1 || cdev != pos2) |
| 815 | return -EINVAL; | 758 | return -EINVAL; |
| 816 | 759 | ||
| 760 | cdev->ops->get_max_state(cdev, &max_state); | ||
| 761 | |||
| 762 | /* lower default 0, upper default max_state */ | ||
| 763 | lower = lower == THERMAL_NO_LIMIT ? 0 : lower; | ||
| 764 | upper = upper == THERMAL_NO_LIMIT ? max_state : upper; | ||
| 765 | |||
| 766 | if (lower > upper || upper > max_state) | ||
| 767 | return -EINVAL; | ||
| 768 | |||
| 817 | dev = | 769 | dev = |
| 818 | kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL); | 770 | kzalloc(sizeof(struct thermal_instance), GFP_KERNEL); |
| 819 | if (!dev) | 771 | if (!dev) |
| 820 | return -ENOMEM; | 772 | return -ENOMEM; |
| 821 | dev->tz = tz; | 773 | dev->tz = tz; |
| 822 | dev->cdev = cdev; | 774 | dev->cdev = cdev; |
| 823 | dev->trip = trip; | 775 | dev->trip = trip; |
| 776 | dev->upper = upper; | ||
| 777 | dev->lower = lower; | ||
| 778 | dev->target = THERMAL_NO_TARGET; | ||
| 779 | |||
| 824 | result = get_idr(&tz->idr, &tz->lock, &dev->id); | 780 | result = get_idr(&tz->idr, &tz->lock, &dev->id); |
| 825 | if (result) | 781 | if (result) |
| 826 | goto free_mem; | 782 | goto free_mem; |
| @@ -841,13 +797,17 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | |||
| 841 | goto remove_symbol_link; | 797 | goto remove_symbol_link; |
| 842 | 798 | ||
| 843 | mutex_lock(&tz->lock); | 799 | mutex_lock(&tz->lock); |
| 844 | list_for_each_entry(pos, &tz->cooling_devices, node) | 800 | mutex_lock(&cdev->lock); |
| 801 | list_for_each_entry(pos, &tz->thermal_instances, tz_node) | ||
| 845 | if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { | 802 | if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { |
| 846 | result = -EEXIST; | 803 | result = -EEXIST; |
| 847 | break; | 804 | break; |
| 848 | } | 805 | } |
| 849 | if (!result) | 806 | if (!result) { |
| 850 | list_add_tail(&dev->node, &tz->cooling_devices); | 807 | list_add_tail(&dev->tz_node, &tz->thermal_instances); |
| 808 | list_add_tail(&dev->cdev_node, &cdev->thermal_instances); | ||
| 809 | } | ||
| 810 | mutex_unlock(&cdev->lock); | ||
| 851 | mutex_unlock(&tz->lock); | 811 | mutex_unlock(&tz->lock); |
| 852 | 812 | ||
| 853 | if (!result) | 813 | if (!result) |
| @@ -877,16 +837,20 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, | |||
| 877 | int trip, | 837 | int trip, |
| 878 | struct thermal_cooling_device *cdev) | 838 | struct thermal_cooling_device *cdev) |
| 879 | { | 839 | { |
| 880 | struct thermal_cooling_device_instance *pos, *next; | 840 | struct thermal_instance *pos, *next; |
| 881 | 841 | ||
| 882 | mutex_lock(&tz->lock); | 842 | mutex_lock(&tz->lock); |
| 883 | list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) { | 843 | mutex_lock(&cdev->lock); |
| 844 | list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) { | ||
| 884 | if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { | 845 | if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { |
| 885 | list_del(&pos->node); | 846 | list_del(&pos->tz_node); |
| 847 | list_del(&pos->cdev_node); | ||
| 848 | mutex_unlock(&cdev->lock); | ||
| 886 | mutex_unlock(&tz->lock); | 849 | mutex_unlock(&tz->lock); |
| 887 | goto unbind; | 850 | goto unbind; |
| 888 | } | 851 | } |
| 889 | } | 852 | } |
| 853 | mutex_unlock(&cdev->lock); | ||
| 890 | mutex_unlock(&tz->lock); | 854 | mutex_unlock(&tz->lock); |
| 891 | 855 | ||
| 892 | return -ENODEV; | 856 | return -ENODEV; |
| @@ -934,7 +898,7 @@ thermal_cooling_device_register(char *type, void *devdata, | |||
| 934 | struct thermal_zone_device *pos; | 898 | struct thermal_zone_device *pos; |
| 935 | int result; | 899 | int result; |
| 936 | 900 | ||
| 937 | if (strlen(type) >= THERMAL_NAME_LENGTH) | 901 | if (type && strlen(type) >= THERMAL_NAME_LENGTH) |
| 938 | return ERR_PTR(-EINVAL); | 902 | return ERR_PTR(-EINVAL); |
| 939 | 903 | ||
| 940 | if (!ops || !ops->get_max_state || !ops->get_cur_state || | 904 | if (!ops || !ops->get_max_state || !ops->get_cur_state || |
| @@ -951,8 +915,11 @@ thermal_cooling_device_register(char *type, void *devdata, | |||
| 951 | return ERR_PTR(result); | 915 | return ERR_PTR(result); |
| 952 | } | 916 | } |
| 953 | 917 | ||
| 954 | strcpy(cdev->type, type); | 918 | strcpy(cdev->type, type ? : ""); |
| 919 | mutex_init(&cdev->lock); | ||
| 920 | INIT_LIST_HEAD(&cdev->thermal_instances); | ||
| 955 | cdev->ops = ops; | 921 | cdev->ops = ops; |
| 922 | cdev->updated = true; | ||
| 956 | cdev->device.class = &thermal_class; | 923 | cdev->device.class = &thermal_class; |
| 957 | cdev->devdata = devdata; | 924 | cdev->devdata = devdata; |
| 958 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); | 925 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
| @@ -1044,6 +1011,136 @@ void thermal_cooling_device_unregister(struct | |||
| 1044 | } | 1011 | } |
| 1045 | EXPORT_SYMBOL(thermal_cooling_device_unregister); | 1012 | EXPORT_SYMBOL(thermal_cooling_device_unregister); |
| 1046 | 1013 | ||
| 1014 | static void thermal_cdev_do_update(struct thermal_cooling_device *cdev) | ||
| 1015 | { | ||
| 1016 | struct thermal_instance *instance; | ||
| 1017 | unsigned long target = 0; | ||
| 1018 | |||
| 1019 | /* cooling device is already updated */ | ||
| 1020 | if (cdev->updated) | ||
| 1021 | return; | ||
| 1022 | |||
| 1023 | mutex_lock(&cdev->lock); | ||
| 1024 | /* Make sure cdev enters the deepest cooling state */ | ||
| 1025 | list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { | ||
| 1026 | if (instance->target == THERMAL_NO_TARGET) | ||
| 1027 | continue; | ||
| 1028 | if (instance->target > target) | ||
| 1029 | target = instance->target; | ||
| 1030 | } | ||
| 1031 | mutex_unlock(&cdev->lock); | ||
| 1032 | cdev->ops->set_cur_state(cdev, target); | ||
| 1033 | cdev->updated = true; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | static void thermal_zone_do_update(struct thermal_zone_device *tz) | ||
| 1037 | { | ||
| 1038 | struct thermal_instance *instance; | ||
| 1039 | |||
| 1040 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) | ||
| 1041 | thermal_cdev_do_update(instance->cdev); | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | /* | ||
| 1045 | * Cooling algorithm for both active and passive cooling | ||
| 1046 | * | ||
| 1047 | * 1. if the temperature is higher than a trip point, | ||
| 1048 | * a. if the trend is THERMAL_TREND_RAISING, use higher cooling | ||
| 1049 | * state for this trip point | ||
| 1050 | * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling | ||
| 1051 | * state for this trip point | ||
| 1052 | * | ||
| 1053 | * 2. if the temperature is lower than a trip point, use lower | ||
| 1054 | * cooling state for this trip point | ||
| 1055 | * | ||
| 1056 | * Note that this behaves the same as the previous passive cooling | ||
| 1057 | * algorithm. | ||
| 1058 | */ | ||
| 1059 | |||
| 1060 | static void thermal_zone_trip_update(struct thermal_zone_device *tz, | ||
| 1061 | int trip, long temp) | ||
| 1062 | { | ||
| 1063 | struct thermal_instance *instance; | ||
| 1064 | struct thermal_cooling_device *cdev = NULL; | ||
| 1065 | unsigned long cur_state, max_state; | ||
| 1066 | long trip_temp; | ||
| 1067 | enum thermal_trip_type trip_type; | ||
| 1068 | enum thermal_trend trend; | ||
| 1069 | |||
| 1070 | if (trip == THERMAL_TRIPS_NONE) { | ||
| 1071 | trip_temp = tz->forced_passive; | ||
| 1072 | trip_type = THERMAL_TRIPS_NONE; | ||
| 1073 | } else { | ||
| 1074 | tz->ops->get_trip_temp(tz, trip, &trip_temp); | ||
| 1075 | tz->ops->get_trip_type(tz, trip, &trip_type); | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) { | ||
| 1079 | /* | ||
| 1080 | * compare the current temperature and previous temperature | ||
| 1081 | * to get the thermal trend, if no special requirement | ||
| 1082 | */ | ||
| 1083 | if (tz->temperature > tz->last_temperature) | ||
| 1084 | trend = THERMAL_TREND_RAISING; | ||
| 1085 | else if (tz->temperature < tz->last_temperature) | ||
| 1086 | trend = THERMAL_TREND_DROPPING; | ||
| 1087 | else | ||
| 1088 | trend = THERMAL_TREND_STABLE; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | if (temp >= trip_temp) { | ||
| 1092 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { | ||
| 1093 | if (instance->trip != trip) | ||
| 1094 | continue; | ||
| 1095 | |||
| 1096 | cdev = instance->cdev; | ||
| 1097 | |||
| 1098 | cdev->ops->get_cur_state(cdev, &cur_state); | ||
| 1099 | cdev->ops->get_max_state(cdev, &max_state); | ||
| 1100 | |||
| 1101 | if (trend == THERMAL_TREND_RAISING) { | ||
| 1102 | cur_state = cur_state < instance->upper ? | ||
| 1103 | (cur_state + 1) : instance->upper; | ||
| 1104 | } else if (trend == THERMAL_TREND_DROPPING) { | ||
| 1105 | cur_state = cur_state > instance->lower ? | ||
| 1106 | (cur_state - 1) : instance->lower; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | /* activate a passive thermal instance */ | ||
| 1110 | if ((trip_type == THERMAL_TRIP_PASSIVE || | ||
| 1111 | trip_type == THERMAL_TRIPS_NONE) && | ||
| 1112 | instance->target == THERMAL_NO_TARGET) | ||
| 1113 | tz->passive++; | ||
| 1114 | |||
| 1115 | instance->target = cur_state; | ||
| 1116 | cdev->updated = false; /* cooling device needs update */ | ||
| 1117 | } | ||
| 1118 | } else { /* below trip */ | ||
| 1119 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { | ||
| 1120 | if (instance->trip != trip) | ||
| 1121 | continue; | ||
| 1122 | |||
| 1123 | /* Do not use the inactive thermal instance */ | ||
| 1124 | if (instance->target == THERMAL_NO_TARGET) | ||
| 1125 | continue; | ||
| 1126 | cdev = instance->cdev; | ||
| 1127 | cdev->ops->get_cur_state(cdev, &cur_state); | ||
| 1128 | |||
| 1129 | cur_state = cur_state > instance->lower ? | ||
| 1130 | (cur_state - 1) : THERMAL_NO_TARGET; | ||
| 1131 | |||
| 1132 | /* deactivate a passive thermal instance */ | ||
| 1133 | if ((trip_type == THERMAL_TRIP_PASSIVE || | ||
| 1134 | trip_type == THERMAL_TRIPS_NONE) && | ||
| 1135 | cur_state == THERMAL_NO_TARGET) | ||
| 1136 | tz->passive--; | ||
| 1137 | instance->target = cur_state; | ||
| 1138 | cdev->updated = false; /* cooling device needs update */ | ||
| 1139 | } | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | return; | ||
| 1143 | } | ||
| 1047 | /** | 1144 | /** |
| 1048 | * thermal_zone_device_update - force an update of a thermal zone's state | 1145 | * thermal_zone_device_update - force an update of a thermal zone's state |
| 1049 | * @ttz: the thermal zone to update | 1146 | * @ttz: the thermal zone to update |
| @@ -1054,8 +1151,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) | |||
| 1054 | int count, ret = 0; | 1151 | int count, ret = 0; |
| 1055 | long temp, trip_temp; | 1152 | long temp, trip_temp; |
| 1056 | enum thermal_trip_type trip_type; | 1153 | enum thermal_trip_type trip_type; |
| 1057 | struct thermal_cooling_device_instance *instance; | ||
| 1058 | struct thermal_cooling_device *cdev; | ||
| 1059 | 1154 | ||
| 1060 | mutex_lock(&tz->lock); | 1155 | mutex_lock(&tz->lock); |
| 1061 | 1156 | ||
| @@ -1065,6 +1160,9 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) | |||
| 1065 | goto leave; | 1160 | goto leave; |
| 1066 | } | 1161 | } |
| 1067 | 1162 | ||
| 1163 | tz->last_temperature = tz->temperature; | ||
| 1164 | tz->temperature = temp; | ||
| 1165 | |||
| 1068 | for (count = 0; count < tz->trips; count++) { | 1166 | for (count = 0; count < tz->trips; count++) { |
| 1069 | tz->ops->get_trip_type(tz, count, &trip_type); | 1167 | tz->ops->get_trip_type(tz, count, &trip_type); |
| 1070 | tz->ops->get_trip_temp(tz, count, &trip_temp); | 1168 | tz->ops->get_trip_temp(tz, count, &trip_temp); |
| @@ -1088,32 +1186,18 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) | |||
| 1088 | tz->ops->notify(tz, count, trip_type); | 1186 | tz->ops->notify(tz, count, trip_type); |
| 1089 | break; | 1187 | break; |
| 1090 | case THERMAL_TRIP_ACTIVE: | 1188 | case THERMAL_TRIP_ACTIVE: |
| 1091 | list_for_each_entry(instance, &tz->cooling_devices, | 1189 | thermal_zone_trip_update(tz, count, temp); |
| 1092 | node) { | ||
| 1093 | if (instance->trip != count) | ||
| 1094 | continue; | ||
| 1095 | |||
| 1096 | cdev = instance->cdev; | ||
| 1097 | |||
| 1098 | if (temp >= trip_temp) | ||
| 1099 | cdev->ops->set_cur_state(cdev, 1); | ||
| 1100 | else | ||
| 1101 | cdev->ops->set_cur_state(cdev, 0); | ||
| 1102 | } | ||
| 1103 | break; | 1190 | break; |
| 1104 | case THERMAL_TRIP_PASSIVE: | 1191 | case THERMAL_TRIP_PASSIVE: |
| 1105 | if (temp >= trip_temp || tz->passive) | 1192 | if (temp >= trip_temp || tz->passive) |
| 1106 | thermal_zone_device_passive(tz, temp, | 1193 | thermal_zone_trip_update(tz, count, temp); |
| 1107 | trip_temp, count); | ||
| 1108 | break; | 1194 | break; |
| 1109 | } | 1195 | } |
| 1110 | } | 1196 | } |
| 1111 | 1197 | ||
| 1112 | if (tz->forced_passive) | 1198 | if (tz->forced_passive) |
| 1113 | thermal_zone_device_passive(tz, temp, tz->forced_passive, | 1199 | thermal_zone_trip_update(tz, THERMAL_TRIPS_NONE, temp); |
| 1114 | THERMAL_TRIPS_NONE); | 1200 | thermal_zone_do_update(tz); |
| 1115 | |||
| 1116 | tz->last_temperature = temp; | ||
| 1117 | 1201 | ||
| 1118 | leave: | 1202 | leave: |
| 1119 | if (tz->passive) | 1203 | if (tz->passive) |
| @@ -1236,8 +1320,6 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) | |||
| 1236 | * @mask: a bit string indicating the writeablility of trip points | 1320 | * @mask: a bit string indicating the writeablility of trip points |
| 1237 | * @devdata: private device data | 1321 | * @devdata: private device data |
| 1238 | * @ops: standard thermal zone device callbacks | 1322 | * @ops: standard thermal zone device callbacks |
| 1239 | * @tc1: thermal coefficient 1 for passive calculations | ||
| 1240 | * @tc2: thermal coefficient 2 for passive calculations | ||
| 1241 | * @passive_delay: number of milliseconds to wait between polls when | 1323 | * @passive_delay: number of milliseconds to wait between polls when |
| 1242 | * performing passive cooling | 1324 | * performing passive cooling |
| 1243 | * @polling_delay: number of milliseconds to wait between polls when checking | 1325 | * @polling_delay: number of milliseconds to wait between polls when checking |
| @@ -1245,13 +1327,12 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) | |||
| 1245 | * driven systems) | 1327 | * driven systems) |
| 1246 | * | 1328 | * |
| 1247 | * thermal_zone_device_unregister() must be called when the device is no | 1329 | * thermal_zone_device_unregister() must be called when the device is no |
| 1248 | * longer needed. The passive cooling formula uses tc1 and tc2 as described in | 1330 | * longer needed. The passive cooling depends on the .get_trend() return value. |
| 1249 | * section 11.1.5.1 of the ACPI specification 3.0. | ||
| 1250 | */ | 1331 | */ |
| 1251 | struct thermal_zone_device *thermal_zone_device_register(const char *type, | 1332 | struct thermal_zone_device *thermal_zone_device_register(const char *type, |
| 1252 | int trips, int mask, void *devdata, | 1333 | int trips, int mask, void *devdata, |
| 1253 | const struct thermal_zone_device_ops *ops, | 1334 | const struct thermal_zone_device_ops *ops, |
| 1254 | int tc1, int tc2, int passive_delay, int polling_delay) | 1335 | int passive_delay, int polling_delay) |
| 1255 | { | 1336 | { |
| 1256 | struct thermal_zone_device *tz; | 1337 | struct thermal_zone_device *tz; |
| 1257 | struct thermal_cooling_device *pos; | 1338 | struct thermal_cooling_device *pos; |
| @@ -1260,7 +1341,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
| 1260 | int count; | 1341 | int count; |
| 1261 | int passive = 0; | 1342 | int passive = 0; |
| 1262 | 1343 | ||
| 1263 | if (strlen(type) >= THERMAL_NAME_LENGTH) | 1344 | if (type && strlen(type) >= THERMAL_NAME_LENGTH) |
| 1264 | return ERR_PTR(-EINVAL); | 1345 | return ERR_PTR(-EINVAL); |
| 1265 | 1346 | ||
| 1266 | if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) | 1347 | if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) |
| @@ -1273,7 +1354,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
| 1273 | if (!tz) | 1354 | if (!tz) |
| 1274 | return ERR_PTR(-ENOMEM); | 1355 | return ERR_PTR(-ENOMEM); |
| 1275 | 1356 | ||
| 1276 | INIT_LIST_HEAD(&tz->cooling_devices); | 1357 | INIT_LIST_HEAD(&tz->thermal_instances); |
| 1277 | idr_init(&tz->idr); | 1358 | idr_init(&tz->idr); |
| 1278 | mutex_init(&tz->lock); | 1359 | mutex_init(&tz->lock); |
| 1279 | result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); | 1360 | result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); |
| @@ -1282,13 +1363,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, | |||
| 1282 | return ERR_PTR(result); | 1363 | return ERR_PTR(result); |
| 1283 | } | 1364 | } |
| 1284 | 1365 | ||
| 1285 | strcpy(tz->type, type); | 1366 | strcpy(tz->type, type ? : ""); |
| 1286 | tz->ops = ops; | 1367 | tz->ops = ops; |
| 1287 | tz->device.class = &thermal_class; | 1368 | tz->device.class = &thermal_class; |
| 1288 | tz->devdata = devdata; | 1369 | tz->devdata = devdata; |
| 1289 | tz->trips = trips; | 1370 | tz->trips = trips; |
| 1290 | tz->tc1 = tc1; | ||
| 1291 | tz->tc2 = tc2; | ||
| 1292 | tz->passive_delay = passive_delay; | 1371 | tz->passive_delay = passive_delay; |
| 1293 | tz->polling_delay = polling_delay; | 1372 | tz->polling_delay = polling_delay; |
| 1294 | 1373 | ||
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 2944ff88fdc0..f4abfe238f98 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
| @@ -478,7 +478,6 @@ static void xencons_backend_changed(struct xenbus_device *dev, | |||
| 478 | case XenbusStateInitialising: | 478 | case XenbusStateInitialising: |
| 479 | case XenbusStateInitialised: | 479 | case XenbusStateInitialised: |
| 480 | case XenbusStateUnknown: | 480 | case XenbusStateUnknown: |
| 481 | case XenbusStateClosed: | ||
| 482 | break; | 481 | break; |
| 483 | 482 | ||
| 484 | case XenbusStateInitWait: | 483 | case XenbusStateInitWait: |
| @@ -488,6 +487,10 @@ static void xencons_backend_changed(struct xenbus_device *dev, | |||
| 488 | xenbus_switch_state(dev, XenbusStateConnected); | 487 | xenbus_switch_state(dev, XenbusStateConnected); |
| 489 | break; | 488 | break; |
| 490 | 489 | ||
| 490 | case XenbusStateClosed: | ||
| 491 | if (dev->state == XenbusStateClosed) | ||
| 492 | break; | ||
| 493 | /* Missed the backend's CLOSING state -- fallthrough */ | ||
| 491 | case XenbusStateClosing: | 494 | case XenbusStateClosing: |
| 492 | xenbus_frontend_closed(dev); | 495 | xenbus_frontend_closed(dev); |
| 493 | break; | 496 | break; |
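The hvc_xen change above fixes the frontend state machine for backends that jump straight to Closed without a visible Closing transition: the Closed case now falls through to the close path unless the frontend has already closed itself. A condensed, hypothetical form of just that switch logic:

    #include <xen/xenbus.h>

    /* Sketch of the Closed/Closing handling; not the full handler. */
    static void demo_backend_changed(struct xenbus_device *dev,
                                     enum xenbus_state backend_state)
    {
            switch (backend_state) {
            case XenbusStateClosed:
                    if (dev->state == XenbusStateClosed)
                            break;          /* we already closed: ignore */
                    /* Missed the backend's CLOSING state -- fall through */
            case XenbusStateClosing:
                    xenbus_frontend_closed(dev);
                    break;
            default:
                    break;
            }
    }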
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index c0b334327d93..10020547c60b 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c | |||
| @@ -97,7 +97,8 @@ static void kgdboc_restore_input(void) | |||
| 97 | 97 | ||
| 98 | static int kgdboc_register_kbd(char **cptr) | 98 | static int kgdboc_register_kbd(char **cptr) |
| 99 | { | 99 | { |
| 100 | if (strncmp(*cptr, "kbd", 3) == 0) { | 100 | if (strncmp(*cptr, "kbd", 3) == 0 || |
| 101 | strncmp(*cptr, "kdb", 3) == 0) { | ||
| 101 | if (kdb_poll_idx < KDB_POLL_FUNC_MAX) { | 102 | if (kdb_poll_idx < KDB_POLL_FUNC_MAX) { |
| 102 | kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char; | 103 | kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char; |
| 103 | kdb_poll_idx++; | 104 | kdb_poll_idx++; |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 999ca63afdef..f87d7e8964bf 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
| @@ -3442,6 +3442,19 @@ int con_debug_enter(struct vc_data *vc) | |||
| 3442 | kdb_set(2, setargs); | 3442 | kdb_set(2, setargs); |
| 3443 | } | 3443 | } |
| 3444 | } | 3444 | } |
| 3445 | if (vc->vc_cols < 999) { | ||
| 3446 | int colcount; | ||
| 3447 | char cols[4]; | ||
| 3448 | const char *setargs[3] = { | ||
| 3449 | "set", | ||
| 3450 | "COLUMNS", | ||
| 3451 | cols, | ||
| 3452 | }; | ||
| 3453 | if (kdbgetintenv(setargs[0], &colcount)) { | ||
| 3454 | snprintf(cols, 4, "%i", vc->vc_cols); | ||
| 3455 | kdb_set(2, setargs); | ||
| 3456 | } | ||
| 3457 | } | ||
| 3445 | #endif /* CONFIG_KGDB_KDB */ | 3458 | #endif /* CONFIG_KGDB_KDB */ |
| 3446 | return ret; | 3459 | return ret; |
| 3447 | } | 3460 | } |
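The vc_cols < 999 guard in the vt.c hunk above is there because cols[] holds at most three digits plus the terminating NUL; snprintf() would silently truncate anything wider. A standalone illustration:

    /* Why the column count is capped below 999 before snprintf(). */
    #include <stdio.h>

    int main(void)
    {
            char cols[4];

            snprintf(cols, sizeof(cols), "%i", 1024);       /* truncated */
            printf("\"%s\"\n", cols);                       /* prints "102" */
            return 0;
    }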
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 36f2be4def2f..981f2132d128 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = { | |||
| 1551 | Maybe we should define a new | 1551 | Maybe we should define a new |
| 1552 | quirk for this. */ | 1552 | quirk for this. */ |
| 1553 | }, | 1553 | }, |
| 1554 | { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */ | ||
| 1555 | .driver_info = NO_UNION_NORMAL, | ||
| 1556 | }, | ||
| 1554 | { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ | 1557 | { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ |
| 1555 | .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ | 1558 | .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ |
| 1556 | }, | 1559 | }, |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 131dec04794e..48220e129f85 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #include <xen/xenbus.h> | 48 | #include <xen/xenbus.h> |
| 49 | #include <xen/xen.h> | 49 | #include <xen/xen.h> |
| 50 | #include "xenbus_comms.h" | 50 | #include "xenbus_comms.h" |
| 51 | #include <asm/xen/hypervisor.h> | ||
| 51 | 52 | ||
| 52 | struct xs_stored_msg { | 53 | struct xs_stored_msg { |
| 53 | struct list_head list; | 54 | struct list_head list; |
| @@ -618,7 +619,24 @@ static struct xenbus_watch *find_watch(const char *token) | |||
| 618 | 619 | ||
| 619 | return NULL; | 620 | return NULL; |
| 620 | } | 621 | } |
| 622 | /* | ||
| 623 | * Certain older XenBus toolstacks cannot handle reading values that are | ||
| 624 | * not populated. Some Xen 3.4 installations are incapable of doing this, | ||
| 625 | * so if we are running on anything older than 4 do not attempt to read | ||
| 626 | * control/platform-feature-xs_reset_watches. | ||
| 627 | */ | ||
| 628 | static bool xen_strict_xenbus_quirk(void) | ||
| 629 | { | ||
| 630 | uint32_t eax, ebx, ecx, edx, base; | ||
| 631 | |||
| 632 | base = xen_cpuid_base(); | ||
| 633 | cpuid(base + 1, &eax, &ebx, &ecx, &edx); | ||
| 621 | 634 | ||
| 635 | if ((eax >> 16) < 4) | ||
| 636 | return true; | ||
| 637 | return false; | ||
| 638 | |||
| 639 | } | ||
| 622 | static void xs_reset_watches(void) | 640 | static void xs_reset_watches(void) |
| 623 | { | 641 | { |
| 624 | int err, supported = 0; | 642 | int err, supported = 0; |
| @@ -626,6 +644,9 @@ static void xs_reset_watches(void) | |||
| 626 | if (!xen_hvm_domain() || xen_initial_domain()) | 644 | if (!xen_hvm_domain() || xen_initial_domain()) |
| 627 | return; | 645 | return; |
| 628 | 646 | ||
| 647 | if (xen_strict_xenbus_quirk()) | ||
| 648 | return; | ||
| 649 | |||
| 629 | err = xenbus_scanf(XBT_NIL, "control", | 650 | err = xenbus_scanf(XBT_NIL, "control", |
| 630 | "platform-feature-xs_reset_watches", "%d", &supported); | 651 | "platform-feature-xs_reset_watches", "%d", &supported); |
| 631 | if (err != 1 || !supported) | 652 | if (err != 1 || !supported) |
