diff options
Diffstat (limited to 'drivers/misc')
48 files changed, 7212 insertions, 382 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index a3e291d0df9a..6cb388e8fb7d 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
| @@ -525,4 +525,5 @@ source "drivers/misc/altera-stapl/Kconfig" | |||
| 525 | source "drivers/misc/mei/Kconfig" | 525 | source "drivers/misc/mei/Kconfig" |
| 526 | source "drivers/misc/vmw_vmci/Kconfig" | 526 | source "drivers/misc/vmw_vmci/Kconfig" |
| 527 | source "drivers/misc/mic/Kconfig" | 527 | source "drivers/misc/mic/Kconfig" |
| 528 | source "drivers/misc/genwqe/Kconfig" | ||
| 528 | endmenu | 529 | endmenu |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f45473e68bf7..99b9424ce31d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
| @@ -53,3 +53,4 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ | |||
| 53 | obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o | 53 | obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o |
| 54 | obj-$(CONFIG_SRAM) += sram.o | 54 | obj-$(CONFIG_SRAM) += sram.o |
| 55 | obj-y += mic/ | 55 | obj-y += mic/ |
| 56 | obj-$(CONFIG_GENWQE) += genwqe/ | ||
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 0daadcf1ed7a..d3eee113baeb 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c | |||
| @@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = { | |||
| 641 | .attrs = ad525x_attributes_commands, | 641 | .attrs = ad525x_attributes_commands, |
| 642 | }; | 642 | }; |
| 643 | 643 | ||
| 644 | int ad_dpot_add_files(struct device *dev, | 644 | static int ad_dpot_add_files(struct device *dev, |
| 645 | unsigned features, unsigned rdac) | 645 | unsigned features, unsigned rdac) |
| 646 | { | 646 | { |
| 647 | int err = sysfs_create_file(&dev->kobj, | 647 | int err = sysfs_create_file(&dev->kobj, |
| @@ -666,7 +666,7 @@ int ad_dpot_add_files(struct device *dev, | |||
| 666 | return err; | 666 | return err; |
| 667 | } | 667 | } |
| 668 | 668 | ||
| 669 | inline void ad_dpot_remove_files(struct device *dev, | 669 | static inline void ad_dpot_remove_files(struct device *dev, |
| 670 | unsigned features, unsigned rdac) | 670 | unsigned features, unsigned rdac) |
| 671 | { | 671 | { |
| 672 | sysfs_remove_file(&dev->kobj, | 672 | sysfs_remove_file(&dev->kobj, |
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c index 3abfcecf8424..a7c16295b816 100644 --- a/drivers/misc/bmp085-i2c.c +++ b/drivers/misc/bmp085-i2c.c | |||
| @@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client, | |||
| 49 | return err; | 49 | return err; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | return bmp085_probe(&client->dev, regmap); | 52 | return bmp085_probe(&client->dev, regmap, client->irq); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | static int bmp085_i2c_remove(struct i2c_client *client) | 55 | static int bmp085_i2c_remove(struct i2c_client *client) |
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c index d6a52659cf24..864ecac32373 100644 --- a/drivers/misc/bmp085-spi.c +++ b/drivers/misc/bmp085-spi.c | |||
| @@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client) | |||
| 41 | return err; | 41 | return err; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | return bmp085_probe(&client->dev, regmap); | 44 | return bmp085_probe(&client->dev, regmap, client->irq); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static int bmp085_spi_remove(struct spi_device *client) | 47 | static int bmp085_spi_remove(struct spi_device *client) |
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 2704d885a9b3..820e53d0048f 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c | |||
| @@ -49,9 +49,11 @@ | |||
| 49 | #include <linux/device.h> | 49 | #include <linux/device.h> |
| 50 | #include <linux/init.h> | 50 | #include <linux/init.h> |
| 51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
| 52 | #include <linux/delay.h> | ||
| 53 | #include <linux/of.h> | 52 | #include <linux/of.h> |
| 54 | #include "bmp085.h" | 53 | #include "bmp085.h" |
| 54 | #include <linux/interrupt.h> | ||
| 55 | #include <linux/completion.h> | ||
| 56 | #include <linux/gpio.h> | ||
| 55 | 57 | ||
| 56 | #define BMP085_CHIP_ID 0x55 | 58 | #define BMP085_CHIP_ID 0x55 |
| 57 | #define BMP085_CALIBRATION_DATA_START 0xAA | 59 | #define BMP085_CALIBRATION_DATA_START 0xAA |
| @@ -84,8 +86,19 @@ struct bmp085_data { | |||
| 84 | unsigned long last_temp_measurement; | 86 | unsigned long last_temp_measurement; |
| 85 | u8 chip_id; | 87 | u8 chip_id; |
| 86 | s32 b6; /* calculated temperature correction coefficient */ | 88 | s32 b6; /* calculated temperature correction coefficient */ |
| 89 | int irq; | ||
| 90 | struct completion done; | ||
| 87 | }; | 91 | }; |
| 88 | 92 | ||
| 93 | static irqreturn_t bmp085_eoc_isr(int irq, void *devid) | ||
| 94 | { | ||
| 95 | struct bmp085_data *data = devid; | ||
| 96 | |||
| 97 | complete(&data->done); | ||
| 98 | |||
| 99 | return IRQ_HANDLED; | ||
| 100 | } | ||
| 101 | |||
| 89 | static s32 bmp085_read_calibration_data(struct bmp085_data *data) | 102 | static s32 bmp085_read_calibration_data(struct bmp085_data *data) |
| 90 | { | 103 | { |
| 91 | u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; | 104 | u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; |
| @@ -116,6 +129,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data) | |||
| 116 | s32 status; | 129 | s32 status; |
| 117 | 130 | ||
| 118 | mutex_lock(&data->lock); | 131 | mutex_lock(&data->lock); |
| 132 | |||
| 133 | init_completion(&data->done); | ||
| 134 | |||
| 119 | status = regmap_write(data->regmap, BMP085_CTRL_REG, | 135 | status = regmap_write(data->regmap, BMP085_CTRL_REG, |
| 120 | BMP085_TEMP_MEASUREMENT); | 136 | BMP085_TEMP_MEASUREMENT); |
| 121 | if (status < 0) { | 137 | if (status < 0) { |
| @@ -123,7 +139,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data) | |||
| 123 | "Error while requesting temperature measurement.\n"); | 139 | "Error while requesting temperature measurement.\n"); |
| 124 | goto exit; | 140 | goto exit; |
| 125 | } | 141 | } |
| 126 | msleep(BMP085_TEMP_CONVERSION_TIME); | 142 | wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( |
| 143 | BMP085_TEMP_CONVERSION_TIME)); | ||
| 127 | 144 | ||
| 128 | status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, | 145 | status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, |
| 129 | &tmp, sizeof(tmp)); | 146 | &tmp, sizeof(tmp)); |
| @@ -147,6 +164,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data) | |||
| 147 | s32 status; | 164 | s32 status; |
| 148 | 165 | ||
| 149 | mutex_lock(&data->lock); | 166 | mutex_lock(&data->lock); |
| 167 | |||
| 168 | init_completion(&data->done); | ||
| 169 | |||
| 150 | status = regmap_write(data->regmap, BMP085_CTRL_REG, | 170 | status = regmap_write(data->regmap, BMP085_CTRL_REG, |
| 151 | BMP085_PRESSURE_MEASUREMENT + | 171 | BMP085_PRESSURE_MEASUREMENT + |
| 152 | (data->oversampling_setting << 6)); | 172 | (data->oversampling_setting << 6)); |
| @@ -157,8 +177,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data) | |||
| 157 | } | 177 | } |
| 158 | 178 | ||
| 159 | /* wait for the end of conversion */ | 179 | /* wait for the end of conversion */ |
| 160 | msleep(2+(3 << data->oversampling_setting)); | 180 | wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( |
| 161 | 181 | 2+(3 << data->oversampling_setting))); | |
| 162 | /* copy data into a u32 (4 bytes), but skip the first byte. */ | 182 | /* copy data into a u32 (4 bytes), but skip the first byte. */ |
| 163 | status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, | 183 | status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, |
| 164 | ((u8 *)&tmp)+1, 3); | 184 | ((u8 *)&tmp)+1, 3); |
| @@ -420,7 +440,7 @@ struct regmap_config bmp085_regmap_config = { | |||
| 420 | }; | 440 | }; |
| 421 | EXPORT_SYMBOL_GPL(bmp085_regmap_config); | 441 | EXPORT_SYMBOL_GPL(bmp085_regmap_config); |
| 422 | 442 | ||
| 423 | int bmp085_probe(struct device *dev, struct regmap *regmap) | 443 | int bmp085_probe(struct device *dev, struct regmap *regmap, int irq) |
| 424 | { | 444 | { |
| 425 | struct bmp085_data *data; | 445 | struct bmp085_data *data; |
| 426 | int err = 0; | 446 | int err = 0; |
| @@ -434,6 +454,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap) | |||
| 434 | dev_set_drvdata(dev, data); | 454 | dev_set_drvdata(dev, data); |
| 435 | data->dev = dev; | 455 | data->dev = dev; |
| 436 | data->regmap = regmap; | 456 | data->regmap = regmap; |
| 457 | data->irq = irq; | ||
| 458 | |||
| 459 | if (data->irq > 0) { | ||
| 460 | err = devm_request_irq(dev, data->irq, bmp085_eoc_isr, | ||
| 461 | IRQF_TRIGGER_RISING, "bmp085", | ||
| 462 | data); | ||
| 463 | if (err < 0) | ||
| 464 | goto exit_free; | ||
| 465 | } | ||
| 437 | 466 | ||
| 438 | /* Initialize the BMP085 chip */ | 467 | /* Initialize the BMP085 chip */ |
| 439 | err = bmp085_init_client(data); | 468 | err = bmp085_init_client(data); |
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h index 2b8f615bca92..8b8e3b1f5ca5 100644 --- a/drivers/misc/bmp085.h +++ b/drivers/misc/bmp085.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | extern struct regmap_config bmp085_regmap_config; | 27 | extern struct regmap_config bmp085_regmap_config; |
| 28 | 28 | ||
| 29 | int bmp085_probe(struct device *dev, struct regmap *regmap); | 29 | int bmp085_probe(struct device *dev, struct regmap *regmap, int irq); |
| 30 | int bmp085_remove(struct device *dev); | 30 | int bmp085_remove(struct device *dev); |
| 31 | int bmp085_detect(struct device *dev); | 31 | int bmp085_detect(struct device *dev); |
| 32 | 32 | ||
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 3a015abb444a..78e55b501c94 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c | |||
| @@ -378,7 +378,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi) | |||
| 378 | device_remove_file(&spi->dev, &dev_attr_erase); | 378 | device_remove_file(&spi->dev, &dev_attr_erase); |
| 379 | 379 | ||
| 380 | sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); | 380 | sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); |
| 381 | spi_set_drvdata(spi, NULL); | ||
| 382 | kfree(edev); | 381 | kfree(edev); |
| 383 | return 0; | 382 | return 0; |
| 384 | } | 383 | } |
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 0e8df41aaf14..2cf2bbc0b927 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c | |||
| @@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev) | |||
| 198 | { | 198 | { |
| 199 | char name[ENCLOSURE_NAME_SIZE]; | 199 | char name[ENCLOSURE_NAME_SIZE]; |
| 200 | 200 | ||
| 201 | /* | ||
| 202 | * In odd circumstances, like multipath devices, something else may | ||
| 203 | * already have removed the links, so check for this condition first. | ||
| 204 | */ | ||
| 205 | if (!cdev->dev->kobj.sd) | ||
| 206 | return; | ||
| 207 | |||
| 201 | enclosure_link_name(cdev, name); | 208 | enclosure_link_name(cdev, name); |
| 202 | sysfs_remove_link(&cdev->dev->kobj, name); | 209 | sysfs_remove_link(&cdev->dev->kobj, name); |
| 203 | sysfs_remove_link(&cdev->cdev.kobj, "device"); | 210 | sysfs_remove_link(&cdev->cdev.kobj, "device"); |
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index a725c79c35f5..71d2793b372c 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c | |||
| @@ -396,7 +396,7 @@ static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw) | |||
| 396 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 396 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
| 397 | "fsa9480 micro USB", usbsw); | 397 | "fsa9480 micro USB", usbsw); |
| 398 | if (ret) { | 398 | if (ret) { |
| 399 | dev_err(&client->dev, "failed to reqeust IRQ\n"); | 399 | dev_err(&client->dev, "failed to request IRQ\n"); |
| 400 | return ret; | 400 | return ret; |
| 401 | } | 401 | } |
| 402 | 402 | ||
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig new file mode 100644 index 000000000000..6069d8cd79d7 --- /dev/null +++ b/drivers/misc/genwqe/Kconfig | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | # | ||
| 2 | # IBM Accelerator Family 'GenWQE' | ||
| 3 | # | ||
| 4 | |||
| 5 | menuconfig GENWQE | ||
| 6 | tristate "GenWQE PCIe Accelerator" | ||
| 7 | depends on PCI && 64BIT | ||
| 8 | select CRC_ITU_T | ||
| 9 | default n | ||
| 10 | help | ||
| 11 | Enables PCIe card driver for IBM GenWQE accelerators. | ||
| 12 | The user-space interface is described in | ||
| 13 | include/linux/genwqe/genwqe_card.h. | ||
diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile new file mode 100644 index 000000000000..98a2b4f0b18b --- /dev/null +++ b/drivers/misc/genwqe/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | # | ||
| 2 | # Makefile for GenWQE driver | ||
| 3 | # | ||
| 4 | |||
| 5 | obj-$(CONFIG_GENWQE) := genwqe_card.o | ||
| 6 | genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \ | ||
| 7 | card_debugfs.o card_utils.o | ||
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c new file mode 100644 index 000000000000..74d51c9bb858 --- /dev/null +++ b/drivers/misc/genwqe/card_base.c | |||
| @@ -0,0 +1,1205 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Module initialization and PCIe setup. Card health monitoring and | ||
| 23 | * recovery functionality. Character device creation and deletion are | ||
| 24 | * controlled from here. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/types.h> | ||
| 29 | #include <linux/pci.h> | ||
| 30 | #include <linux/err.h> | ||
| 31 | #include <linux/aer.h> | ||
| 32 | #include <linux/string.h> | ||
| 33 | #include <linux/sched.h> | ||
| 34 | #include <linux/wait.h> | ||
| 35 | #include <linux/delay.h> | ||
| 36 | #include <linux/dma-mapping.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | #include <linux/notifier.h> | ||
| 39 | #include <linux/device.h> | ||
| 40 | #include <linux/log2.h> | ||
| 41 | #include <linux/genwqe/genwqe_card.h> | ||
| 42 | |||
| 43 | #include "card_base.h" | ||
| 44 | #include "card_ddcb.h" | ||
| 45 | |||
| 46 | MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); | ||
| 47 | MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); | ||
| 48 | MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); | ||
| 49 | MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>"); | ||
| 50 | |||
| 51 | MODULE_DESCRIPTION("GenWQE Card"); | ||
| 52 | MODULE_VERSION(DRV_VERS_STRING); | ||
| 53 | MODULE_LICENSE("GPL"); | ||
| 54 | |||
| 55 | static char genwqe_driver_name[] = GENWQE_DEVNAME; | ||
| 56 | static struct class *class_genwqe; | ||
| 57 | static struct dentry *debugfs_genwqe; | ||
| 58 | static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; | ||
| 59 | |||
| 60 | /* PCI structure for identifying device by PCI vendor and device ID */ | ||
| 61 | static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = { | ||
| 62 | { .vendor = PCI_VENDOR_ID_IBM, | ||
| 63 | .device = PCI_DEVICE_GENWQE, | ||
| 64 | .subvendor = PCI_SUBVENDOR_ID_IBM, | ||
| 65 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, | ||
| 66 | .class = (PCI_CLASSCODE_GENWQE5 << 8), | ||
| 67 | .class_mask = ~0, | ||
| 68 | .driver_data = 0 }, | ||
| 69 | |||
| 70 | /* Initial SR-IOV bring-up image */ | ||
| 71 | { .vendor = PCI_VENDOR_ID_IBM, | ||
| 72 | .device = PCI_DEVICE_GENWQE, | ||
| 73 | .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, | ||
| 74 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, | ||
| 75 | .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), | ||
| 76 | .class_mask = ~0, | ||
| 77 | .driver_data = 0 }, | ||
| 78 | |||
| 79 | { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ | ||
| 80 | .device = 0x0000, /* VF Device ID */ | ||
| 81 | .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, | ||
| 82 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, | ||
| 83 | .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), | ||
| 84 | .class_mask = ~0, | ||
| 85 | .driver_data = 0 }, | ||
| 86 | |||
| 87 | /* Fixed up image */ | ||
| 88 | { .vendor = PCI_VENDOR_ID_IBM, | ||
| 89 | .device = PCI_DEVICE_GENWQE, | ||
| 90 | .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, | ||
| 91 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, | ||
| 92 | .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), | ||
| 93 | .class_mask = ~0, | ||
| 94 | .driver_data = 0 }, | ||
| 95 | |||
| 96 | { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ | ||
| 97 | .device = 0x0000, /* VF Device ID */ | ||
| 98 | .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, | ||
| 99 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, | ||
| 100 | .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), | ||
| 101 | .class_mask = ~0, | ||
| 102 | .driver_data = 0 }, | ||
| 103 | |||
| 104 | /* Even one more ... */ | ||
| 105 | { .vendor = PCI_VENDOR_ID_IBM, | ||
| 106 | .device = PCI_DEVICE_GENWQE, | ||
| 107 | .subvendor = PCI_SUBVENDOR_ID_IBM, | ||
| 108 | .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW, | ||
| 109 | .class = (PCI_CLASSCODE_GENWQE5 << 8), | ||
| 110 | .class_mask = ~0, | ||
| 111 | .driver_data = 0 }, | ||
| 112 | |||
| 113 | { 0, } /* 0 terminated list. */ | ||
| 114 | }; | ||
| 115 | |||
| 116 | MODULE_DEVICE_TABLE(pci, genwqe_device_table); | ||
| 117 | |||
| 118 | /** | ||
| 119 | * genwqe_dev_alloc() - Create and prepare a new card descriptor | ||
| 120 | * | ||
| 121 | * Return: Pointer to card descriptor, or ERR_PTR(err) on error | ||
| 122 | */ | ||
| 123 | static struct genwqe_dev *genwqe_dev_alloc(void) | ||
| 124 | { | ||
| 125 | unsigned int i = 0, j; | ||
| 126 | struct genwqe_dev *cd; | ||
| 127 | |||
| 128 | for (i = 0; i < GENWQE_CARD_NO_MAX; i++) { | ||
| 129 | if (genwqe_devices[i] == NULL) | ||
| 130 | break; | ||
| 131 | } | ||
| 132 | if (i >= GENWQE_CARD_NO_MAX) | ||
| 133 | return ERR_PTR(-ENODEV); | ||
| 134 | |||
| 135 | cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL); | ||
| 136 | if (!cd) | ||
| 137 | return ERR_PTR(-ENOMEM); | ||
| 138 | |||
| 139 | cd->card_idx = i; | ||
| 140 | cd->class_genwqe = class_genwqe; | ||
| 141 | cd->debugfs_genwqe = debugfs_genwqe; | ||
| 142 | |||
| 143 | init_waitqueue_head(&cd->queue_waitq); | ||
| 144 | |||
| 145 | spin_lock_init(&cd->file_lock); | ||
| 146 | INIT_LIST_HEAD(&cd->file_list); | ||
| 147 | |||
| 148 | cd->card_state = GENWQE_CARD_UNUSED; | ||
| 149 | spin_lock_init(&cd->print_lock); | ||
| 150 | |||
| 151 | cd->ddcb_software_timeout = genwqe_ddcb_software_timeout; | ||
| 152 | cd->kill_timeout = genwqe_kill_timeout; | ||
| 153 | |||
| 154 | for (j = 0; j < GENWQE_MAX_VFS; j++) | ||
| 155 | cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec; | ||
| 156 | |||
| 157 | genwqe_devices[i] = cd; | ||
| 158 | return cd; | ||
| 159 | } | ||
| 160 | |||
| 161 | static void genwqe_dev_free(struct genwqe_dev *cd) | ||
| 162 | { | ||
| 163 | if (!cd) | ||
| 164 | return; | ||
| 165 | |||
| 166 | genwqe_devices[cd->card_idx] = NULL; | ||
| 167 | kfree(cd); | ||
| 168 | } | ||
| 169 | |||
| 170 | /** | ||
| 171 | * genwqe_bus_reset() - Card recovery | ||
| 172 | * | ||
| 173 | * pci_reset_function() will recover the device and ensure that the | ||
| 174 | * registers are accessible again when it completes with success. If | ||
| 175 | * not, the card will stay dead and registers will be inaccessible | ||
| 176 | * still. | ||
| 177 | */ | ||
| 178 | static int genwqe_bus_reset(struct genwqe_dev *cd) | ||
| 179 | { | ||
| 180 | int bars, rc = 0; | ||
| 181 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 182 | void __iomem *mmio; | ||
| 183 | |||
| 184 | if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE) | ||
| 185 | return -EIO; | ||
| 186 | |||
| 187 | mmio = cd->mmio; | ||
| 188 | cd->mmio = NULL; | ||
| 189 | pci_iounmap(pci_dev, mmio); | ||
| 190 | |||
| 191 | bars = pci_select_bars(pci_dev, IORESOURCE_MEM); | ||
| 192 | pci_release_selected_regions(pci_dev, bars); | ||
| 193 | |||
| 194 | /* | ||
| 195 | * Firmware/BIOS might change memory mapping during bus reset. | ||
| 196 | * Settings like enable bus-mastering, ... are backed up and | ||
| 197 | * restored by the pci_reset_function(). | ||
| 198 | */ | ||
| 199 | dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__); | ||
| 200 | rc = pci_reset_function(pci_dev); | ||
| 201 | if (rc) { | ||
| 202 | dev_err(&pci_dev->dev, | ||
| 203 | "[%s] err: failed reset func (rc %d)\n", __func__, rc); | ||
| 204 | return rc; | ||
| 205 | } | ||
| 206 | dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc); | ||
| 207 | |||
| 208 | /* | ||
| 209 | * Here is the right spot to clear the register read | ||
| 210 | * failure. pci_bus_reset() does this job in real systems. | ||
| 211 | */ | ||
| 212 | cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE | | ||
| 213 | GENWQE_INJECT_GFIR_FATAL | | ||
| 214 | GENWQE_INJECT_GFIR_INFO); | ||
| 215 | |||
| 216 | rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); | ||
| 217 | if (rc) { | ||
| 218 | dev_err(&pci_dev->dev, | ||
| 219 | "[%s] err: request bars failed (%d)\n", __func__, rc); | ||
| 220 | return -EIO; | ||
| 221 | } | ||
| 222 | |||
| 223 | cd->mmio = pci_iomap(pci_dev, 0, 0); | ||
| 224 | if (cd->mmio == NULL) { | ||
| 225 | dev_err(&pci_dev->dev, | ||
| 226 | "[%s] err: mapping BAR0 failed\n", __func__); | ||
| 227 | return -ENOMEM; | ||
| 228 | } | ||
| 229 | return 0; | ||
| 230 | } | ||
| 231 | |||
| 232 | /* | ||
| 233 | * Hardware circumvention section. Certain bitstreams in our test-lab | ||
| 234 | * had different kinds of problems. Here is where we adjust those | ||
| 235 | * bitstreams to function well with this version of our device driver. | ||
| 236 | * | ||
| 237 | * These circumventions are applied to the physical function only. | ||
| 238 | * The magical numbers below are identifying development/manufacturing | ||
| 239 | * versions of the bitstream used on the card. | ||
| 240 | * | ||
| 241 | * Turn off error reporting for old/manufacturing images. | ||
| 242 | */ | ||
| 243 | |||
| 244 | bool genwqe_need_err_masking(struct genwqe_dev *cd) | ||
| 245 | { | ||
| 246 | return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull; | ||
| 247 | } | ||
| 248 | |||
| 249 | static void genwqe_tweak_hardware(struct genwqe_dev *cd) | ||
| 250 | { | ||
| 251 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 252 | |||
| 253 | /* Mask FIRs for development images */ | ||
| 254 | if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) && | ||
| 255 | ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) { | ||
| 256 | dev_warn(&pci_dev->dev, | ||
| 257 | "FIRs masked due to bitstream %016llx.%016llx\n", | ||
| 258 | cd->slu_unitcfg, cd->app_unitcfg); | ||
| 259 | |||
| 260 | __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR, | ||
| 261 | 0xFFFFFFFFFFFFFFFFull); | ||
| 262 | |||
| 263 | __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK, | ||
| 264 | 0x0000000000000000ull); | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | /** | ||
| 269 | * genwqe_recovery_on_fatal_gfir_required() - Version depended actions | ||
| 270 | * | ||
| 271 | * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must | ||
| 272 | * be ignored. This is e.g. true for the bitstream we gave to the card | ||
| 273 | * manufacturer, but also for some old bitstreams we released to our | ||
| 274 | * test-lab. | ||
| 275 | */ | ||
| 276 | int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd) | ||
| 277 | { | ||
| 278 | return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull; | ||
| 279 | } | ||
| 280 | |||
| 281 | int genwqe_flash_readback_fails(struct genwqe_dev *cd) | ||
| 282 | { | ||
| 283 | return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull; | ||
| 284 | } | ||
| 285 | |||
| 286 | /** | ||
| 287 | * genwqe_T_psec() - Calculate PF/VF timeout register content | ||
| 288 | * | ||
| 289 | * Note: From a design perspective it turned out to be a bad idea to | ||
| 290 | * use codes here to specify the frequency/speed values. An old | ||
| 291 | * driver cannot understand new codes and is therefore always a | ||
| 292 | * problem. Better is to measure out the value or put the | ||
| 293 | * speed/frequency directly into a register which is always a valid | ||
| 294 | * value for old as well as for new software. | ||
| 295 | */ | ||
| 296 | /* T = 1/f */ | ||
| 297 | static int genwqe_T_psec(struct genwqe_dev *cd) | ||
| 298 | { | ||
| 299 | u16 speed; /* 1/f -> 250, 200, 166, 175 */ | ||
| 300 | static const int T[] = { 4000, 5000, 6000, 5714 }; | ||
| 301 | |||
| 302 | speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full); | ||
| 303 | if (speed >= ARRAY_SIZE(T)) | ||
| 304 | return -1; /* illegal value */ | ||
| 305 | |||
| 306 | return T[speed]; | ||
| 307 | } | ||
| 308 | |||
| 309 | /** | ||
| 310 | * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution | ||
| 311 | * | ||
| 312 | * Do this _after_ card_reset() is called. Otherwise the values will | ||
| 313 | * vanish. The settings need to be done when the queues are inactive. | ||
| 314 | * | ||
| 315 | * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16. | ||
| 316 | * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16. | ||
| 317 | */ | ||
| 318 | static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd) | ||
| 319 | { | ||
| 320 | u32 T = genwqe_T_psec(cd); | ||
| 321 | u64 x; | ||
| 322 | |||
| 323 | if (genwqe_pf_jobtimeout_msec == 0) | ||
| 324 | return false; | ||
| 325 | |||
| 326 | /* PF: large value needed, flash update 2sec per block */ | ||
| 327 | x = ilog2(genwqe_pf_jobtimeout_msec * | ||
| 328 | 16000000000uL/(T * 15)) - 10; | ||
| 329 | |||
| 330 | genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, | ||
| 331 | 0xff00 | (x & 0xff), 0); | ||
| 332 | return true; | ||
| 333 | } | ||
| 334 | |||
| 335 | /** | ||
| 336 | * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution | ||
| 337 | */ | ||
| 338 | static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd) | ||
| 339 | { | ||
| 340 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 341 | unsigned int vf; | ||
| 342 | u32 T = genwqe_T_psec(cd); | ||
| 343 | u64 x; | ||
| 344 | |||
| 345 | for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) { | ||
| 346 | |||
| 347 | if (cd->vf_jobtimeout_msec[vf] == 0) | ||
| 348 | continue; | ||
| 349 | |||
| 350 | x = ilog2(cd->vf_jobtimeout_msec[vf] * | ||
| 351 | 16000000000uL/(T * 15)) - 10; | ||
| 352 | |||
| 353 | genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, | ||
| 354 | 0xff00 | (x & 0xff), vf + 1); | ||
| 355 | } | ||
| 356 | return true; | ||
| 357 | } | ||
| 358 | |||
| 359 | static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd) | ||
| 360 | { | ||
| 361 | unsigned int type, e = 0; | ||
| 362 | |||
| 363 | for (type = 0; type < GENWQE_DBG_UNITS; type++) { | ||
| 364 | switch (type) { | ||
| 365 | case GENWQE_DBG_UNIT0: | ||
| 366 | e = genwqe_ffdc_buff_size(cd, 0); | ||
| 367 | break; | ||
| 368 | case GENWQE_DBG_UNIT1: | ||
| 369 | e = genwqe_ffdc_buff_size(cd, 1); | ||
| 370 | break; | ||
| 371 | case GENWQE_DBG_UNIT2: | ||
| 372 | e = genwqe_ffdc_buff_size(cd, 2); | ||
| 373 | break; | ||
| 374 | case GENWQE_DBG_REGS: | ||
| 375 | e = GENWQE_FFDC_REGS; | ||
| 376 | break; | ||
| 377 | } | ||
| 378 | |||
| 379 | /* currently support only the debug units mentioned here */ | ||
| 380 | cd->ffdc[type].entries = e; | ||
| 381 | cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg), | ||
| 382 | GFP_KERNEL); | ||
| 383 | /* | ||
| 384 | * regs == NULL is ok, the using code treats this as no regs, | ||
| 385 | * Printing warning is ok in this case. | ||
| 386 | */ | ||
| 387 | } | ||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd) | ||
| 392 | { | ||
| 393 | unsigned int type; | ||
| 394 | |||
| 395 | for (type = 0; type < GENWQE_DBG_UNITS; type++) { | ||
| 396 | kfree(cd->ffdc[type].regs); | ||
| 397 | cd->ffdc[type].regs = NULL; | ||
| 398 | } | ||
| 399 | } | ||
| 400 | |||
| 401 | static int genwqe_read_ids(struct genwqe_dev *cd) | ||
| 402 | { | ||
| 403 | int err = 0; | ||
| 404 | int slu_id; | ||
| 405 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 406 | |||
| 407 | cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); | ||
| 408 | if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) { | ||
| 409 | dev_err(&pci_dev->dev, | ||
| 410 | "err: SLUID=%016llx\n", cd->slu_unitcfg); | ||
| 411 | err = -EIO; | ||
| 412 | goto out_err; | ||
| 413 | } | ||
| 414 | |||
| 415 | slu_id = genwqe_get_slu_id(cd); | ||
| 416 | if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) { | ||
| 417 | dev_err(&pci_dev->dev, | ||
| 418 | "err: incompatible SLU Architecture %u\n", slu_id); | ||
| 419 | err = -ENOENT; | ||
| 420 | goto out_err; | ||
| 421 | } | ||
| 422 | |||
| 423 | cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); | ||
| 424 | if (cd->app_unitcfg == IO_ILLEGAL_VALUE) { | ||
| 425 | dev_err(&pci_dev->dev, | ||
| 426 | "err: APPID=%016llx\n", cd->app_unitcfg); | ||
| 427 | err = -EIO; | ||
| 428 | goto out_err; | ||
| 429 | } | ||
| 430 | genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name)); | ||
| 431 | |||
| 432 | /* | ||
| 433 | * Is access to all registers possible? If we are a VF the | ||
| 434 | * answer is obvious. If we run fully virtualized, we need to | ||
| 435 | * check if we can access all registers. If we do not have | ||
| 436 | * full access we will cause an UR and some informational FIRs | ||
| 437 | * in the PF, but that should not harm. | ||
| 438 | */ | ||
| 439 | if (pci_dev->is_virtfn) | ||
| 440 | cd->is_privileged = 0; | ||
| 441 | else | ||
| 442 | cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) | ||
| 443 | != IO_ILLEGAL_VALUE); | ||
| 444 | |||
| 445 | out_err: | ||
| 446 | return err; | ||
| 447 | } | ||
| 448 | |||
/**
 * genwqe_start() - Bring the card to a usable state
 * @cd: genwqe device descriptor
 *
 * Reads the unit IDs, captures FFDC data (privileged/PF only), sets
 * up the service layer (which resets the card) and finally creates
 * the character device. If the card entered FATAL_ERROR state before,
 * a stealth-mode chip reload via bus reset is attempted first.
 *
 * Return: 0 on success, negative errno from genwqe_read_ids()/bus
 *         reset, or -EIO when service layer or chdev setup failed.
 */
static int genwqe_start(struct genwqe_dev *cd)
{
	int err;
	struct pci_dev *pci_dev = cd->pci_dev;

	err = genwqe_read_ids(cd);
	if (err)
		return err;

	if (genwqe_is_privileged(cd)) {
		/* do this after the tweaks. alloc fail is acceptable */
		genwqe_ffdc_buffs_alloc(cd);
		/* Stop traps so register capture sees a stable state */
		genwqe_stop_traps(cd);

		/* Collect registers e.g. FIRs, UNITIDs, traces ... */
		genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
				      cd->ffdc[GENWQE_DBG_REGS].entries, 0);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
				      cd->ffdc[GENWQE_DBG_UNIT0].regs,
				      cd->ffdc[GENWQE_DBG_UNIT0].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
				      cd->ffdc[GENWQE_DBG_UNIT1].regs,
				      cd->ffdc[GENWQE_DBG_UNIT1].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
				      cd->ffdc[GENWQE_DBG_UNIT2].regs,
				      cd->ffdc[GENWQE_DBG_UNIT2].entries);

		genwqe_start_traps(cd);

		if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
			dev_warn(&pci_dev->dev,
				 "[%s] chip reload/recovery!\n", __func__);

			/*
			 * Stealth Mode: Reload chip on either hot
			 * reset or PERST.
			 */
			cd->softreset = 0x7Cull;
			__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
					cd->softreset);

			err = genwqe_bus_reset(cd);
			if (err != 0) {
				dev_err(&pci_dev->dev,
					"[%s] err: bus reset failed!\n",
					__func__);
				goto out;
			}

			/*
			 * Re-read the IDs because
			 * it could happen that the bitstream load
			 * failed!
			 */
			err = genwqe_read_ids(cd);
			if (err)
				goto out;
		}
	}

	err = genwqe_setup_service_layer(cd);  /* does a reset to the card */
	if (err != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: could not setup servicelayer!\n", __func__);
		err = -ENODEV;
		goto out;
	}

	if (genwqe_is_privileged(cd)) {	 /* code is running _after_ reset */
		genwqe_tweak_hardware(cd);

		genwqe_setup_pf_jtimer(cd);
		genwqe_setup_vf_jtimer(cd);
	}

	err = genwqe_device_create(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: chdev init failed! (err=%d)\n", err);
		goto out_release_service_layer;
	}
	return 0;

 out_release_service_layer:
	genwqe_release_service_layer(cd);
 out:
	/* FFDC buffers were only allocated in the privileged path above */
	if (genwqe_is_privileged(cd))
		genwqe_ffdc_buffs_free(cd);
	return -EIO;
}
| 542 | |||
/**
 * genwqe_stop() - Stop card operation
 * @cd: genwqe device descriptor
 *
 * Recovery notes:
 *   As long as genwqe_thread runs we might access registers during
 *   error data capture. Same is with the genwqe_health_thread.
 *   When genwqe_bus_reset() fails this function might be called two
 *   times: first by the genwqe_health_thread() and later by
 *   genwqe_remove() to unbind the device. We must be able to survive
 *   that.
 *
 * This function must be robust enough to be called twice.
 *
 * Return: always 0
 */
static int genwqe_stop(struct genwqe_dev *cd)
{
	genwqe_finish_queue(cd);	    /* no register access */
	genwqe_device_remove(cd);	    /* device removed, procs killed */
	genwqe_release_service_layer(cd);   /* here genwqe_thread is stopped */

	if (genwqe_is_privileged(cd)) {
		pci_disable_sriov(cd->pci_dev);	/* access pci config space */
		genwqe_ffdc_buffs_free(cd);
	}

	return 0;
}
| 568 | |||
/**
 * genwqe_recover_card() - Try to recover the card if it is possible
 * @cd:        genwqe device descriptor
 * @fatal_err: if non-zero, no register access is possible anymore and
 *             the FFDC-preserving soft reset setup is skipped
 *
 * If fatal_err is set no register access is possible anymore. It is
 * likely that genwqe_start fails in that situation. Proper error
 * handling is required in this case.
 *
 * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
 * and later genwqe_probe() for all virtual functions.
 *
 * Return: 0 on success, negative errno when bus reset or restart failed.
 */
static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	genwqe_stop(cd);

	/*
	 * Make sure chip is not reloaded to maintain FFDC. Write SLU
	 * Reset Register, CPLDReset field to 0.
	 */
	if (!fatal_err) {
		cd->softreset = 0x70ull;
		__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
	}

	rc = genwqe_bus_reset(cd);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: card recovery impossible!\n", __func__);
		return rc;
	}

	rc = genwqe_start(cd);
	if (rc < 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed to launch device!\n", __func__);
		return rc;
	}
	return 0;
}
| 610 | |||
| 611 | static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir) | ||
| 612 | { | ||
| 613 | *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); | ||
| 614 | return (*gfir & GFIR_ERR_TRIGGER) && | ||
| 615 | genwqe_recovery_on_fatal_gfir_required(cd); | ||
| 616 | } | ||
| 617 | |||
/**
 * genwqe_fir_checking() - Check the fault isolation registers of the card
 * @cd: genwqe device descriptor
 *
 * If this code works ok, can be tried out with help of the genwqe_poke tool:
 *   sudo ./tools/genwqe_poke 0x8 0xfefefefefef
 *
 * Now the relevant FIRs/sFIRs should be printed out and the driver should
 * invoke recovery (devices are removed and readded).
 *
 * For every unit the primary FIR/FEC registers are scanned; for each
 * set primary FIR bit the matching secondary FIR/FEC pair is dumped.
 * Non-fatal FIRs are cleared on the fly. When new fatal GFIR bits
 * appear while scanning, the whole scan restarts (bounded to 16
 * iterations).
 *
 * Return: 0 when no GFIR bits are set; the fatal GFIR bits seen on
 *         entry (gfir_masked) otherwise; IO_ILLEGAL_VALUE when the
 *         card stopped responding (register reads return -1).
 */
static u64 genwqe_fir_checking(struct genwqe_dev *cd)
{
	int j, iterations = 0;
	u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
	u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
	struct pci_dev *pci_dev = cd->pci_dev;

 healthMonitor:
	iterations++;
	if (iterations > 16) {
		dev_err(&pci_dev->dev, "* exit looping after %d times\n",
			iterations);
		goto fatal_error;
	}

	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir != 0x0)
		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
			IO_SLC_CFGREG_GFIR, gfir);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	/*
	 * Returning early when no GFIR bit is on prevents a continuous
	 * printout e.g. for the following bug:
	 *   FIR set without a 2ndary FIR/FIR cannot be cleared.
	 * Comment out the following if to get the prints:
	 */
	if (gfir == 0)
		return 0;

	gfir_masked = gfir & GFIR_ERR_TRIGGER;	/* fatal errors */

	for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */

		/* read the primary FIR (pfir) */
		fir_addr = (uid << 24) + 0x08;
		fir = __genwqe_readq(cd, fir_addr);
		if (fir == 0x0)
			continue;  /* no error in this unit */

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
		if (fir == IO_ILLEGAL_VALUE)
			goto fatal_error;

		/* read primary FEC */
		fec_addr = (uid << 24) + 0x18;
		fec = __genwqe_readq(cd, fec_addr);

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
		if (fec == IO_ILLEGAL_VALUE)
			goto fatal_error;

		for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {

			/* secondary fir empty, skip it */
			if ((fir & mask) == 0x0)
				continue;

			sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
			sfir = __genwqe_readq(cd, sfir_addr);

			if (sfir == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfir_addr, sfir);

			sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
			sfec = __genwqe_readq(cd, sfec_addr);

			if (sfec == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfec_addr, sfec);

			gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
			if (gfir == IO_ILLEGAL_VALUE)
				goto fatal_error;

			/* gfir turned on during routine! get out and
			   start over. */
			if ((gfir_masked == 0x0) &&
			    (gfir & GFIR_ERR_TRIGGER)) {
				goto healthMonitor;
			}

			/* do not clear if we entered with a fatal gfir */
			if (gfir_masked == 0x0) {

				/* NEW clear by mask the logged bits */
				sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
				__genwqe_writeq(cd, sfir_addr, sfir);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing 2ndary FIR 0x%08x "
					"with 0x%016llx\n", sfir_addr, sfir);

				/*
				 * note, these cannot be error-Firs
				 * since gfir_masked is 0 after sfir
				 * was read. Also, it is safe to do
				 * this write if sfir=0. Still need to
				 * clear the primary. This just means
				 * there is no secondary FIR.
				 */

				/* clear by mask the logged bit. */
				fir_clr_addr = (uid << 24) + 0x10;
				__genwqe_writeq(cd, fir_clr_addr, mask);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing primary FIR 0x%08x "
					"with 0x%016llx\n", fir_clr_addr,
					mask);
			}
		}
	}
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
		/*
		 * Check once more that it didn't go on after all the
		 * FIRS were cleared.
		 */
		dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
			iterations);
		goto healthMonitor;
	}
	return gfir_masked;

 fatal_error:
	return IO_ILLEGAL_VALUE;
}
| 762 | |||
/**
 * genwqe_health_thread() - Health checking thread
 * @data: pointer to the struct genwqe_dev being monitored
 *
 * This thread is only started for the PF of the card.
 *
 * This thread monitors the health of the card. A critical situation
 * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
 * this case we need to be recovered from outside. Writing to
 * registers will very likely not work either.
 *
 * This thread must only exit if kthread_should_stop() becomes true.
 *
 * Condition for the health-thread to trigger:
 *   a) when a kthread_stop() request comes in or
 *   b) a critical GFIR occurred
 *
 * Informational GFIRs are checked and potentially printed in
 * health_check_interval seconds.
 *
 * Return: 0 on regular stop request, -EIO when the card became
 *         unusable (then the thread idles until kthread_stop()).
 */
static int genwqe_health_thread(void *data)
{
	int rc, should_stop = 0;
	struct genwqe_dev *cd = data;
	struct pci_dev *pci_dev = cd->pci_dev;
	u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;

	while (!kthread_should_stop()) {
		/* Wake up early on fatal GFIR or stop request; otherwise
		   run the periodic check every health_check_interval secs */
		rc = wait_event_interruptible_timeout(cd->health_waitq,
			 (genwqe_health_check_cond(cd, &gfir) ||
			  (should_stop = kthread_should_stop())),
				 genwqe_health_check_interval * HZ);

		if (should_stop)
			break;

		/* -1 reads mean the card does not respond anymore */
		if (gfir == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] GFIR=%016llx\n", __func__, gfir);
			goto fatal_error;
		}

		slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
		if (slu_unitcfg == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] SLU_UNITCFG=%016llx\n",
				__func__, slu_unitcfg);
			goto fatal_error;
		}

		app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
		if (app_unitcfg == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] APP_UNITCFG=%016llx\n",
				__func__, app_unitcfg);
			goto fatal_error;
		}

		gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
		if (gfir == IO_ILLEGAL_VALUE) {
			dev_err(&pci_dev->dev,
				"[%s] %s: GFIR=%016llx\n", __func__,
				(gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
				gfir);
			goto fatal_error;
		}

		gfir_masked = genwqe_fir_checking(cd);
		if (gfir_masked == IO_ILLEGAL_VALUE)
			goto fatal_error;

		/*
		 * GFIR ErrorTrigger bits set => reset the card!
		 * Never do this for old/manufacturing images!
		 */
		if ((gfir_masked) && !cd->skip_recovery &&
		    genwqe_recovery_on_fatal_gfir_required(cd)) {

			cd->card_state = GENWQE_CARD_FATAL_ERROR;

			rc = genwqe_recover_card(cd, 0);
			if (rc < 0) {
				/* FIXME Card is unusable and needs unbind! */
				goto fatal_error;
			}
		}

		cd->last_gfir = gfir;
		cond_resched();
	}

	return 0;

 fatal_error:
	dev_err(&pci_dev->dev,
		"[%s] card unusable. Please trigger unbind!\n", __func__);

	/* Bring down logical devices to inform user space via udev remove. */
	cd->card_state = GENWQE_CARD_FATAL_ERROR;
	genwqe_stop(cd);

	/* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
	while (!kthread_should_stop())
		cond_resched();

	return -EIO;
}
| 869 | |||
| 870 | static int genwqe_health_check_start(struct genwqe_dev *cd) | ||
| 871 | { | ||
| 872 | int rc; | ||
| 873 | |||
| 874 | if (genwqe_health_check_interval <= 0) | ||
| 875 | return 0; /* valid for disabling the service */ | ||
| 876 | |||
| 877 | /* moved before request_irq() */ | ||
| 878 | /* init_waitqueue_head(&cd->health_waitq); */ | ||
| 879 | |||
| 880 | cd->health_thread = kthread_run(genwqe_health_thread, cd, | ||
| 881 | GENWQE_DEVNAME "%d_health", | ||
| 882 | cd->card_idx); | ||
| 883 | if (IS_ERR(cd->health_thread)) { | ||
| 884 | rc = PTR_ERR(cd->health_thread); | ||
| 885 | cd->health_thread = NULL; | ||
| 886 | return rc; | ||
| 887 | } | ||
| 888 | return 0; | ||
| 889 | } | ||
| 890 | |||
| 891 | static int genwqe_health_thread_running(struct genwqe_dev *cd) | ||
| 892 | { | ||
| 893 | return cd->health_thread != NULL; | ||
| 894 | } | ||
| 895 | |||
| 896 | static int genwqe_health_check_stop(struct genwqe_dev *cd) | ||
| 897 | { | ||
| 898 | int rc; | ||
| 899 | |||
| 900 | if (!genwqe_health_thread_running(cd)) | ||
| 901 | return -EIO; | ||
| 902 | |||
| 903 | rc = kthread_stop(cd->health_thread); | ||
| 904 | cd->health_thread = NULL; | ||
| 905 | return 0; | ||
| 906 | } | ||
| 907 | |||
/**
 * genwqe_pci_setup() - Allocate PCIe related resources for our card
 * @cd: genwqe device descriptor
 *
 * Enables the PCI device, reserves its memory BARs, configures a
 * 64-bit (preferred) or 32-bit DMA mask, maps BAR0 and reads the
 * unit IDs from the card.
 *
 * Return: 0 on success, negative errno otherwise. All partially
 *         acquired resources are released on failure.
 */
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
	int err, bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	err = pci_enable_device_mem(pci_dev);
	if (err) {
		dev_err(&pci_dev->dev,
			"err: failed to enable pci memory (err=%d)\n", err);
		goto err_out;
	}

	/* Reserve PCI I/O and memory resources */
	err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (err) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, err);
		err = -EIO;
		goto err_disable_device;
	}

	/* check for 64-bit DMA address supported (DAC) */
	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA64 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	/* check for 32-bit DMA address supported (SAC) */
	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA32 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	} else {
		dev_err(&pci_dev->dev,
			"err: neither DMA32 nor DMA64 supported\n");
		err = -EIO;
		goto out_release_resources;
	}

	pci_set_master(pci_dev);
	pci_enable_pcie_error_reporting(pci_dev);

	/* request complete BAR-0 space (length = 0) */
	cd->mmio_len = pci_resource_len(pci_dev, 0);
	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		err = -ENOMEM;
		goto out_release_resources;
	}

	cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);

	err = genwqe_read_ids(cd);
	if (err)
		goto out_iounmap;

	return 0;

 out_iounmap:
	pci_iounmap(pci_dev, cd->mmio);
 out_release_resources:
	pci_release_selected_regions(pci_dev, bars);
 err_disable_device:
	pci_disable_device(pci_dev);
 err_out:
	return err;
}
| 988 | |||
/**
 * genwqe_pci_remove() - Free PCIe related resources for our card
 * @cd: genwqe device descriptor
 *
 * Reverse of genwqe_pci_setup(): unmap BAR0 (if mapped), release the
 * reserved memory regions and disable the PCI device.
 */
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
	int bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->mmio)
		pci_iounmap(pci_dev, cd->mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);
	pci_disable_device(pci_dev);
}
| 1004 | |||
/**
 * genwqe_probe() - Device initialization
 * @pci_dev: PCI device information struct
 * @id:      matched entry of genwqe_device_table (unused here)
 *
 * Callable for multiple cards. This function is called on bind.
 *
 * Allocates the per-card structure, sets up PCI resources, starts
 * the card services and (for the PF) the health checking thread.
 *
 * Return: 0 if succeeded, < 0 when failed
 */
static int genwqe_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *id)
{
	int err;
	struct genwqe_dev *cd;

	genwqe_init_crc32();

	cd = genwqe_dev_alloc();
	if (IS_ERR(cd)) {
		dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
			(int)PTR_ERR(cd));
		return PTR_ERR(cd);
	}

	dev_set_drvdata(&pci_dev->dev, cd);
	cd->pci_dev = pci_dev;

	err = genwqe_pci_setup(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: problems with PCI setup (err=%d)\n", err);
		goto out_free_dev;
	}

	err = genwqe_start(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: cannot start card services! (err=%d)\n", err);
		goto out_pci_remove;
	}

	/* Health checking only runs on the privileged PF */
	if (genwqe_is_privileged(cd)) {
		err = genwqe_health_check_start(cd);
		if (err < 0) {
			dev_err(&pci_dev->dev,
				"err: cannot start health checking! "
				"(err=%d)\n", err);
			goto out_stop_services;
		}
	}
	return 0;

 out_stop_services:
	genwqe_stop(cd);
 out_pci_remove:
	genwqe_pci_remove(cd);
 out_free_dev:
	genwqe_dev_free(cd);
	return err;
}
| 1064 | |||
/**
 * genwqe_remove() - Called when device is removed (hot-pluggable)
 * @pci_dev: PCI device to unbind
 *
 * Or when driver is unloaded respectively when unbind is done.
 */
static void genwqe_remove(struct pci_dev *pci_dev)
{
	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);

	genwqe_health_check_stop(cd);

	/*
	 * genwqe_stop() must survive if it is called twice
	 * sequentially. This happens when the health thread calls it
	 * and fails on genwqe_bus_reset().
	 */
	genwqe_stop(cd);
	genwqe_pci_remove(cd);
	genwqe_dev_free(cd);
}
| 1085 | |||
| 1086 | /* | ||
| 1087 | * genwqe_err_error_detected() - Error detection callback | ||
| 1088 | * | ||
| 1089 | * This callback is called by the PCI subsystem whenever a PCI bus | ||
| 1090 | * error is detected. | ||
| 1091 | */ | ||
| 1092 | static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, | ||
| 1093 | enum pci_channel_state state) | ||
| 1094 | { | ||
| 1095 | struct genwqe_dev *cd; | ||
| 1096 | |||
| 1097 | dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); | ||
| 1098 | |||
| 1099 | if (pci_dev == NULL) | ||
| 1100 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 1101 | |||
| 1102 | cd = dev_get_drvdata(&pci_dev->dev); | ||
| 1103 | if (cd == NULL) | ||
| 1104 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 1105 | |||
| 1106 | switch (state) { | ||
| 1107 | case pci_channel_io_normal: | ||
| 1108 | return PCI_ERS_RESULT_CAN_RECOVER; | ||
| 1109 | case pci_channel_io_frozen: | ||
| 1110 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 1111 | case pci_channel_io_perm_failure: | ||
| 1112 | return PCI_ERS_RESULT_DISCONNECT; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | return PCI_ERS_RESULT_NEED_RESET; | ||
| 1116 | } | ||
| 1117 | |||
/* No-op recovery step: report nothing to do for this stage. */
static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
{
	return PCI_ERS_RESULT_NONE;
}
| 1122 | |||
/* Resume callback: intentionally empty, nothing to restart here. */
static void genwqe_err_resume(struct pci_dev *dev)
{
}
| 1126 | |||
| 1127 | static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) | ||
| 1128 | { | ||
| 1129 | struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); | ||
| 1130 | |||
| 1131 | if (numvfs > 0) { | ||
| 1132 | genwqe_setup_vf_jtimer(cd); | ||
| 1133 | pci_enable_sriov(dev, numvfs); | ||
| 1134 | return numvfs; | ||
| 1135 | } | ||
| 1136 | if (numvfs == 0) { | ||
| 1137 | pci_disable_sriov(dev); | ||
| 1138 | return 0; | ||
| 1139 | } | ||
| 1140 | return 0; | ||
| 1141 | } | ||
| 1142 | |||
/* PCI error recovery callbacks; only error_detected does real work. */
static struct pci_error_handlers genwqe_err_handler = {
	.error_detected = genwqe_err_error_detected,
	.mmio_enabled	= genwqe_err_result_none,
	.link_reset	= genwqe_err_result_none,
	.slot_reset	= genwqe_err_result_none,
	.resume		= genwqe_err_resume,
};
| 1150 | |||
/* PCI driver registration data for the GenWQE accelerator cards. */
static struct pci_driver genwqe_driver = {
	.name	  = genwqe_driver_name,
	.id_table = genwqe_device_table,
	.probe	  = genwqe_probe,
	.remove	  = genwqe_remove,
	.sriov_configure = genwqe_sriov_configure,
	.err_handler = &genwqe_err_handler,
};
| 1159 | |||
| 1160 | /** | ||
| 1161 | * genwqe_init_module() - Driver registration and initialization | ||
| 1162 | */ | ||
| 1163 | static int __init genwqe_init_module(void) | ||
| 1164 | { | ||
| 1165 | int rc; | ||
| 1166 | |||
| 1167 | class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME); | ||
| 1168 | if (IS_ERR(class_genwqe)) { | ||
| 1169 | pr_err("[%s] create class failed\n", __func__); | ||
| 1170 | return -ENOMEM; | ||
| 1171 | } | ||
| 1172 | |||
| 1173 | debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); | ||
| 1174 | if (!debugfs_genwqe) { | ||
| 1175 | rc = -ENOMEM; | ||
| 1176 | goto err_out; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | rc = pci_register_driver(&genwqe_driver); | ||
| 1180 | if (rc != 0) { | ||
| 1181 | pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc); | ||
| 1182 | goto err_out0; | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | return rc; | ||
| 1186 | |||
| 1187 | err_out0: | ||
| 1188 | debugfs_remove(debugfs_genwqe); | ||
| 1189 | err_out: | ||
| 1190 | class_destroy(class_genwqe); | ||
| 1191 | return rc; | ||
| 1192 | } | ||
| 1193 | |||
/**
 * genwqe_exit_module() - Driver exit
 *
 * Tear down in reverse order of genwqe_init_module(): unregister the
 * PCI driver first (invokes genwqe_remove() for bound cards), then
 * remove debugfs and the device class.
 */
static void __exit genwqe_exit_module(void)
{
	pci_unregister_driver(&genwqe_driver);
	debugfs_remove(debugfs_genwqe);
	class_destroy(class_genwqe);
}
| 1203 | |||
| 1204 | module_init(genwqe_init_module); | ||
| 1205 | module_exit(genwqe_exit_module); | ||
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h new file mode 100644 index 000000000000..5e4dbd21f89a --- /dev/null +++ b/drivers/misc/genwqe/card_base.h | |||
| @@ -0,0 +1,557 @@ | |||
| 1 | #ifndef __CARD_BASE_H__ | ||
| 2 | #define __CARD_BASE_H__ | ||
| 3 | |||
| 4 | /** | ||
| 5 | * IBM Accelerator Family 'GenWQE' | ||
| 6 | * | ||
| 7 | * (C) Copyright IBM Corp. 2013 | ||
| 8 | * | ||
| 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 11 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 12 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 13 | * | ||
| 14 | * This program is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 16 | * as published by the Free Software Foundation. | ||
| 17 | * | ||
| 18 | * This program is distributed in the hope that it will be useful, | ||
| 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | * GNU General Public License for more details. | ||
| 22 | */ | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Interfaces within the GenWQE module. Defines genwqe_card and | ||
| 26 | * ddcb_queue as well as ddcb_requ. | ||
| 27 | */ | ||
| 28 | |||
| 29 | #include <linux/kernel.h> | ||
| 30 | #include <linux/types.h> | ||
| 31 | #include <linux/cdev.h> | ||
| 32 | #include <linux/stringify.h> | ||
| 33 | #include <linux/pci.h> | ||
| 34 | #include <linux/semaphore.h> | ||
| 35 | #include <linux/uaccess.h> | ||
| 36 | #include <linux/io.h> | ||
| 37 | #include <linux/version.h> | ||
| 38 | #include <linux/debugfs.h> | ||
| 39 | #include <linux/slab.h> | ||
| 40 | |||
| 41 | #include <linux/genwqe/genwqe_card.h> | ||
| 42 | #include "genwqe_driver.h" | ||
| 43 | |||
| 44 | #define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */ | ||
| 45 | #define GENWQE_FLAG_MSI_ENABLED (1 << 0) | ||
| 46 | |||
| 47 | #define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */ | ||
| 48 | #define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */ | ||
| 49 | #define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS) | ||
| 50 | |||
| 51 | /* Compile parameters, some of them appear in debugfs for later adjustment */ | ||
| 52 | #define genwqe_ddcb_max 32 /* DDCBs on the work-queue */ | ||
| 53 | #define genwqe_polling_enabled 0 /* in case of irqs not working */ | ||
| 54 | #define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */ | ||
| 55 | #define genwqe_kill_timeout 8 /* time until process gets killed */ | ||
| 56 | #define genwqe_vf_jobtimeout_msec 250 /* 250 msec */ | ||
| 57 | #define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */ | ||
| 58 | #define genwqe_health_check_interval 4 /* <= 0: disabled */ | ||
| 59 | |||
| 60 | /* Sysfs attribute groups used when we create the genwqe device */ | ||
| 61 | extern const struct attribute_group *genwqe_attribute_groups[]; | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Config space for Genwqe5 A7: | ||
| 65 | * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00 | ||
| 66 | * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00 | ||
| 67 | * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04] | ||
| 68 | * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 | ||
| 69 | */ | ||
| 70 | #define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */ | ||
| 71 | |||
| 72 | #define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */ | ||
| 73 | #define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */ | ||
| 74 | #define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */ | ||
| 75 | |||
| 76 | #define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000 | ||
| 77 | #define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */ | ||
| 78 | #define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */ | ||
| 79 | |||
| 80 | #define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */ | ||
| 81 | |||
/**
 * struct genwqe_reg - Genwqe data dump functionality
 * @addr: register address/offset the value was read from
 * @idx:  index of this entry within the dump (exact semantics defined
 *        by the FFDC dump code — not visible here)
 * @val:  64-bit value captured from the card
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};
| 90 | |||
/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 *
 * Used as index into the cd->ffdc[] capture buffers (see
 * genwqe_start()/genwqe_ffdc_buff_read() usage).
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS  = 8,
	GENWQE_DBG_DMA   = 9,
	GENWQE_DBG_UNITS = 10, /* max number of possible debug units  */
};
| 107 | |||
| 108 | /* Software error injection to simulate card failures */ | ||
| 109 | #define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */ | ||
| 110 | #define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */ | ||
| 111 | #define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */ | ||
| 112 | #define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */ | ||
| 113 | |||
/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as doing an unbind/bind operation
 * on the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If this detects a problem the card's
 * device is shut down and restarted again, along with a reset of
 * the card and queue.
 *
 * All functions accessing the card device return either -EIO or -ENODEV
 * code to indicate the malfunction to the user. The user has to close
 * the file descriptor and open a new one, once the card becomes
 * available again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal is
 * generated for the application which has to provide a handler to
 * react on it. If the application does not close the open
 * file descriptor a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */
| 155 | |||
| 156 | |||
/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,		/* contiguous memory buffer */
	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */
	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */
};
| 169 | |||
/**
 * struct dma_mapping - Information about memory mappings done by the driver
 *
 * Describes one user buffer made DMA-able: the pinned pages, their
 * per-page DMA addresses and the list linkage used to track the
 * mapping per open file / per card.
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;			/* user-space vaddr/non-aligned */
	void *k_vaddr;			/* kernel-space vaddr/non-aligned */
	dma_addr_t dma_addr;		/* physical DMA address */

	struct page **page_list;	/* list of pages used by user buff */
	dma_addr_t *dma_list;		/* list of dma addresses per page */
	unsigned int nr_pages;		/* number of pages */
	unsigned int size;		/* size in bytes */

	struct list_head card_list;	/* list of usr_maps for card */
	struct list_head pin_list;	/* list of pinned memory for dev */
};
| 188 | |||
/* Initialize a mapping descriptor: clear all fields, then set its type. */
static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
	memset(m, 0, sizeof(*m));
	m->type = type;
}
| 195 | |||
/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max:            Number of DDCBs on the queue
 * @ddcb_next:           Next free DDCB
 * @ddcb_act:            Next DDCB supposed to finish
 * @ddcb_seq:            Sequence number of last DDCB
 * @ddcbs_in_flight:     Currently enqueued DDCBs
 * @ddcbs_completed:     Number of already completed DDCBs
 * @ddcbs_max_in_flight: Maximum number of DDCBs in flight observed
 * @busy:                Number of -EBUSY returns
 * @ddcb_daddr:          DMA address of first DDCB in the queue
 * @ddcb_vaddr:          Kernel virtual address of first DDCB in the queue
 * @ddcb_req:            Associated requests (one per DDCB)
 * @ddcb_waitqs:         Associated wait queues (one per DDCB)
 * @ddcb_lock:           Lock to protect queuing operations
 * @ddcb_waitq:          Wait on next DDCB finishing
 */
struct ddcb_queue {
	int ddcb_max;			/* amount of DDCBs  */
	int ddcb_next;			/* next available DDCB num */
	int ddcb_act;			/* DDCB to be processed */
	u16 ddcb_seq;			/* slc seq num */
	unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */
	unsigned int ddcbs_completed;	/* statistics counter */
	unsigned int ddcbs_max_in_flight; /* statistics high-water mark */
	unsigned int busy;		/* how many times -EBUSY? */

	dma_addr_t ddcb_daddr;		/* DMA address */
	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */
	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */
	wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */

	spinlock_t ddcb_lock;		/* exclusive access to queue */
	wait_queue_head_t ddcb_waitq;	/* wait for ddcb processing */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
| 242 | |||
/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG
 * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
 *
 * Worst-case number of registers a full FFDC dump can contain; used
 * to size the capture buffers below.
 */
#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64)))

/* One First-Failure-Data-Capture buffer: @entries valid regs in @regs. */
struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};
| 253 | |||
/**
 * struct genwqe_dev - GenWQE device information
 * @card_state:      Card operation state, see above
 * @ffdc:            First Failure Data Capture buffers for each unit
 * @card_thread:     Working thread to operate the DDCB queue
 * @card_waitq:      Wait queue used in card_thread
 * @queue:           DDCB queue
 * @health_thread:   Card monitoring thread (only for PFs)
 * @health_waitq:    Wait queue used in health_thread
 * @pci_dev:         Associated PCI device (function)
 * @mmio:            Base address of 64-bit register space
 * @mmio_len:        Length of register area
 * @file_lock:       Lock to protect access to file_list
 * @file_list:       List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;		/* serializes diagnostic dumps */

	int card_idx;			/* card index 0..CARD_NO_MAX-1 */
	u64 flags;			/* general flags */

	/* FFDC data gathering */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB workqueue */
	struct task_struct *card_thread;
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* Card health checking thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	/* char device */
	dev_t devnum_genwqe;		/* major/minor num card */
	struct class *class_genwqe;	/* reference to class object */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	u16 num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* access to all regs possible */

	/* config regs which we need often */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;
	char app_name[5];

	spinlock_t file_lock;		/* lock for open files */
	struct list_head file_list;	/* list of open files */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* wait until DDCB times out */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};
| 327 | |||
/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW = 0,		/* allocated, not yet enqueued */
	GENWQE_REQU_ENQUEUED = 1,	/* waiting in the DDCB queue */
	GENWQE_REQU_TAPPED = 2,		/* handed over to the hardware */
	GENWQE_REQU_FINISHED = 3,	/* execution completed */
	GENWQE_REQU_STATE_MAX,
};
| 338 | |||
/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state; /* request status */
	int num;			  /* ddcb_no for this request */
	struct ddcb_queue *queue;	  /* associated queue */

	/* per-fixup DMA state: pinned buffers and their scatter lists */
	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct sg_entry *sgl[DDCB_FIXUPS];
	dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
	size_t sgl_size[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;	/* user-space DDCB command copy */
	struct genwqe_debug_data debug_data;
};
| 358 | |||
/**
 * struct genwqe_file - Information for open GenWQE devices
 *
 * Per-open-file state: tracks the owning task for signalling and the
 * DMA mappings / pinned memory created through this descriptor so
 * they can be cleaned up on close or card recovery.
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue; /* for SIGIO delivery */
	struct task_struct *owner;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};
| 377 | |||
| 378 | int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */ | ||
| 379 | int genwqe_finish_queue(struct genwqe_dev *cd); | ||
| 380 | int genwqe_release_service_layer(struct genwqe_dev *cd); | ||
| 381 | |||
| 382 | /** | ||
| 383 | * genwqe_get_slu_id() - Read Service Layer Unit Id | ||
| 384 | * Return: 0x00: Development code | ||
| 385 | * 0x01: SLC1 (old) | ||
| 386 | * 0x02: SLC2 (sept2012) | ||
| 387 | * 0x03: SLC2 (feb2013, generic driver) | ||
| 388 | */ | ||
| 389 | static inline int genwqe_get_slu_id(struct genwqe_dev *cd) | ||
| 390 | { | ||
| 391 | return (int)((cd->slu_unitcfg >> 32) & 0xff); | ||
| 392 | } | ||
| 393 | |||
| 394 | int genwqe_ddcbs_in_flight(struct genwqe_dev *cd); | ||
| 395 | |||
| 396 | u8 genwqe_card_type(struct genwqe_dev *cd); | ||
| 397 | int genwqe_card_reset(struct genwqe_dev *cd); | ||
| 398 | int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count); | ||
| 399 | void genwqe_reset_interrupt_capability(struct genwqe_dev *cd); | ||
| 400 | |||
| 401 | int genwqe_device_create(struct genwqe_dev *cd); | ||
| 402 | int genwqe_device_remove(struct genwqe_dev *cd); | ||
| 403 | |||
| 404 | /* debugfs */ | ||
| 405 | int genwqe_init_debugfs(struct genwqe_dev *cd); | ||
| 406 | void genqwe_exit_debugfs(struct genwqe_dev *cd); | ||
| 407 | |||
| 408 | int genwqe_read_softreset(struct genwqe_dev *cd); | ||
| 409 | |||
| 410 | /* Hardware Circumventions */ | ||
| 411 | int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd); | ||
| 412 | int genwqe_flash_readback_fails(struct genwqe_dev *cd); | ||
| 413 | |||
| 414 | /** | ||
| 415 | * genwqe_write_vreg() - Write register in VF window | ||
| 416 | * @cd: genwqe device | ||
| 417 | * @reg: register address | ||
| 418 | * @val: value to write | ||
| 419 | * @func: 0: PF, 1: VF0, ..., 15: VF14 | ||
| 420 | */ | ||
| 421 | int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func); | ||
| 422 | |||
| 423 | /** | ||
| 424 | * genwqe_read_vreg() - Read register in VF window | ||
| 425 | * @cd: genwqe device | ||
| 426 | * @reg: register address | ||
| 427 | * @func: 0: PF, 1: VF0, ..., 15: VF14 | ||
| 428 | * | ||
| 429 | * Return: content of the register | ||
| 430 | */ | ||
| 431 | u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func); | ||
| 432 | |||
| 433 | /* FFDC Buffer Management */ | ||
| 434 | int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id); | ||
| 435 | int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id, | ||
| 436 | struct genwqe_reg *regs, unsigned int max_regs); | ||
| 437 | int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, | ||
| 438 | unsigned int max_regs, int all); | ||
| 439 | int genwqe_ffdc_dump_dma(struct genwqe_dev *cd, | ||
| 440 | struct genwqe_reg *regs, unsigned int max_regs); | ||
| 441 | |||
| 442 | int genwqe_init_debug_data(struct genwqe_dev *cd, | ||
| 443 | struct genwqe_debug_data *d); | ||
| 444 | |||
| 445 | void genwqe_init_crc32(void); | ||
| 446 | int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len); | ||
| 447 | |||
| 448 | /* Memory allocation/deallocation; dma address handling */ | ||
| 449 | int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, | ||
| 450 | void *uaddr, unsigned long size, | ||
| 451 | struct ddcb_requ *req); | ||
| 452 | |||
| 453 | int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, | ||
| 454 | struct ddcb_requ *req); | ||
| 455 | |||
| 456 | struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, | ||
| 457 | dma_addr_t *dma_addr, size_t *sgl_size); | ||
| 458 | |||
| 459 | void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, | ||
| 460 | dma_addr_t dma_addr, size_t size); | ||
| 461 | |||
| 462 | int genwqe_setup_sgl(struct genwqe_dev *cd, | ||
| 463 | unsigned long offs, | ||
| 464 | unsigned long size, | ||
| 465 | struct sg_entry *sgl, /* genwqe sgl */ | ||
| 466 | dma_addr_t dma_addr, size_t sgl_size, | ||
| 467 | dma_addr_t *dma_list, int page_offs, int num_pages); | ||
| 468 | |||
| 469 | int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, | ||
| 470 | int size); | ||
| 471 | |||
| 472 | static inline bool dma_mapping_used(struct dma_mapping *m) | ||
| 473 | { | ||
| 474 | if (!m) | ||
| 475 | return 0; | ||
| 476 | return m->size != 0; | ||
| 477 | } | ||
| 478 | |||
| 479 | /** | ||
| 480 | * __genwqe_execute_ddcb() - Execute DDCB request with addr translation | ||
| 481 | * | ||
| 482 | * This function will do the address translation changes to the DDCBs | ||
| 483 | * according to the definitions required by the ATS field. It looks up | ||
| 484 | * the memory allocation buffer or does vmap/vunmap for the respective | ||
| 485 | * user-space buffers, inclusive page pinning and scatter gather list | ||
| 486 | * buildup and teardown. | ||
| 487 | */ | ||
| 488 | int __genwqe_execute_ddcb(struct genwqe_dev *cd, | ||
| 489 | struct genwqe_ddcb_cmd *cmd); | ||
| 490 | |||
| 491 | /** | ||
| 492 | * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation | ||
| 493 | * | ||
| 494 | * This version will not do address translation or any modifcation of | ||
| 495 | * the DDCB data. It is used e.g. for the MoveFlash DDCB which is | ||
| 496 | * entirely prepared by the driver itself. That means the appropriate | ||
| 497 | * DMA addresses are already in the DDCB and do not need any | ||
| 498 | * modification. | ||
| 499 | */ | ||
| 500 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, | ||
| 501 | struct genwqe_ddcb_cmd *cmd); | ||
| 502 | |||
| 503 | int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | ||
| 504 | int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | ||
| 505 | int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | ||
| 506 | |||
| 507 | /* register access */ | ||
| 508 | int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val); | ||
| 509 | u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs); | ||
| 510 | int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val); | ||
| 511 | u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs); | ||
| 512 | |||
| 513 | void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, | ||
| 514 | dma_addr_t *dma_handle); | ||
| 515 | void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, | ||
| 516 | void *vaddr, dma_addr_t dma_handle); | ||
| 517 | |||
| 518 | /* Base clock frequency in MHz */ | ||
| 519 | int genwqe_base_clock_frequency(struct genwqe_dev *cd); | ||
| 520 | |||
| 521 | /* Before FFDC is captured the traps should be stopped. */ | ||
| 522 | void genwqe_stop_traps(struct genwqe_dev *cd); | ||
| 523 | void genwqe_start_traps(struct genwqe_dev *cd); | ||
| 524 | |||
| 525 | /* Hardware circumvention */ | ||
| 526 | bool genwqe_need_err_masking(struct genwqe_dev *cd); | ||
| 527 | |||
/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 * @cd: genwqe device descriptor
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On Systems with no SRIOV support _and_ virtualized systems we get:
 *   is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual pci device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one pci device id for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access e.g. currently on PowerPC.
 *
 *   if (pci_dev->is_virtfn)
 *          cd->is_privileged = 0;
 *   else
 *          cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *				 != IO_ILLEGAL_VALUE);
 *
 * Return: non-zero when the function has full register access
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}
| 556 | |||
| 557 | #endif /* __CARD_BASE_H__ */ | ||
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c new file mode 100644 index 000000000000..6f1acc0ccf88 --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.c | |||
| @@ -0,0 +1,1376 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Device Driver Control Block (DDCB) queue support. Definition of | ||
| 23 | * interrupt handlers for queue support as well as triggering the | ||
| 24 | * health monitor code in case of problems. The current hardware uses | ||
| 25 | * an MSI interrupt which is shared between error handling and | ||
| 26 | * functional code. | ||
| 27 | */ | ||
| 28 | |||
| 29 | #include <linux/types.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <linux/sched.h> | ||
| 32 | #include <linux/wait.h> | ||
| 33 | #include <linux/pci.h> | ||
| 34 | #include <linux/string.h> | ||
| 35 | #include <linux/dma-mapping.h> | ||
| 36 | #include <linux/delay.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | #include <linux/interrupt.h> | ||
| 39 | #include <linux/crc-itu-t.h> | ||
| 40 | |||
| 41 | #include "card_base.h" | ||
| 42 | #include "card_ddcb.h" | ||
| 43 | |||
| 44 | /* | ||
| 45 | * N: next DDCB, this is where the next DDCB will be put. | ||
| 46 | * A: active DDCB, this is where the code will look for the next completion. | ||
| 47 | * x: DDCB is enqueued, we are waiting for its completion. | ||
| 48 | |||
| 49 | * Situation (1): Empty queue | ||
| 50 | * +---+---+---+---+---+---+---+---+ | ||
| 51 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
| 52 | * | | | | | | | | | | ||
| 53 | * +---+---+---+---+---+---+---+---+ | ||
| 54 | * A/N | ||
| 55 | * enqueued_ddcbs = A - N = 2 - 2 = 0 | ||
| 56 | * | ||
| 57 | * Situation (2): Wrapped, N > A | ||
| 58 | * +---+---+---+---+---+---+---+---+ | ||
| 59 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
| 60 | * | | | x | x | | | | | | ||
| 61 | * +---+---+---+---+---+---+---+---+ | ||
| 62 | * A N | ||
| 63 | * enqueued_ddcbs = N - A = 4 - 2 = 2 | ||
| 64 | * | ||
| 65 | * Situation (3): Queue wrapped, A > N | ||
| 66 | * +---+---+---+---+---+---+---+---+ | ||
| 67 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
| 68 | * | x | x | | | x | x | x | x | | ||
| 69 | * +---+---+---+---+---+---+---+---+ | ||
| 70 | * N A | ||
| 71 | * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6 | ||
| 72 | * | ||
| 73 | * Situation (4a): Queue full N > A | ||
| 74 | * +---+---+---+---+---+---+---+---+ | ||
| 75 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
| 76 | * | x | x | x | x | x | x | x | | | ||
| 77 | * +---+---+---+---+---+---+---+---+ | ||
| 78 | * A N | ||
| 79 | * | ||
| 80 | * enqueued_ddcbs = N - A = 7 - 0 = 7 | ||
| 81 | * | ||
 * Situation (4b): Queue full A > N
| 83 | * +---+---+---+---+---+---+---+---+ | ||
| 84 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
| 85 | * | x | x | x | | x | x | x | x | | ||
| 86 | * +---+---+---+---+---+---+---+---+ | ||
| 87 | * N A | ||
| 88 | * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7 | ||
| 89 | */ | ||
| 90 | |||
| 91 | static int queue_empty(struct ddcb_queue *queue) | ||
| 92 | { | ||
| 93 | return queue->ddcb_next == queue->ddcb_act; | ||
| 94 | } | ||
| 95 | |||
| 96 | static int queue_enqueued_ddcbs(struct ddcb_queue *queue) | ||
| 97 | { | ||
| 98 | if (queue->ddcb_next >= queue->ddcb_act) | ||
| 99 | return queue->ddcb_next - queue->ddcb_act; | ||
| 100 | |||
| 101 | return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); | ||
| 102 | } | ||
| 103 | |||
| 104 | static int queue_free_ddcbs(struct ddcb_queue *queue) | ||
| 105 | { | ||
| 106 | int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; | ||
| 107 | |||
| 108 | if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */ | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | return free_ddcbs; | ||
| 112 | } | ||
| 113 | |||
/*
 * Use of the PRIV field in the DDCB for queue debugging:
 *
 * (1) Trying to get rid of a DDCB which saw a timeout:
 *     pddcb->priv[6] = 0xcc;   # cleared
 *
 * (2) Append a DDCB via NEXT bit:
 *     pddcb->priv[7] = 0xaa;   # appended
 *
 * (3) DDCB needed tapping:
 *     pddcb->priv[7] = 0xbb;   # tapped
 *
 * (4) DDCB marked as correctly finished:
 *     pddcb->priv[6] = 0xff;   # finished
 *
 * These markers are for post-mortem inspection only (see
 * print_ddcb_info()); the hardware does not interpret them.
 */

static inline void ddcb_mark_tapped(struct ddcb *pddcb)
{
	pddcb->priv[7] = 0xbb;  /* tapped */
}

static inline void ddcb_mark_appended(struct ddcb *pddcb)
{
	pddcb->priv[7] = 0xaa;	/* appended */
}

static inline void ddcb_mark_cleared(struct ddcb *pddcb)
{
	pddcb->priv[6] = 0xcc; /* cleared */
}

static inline void ddcb_mark_finished(struct ddcb *pddcb)
{
	pddcb->priv[6] = 0xff;	/* finished */
}

static inline void ddcb_mark_unused(struct ddcb *pddcb)
{
	pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
}
| 154 | |||
/**
 * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
 * @buff:       pointer to data buffer
 * @len:        length of data for calculation
 * @init:       initial crc (0xffff at start)
 *
 * Polynomial = x^16 + x^12 + x^5 + 1   (0x1021)
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
 * should result in a crc16 of 0x89c3
 *
 * NOTE(review): crc_itu_t() returns the value in host byte order;
 * callers presumably convert with cpu_to_be16() before writing it to
 * the DDCB — confirm the "big endian" note below against the callers.
 *
 * Return: crc16 checksum in big endian format !
 */
static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
{
	return crc_itu_t(init, buff, len);
}
| 171 | |||
/*
 * print_ddcb_info() - Dump the state of every DDCB in the queue
 *
 * Prints one line per DDCB slot, marking the currently active one
 * with '>'. cd->print_lock serializes concurrent dumps so their
 * output does not interleave.
 *
 * NOTE(review): the heading uses dev_info() while the per-DDCB lines
 * use dev_err() — confirm whether the mixed log levels are intended.
 */
static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
	int i;
	struct ddcb *pddcb;
	unsigned long flags;
	struct pci_dev *pci_dev = cd->pci_dev;

	spin_lock_irqsave(&cd->print_lock, flags);

	dev_info(&pci_dev->dev,
		 "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
		 cd->card_idx, queue->ddcb_act, queue->ddcb_next);

	pddcb = queue->ddcb_vaddr;
	for (i = 0; i < queue->ddcb_max; i++) {
		dev_err(&pci_dev->dev,
			"  %c %-3d: RETC=%03x SEQ=%04x "
			"HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
			i == queue->ddcb_act ? '>' : ' ',
			i,
			be16_to_cpu(pddcb->retc_16),
			be16_to_cpu(pddcb->seqnum_16),
			pddcb->hsi,
			pddcb->shi,
			be64_to_cpu(pddcb->priv_64),
			pddcb->cmd);
		pddcb++;
	}
	spin_unlock_irqrestore(&cd->print_lock, flags);
}
| 202 | |||
| 203 | struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) | ||
| 204 | { | ||
| 205 | struct ddcb_requ *req; | ||
| 206 | |||
| 207 | req = kzalloc(sizeof(*req), GFP_ATOMIC); | ||
| 208 | if (!req) | ||
| 209 | return NULL; | ||
| 210 | |||
| 211 | return &req->cmd; | ||
| 212 | } | ||
| 213 | |||
| 214 | void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) | ||
| 215 | { | ||
| 216 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); | ||
| 217 | kfree(req); | ||
| 218 | } | ||
| 219 | |||
/* Accessor for the software-side request state (mirrors HW progress). */
static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
{
	return req->req_state;
}

/* Record a software-side state transition for @req. */
static inline void ddcb_requ_set_state(struct ddcb_requ *req,
				       enum genwqe_requ_state new_state)
{
	req->req_state = new_state;
}

/* Non-zero ddata_addr means the caller asked for debug-data capture. */
static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
{
	return req->cmd.ddata_addr != 0x0;
}
| 235 | |||
| 236 | /** | ||
| 237 | * ddcb_requ_finished() - Returns the hardware state of the associated DDCB | ||
| 238 | * @cd: pointer to genwqe device descriptor | ||
| 239 | * @req: DDCB work request | ||
| 240 | * | ||
| 241 | * Status of ddcb_requ mirrors this hardware state, but is copied in | ||
| 242 | * the ddcb_requ on interrupt/polling function. The lowlevel code | ||
| 243 | * should check the hardware state directly, the higher level code | ||
| 244 | * should check the copy. | ||
| 245 | * | ||
| 246 | * This function will also return true if the state of the queue is | ||
| 247 | * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the | ||
| 248 | * shutdown case. | ||
| 249 | */ | ||
| 250 | static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) | ||
| 251 | { | ||
| 252 | return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || | ||
| 253 | (cd->card_state != GENWQE_CARD_USED); | ||
| 254 | } | ||
| 255 | |||
/**
 * enqueue_ddcb() - Enqueue a DDCB
 * @cd:         pointer to genwqe device descriptor
 * @queue:      queue this operation should be done on
 * @ddcb_no:    pointer to ddcb number being tapped
 *
 * Start execution of DDCB by tapping or append to queue via NEXT
 * bit. This is done by an atomic 'compare and swap' instruction and
 * checking SHI and HSI of the previous DDCB.
 *
 * This function must only be called with ddcb_lock held.
 *
 * Return: 1 if new DDCB is appended to previous
 *         2 if DDCB queue is tapped via register/simulation
 */
#define RET_DDCB_APPENDED 1
#define RET_DDCB_TAPPED   2

static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
			struct ddcb *pddcb, int ddcb_no)
{
	unsigned int try;
	int prev_no;
	struct ddcb *prev_ddcb;
	__be32 old, new, icrc_hsi_shi;
	u64 num;

	/*
	 * For performance checks a Dispatch Timestamp can be put into
	 * DDCB It is supposed to use the SLU's free running counter,
	 * but this requires PCIe cycles.
	 */
	ddcb_mark_unused(pddcb);

	/* check previous DDCB if already fetched */
	prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
	prev_ddcb = &queue->ddcb_vaddr[prev_no];

	/*
	 * It might have happened that the HSI.FETCHED bit is
	 * set. Retry in this case. Therefore I expect maximum 2 times
	 * trying.
	 */
	ddcb_mark_appended(pddcb);
	for (try = 0; try < 2; try++) {
		old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */

		/* try to append via NEXT bit if prev DDCB is not completed */
		if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
			break;

		/*
		 * Atomically set the NEXT bit; if the previous DDCB's
		 * word changed under us (hardware progressed), retry.
		 */
		new = (old | DDCB_NEXT_BE32);
		icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);

		if (icrc_hsi_shi == old)
			return RET_DDCB_APPENDED; /* appended to queue */
	}

	/* Queue must be re-started by updating QUEUE_OFFSET */
	ddcb_mark_tapped(pddcb);
	num = (u64)ddcb_no << 8;
	__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */

	return RET_DDCB_TAPPED;
}
| 321 | |||
| 322 | /** | ||
| 323 | * copy_ddcb_results() - Copy output state from real DDCB to request | ||
| 324 | * | ||
| 325 | * Copy DDCB ASV to request struct. There is no endian | ||
| 326 | * conversion made, since data structure in ASV is still | ||
| 327 | * unknown here. | ||
| 328 | * | ||
| 329 | * This is needed by: | ||
| 330 | * - genwqe_purge_ddcb() | ||
| 331 | * - genwqe_check_ddcb_queue() | ||
| 332 | */ | ||
| 333 | static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) | ||
| 334 | { | ||
| 335 | struct ddcb_queue *queue = req->queue; | ||
| 336 | struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; | ||
| 337 | |||
| 338 | memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); | ||
| 339 | |||
| 340 | /* copy status flags of the variant part */ | ||
| 341 | req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16); | ||
| 342 | req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); | ||
| 343 | req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); | ||
| 344 | |||
| 345 | req->cmd.attn = be16_to_cpu(pddcb->attn_16); | ||
| 346 | req->cmd.progress = be32_to_cpu(pddcb->progress_32); | ||
| 347 | req->cmd.retc = be16_to_cpu(pddcb->retc_16); | ||
| 348 | |||
| 349 | if (ddcb_requ_collect_debug_data(req)) { | ||
| 350 | int prev_no = (ddcb_no == 0) ? | ||
| 351 | queue->ddcb_max - 1 : ddcb_no - 1; | ||
| 352 | struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; | ||
| 353 | |||
| 354 | memcpy(&req->debug_data.ddcb_finished, pddcb, | ||
| 355 | sizeof(req->debug_data.ddcb_finished)); | ||
| 356 | memcpy(&req->debug_data.ddcb_prev, prev_pddcb, | ||
| 357 | sizeof(req->debug_data.ddcb_prev)); | ||
| 358 | } | ||
| 359 | } | ||
| 360 | |||
/**
 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
 * @cd:    pointer to genwqe device descriptor
 * @queue: queue to be scanned for finished DDCBs
 *
 * Walks the queue from the active DDCB onwards, hands results back to
 * the waiting requestors and wakes them up. Stops at the first DDCB
 * that is not yet completed.
 *
 * Return: Number of DDCBs which were finished
 */
static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
				   struct ddcb_queue *queue)
{
	unsigned long flags;
	int ddcbs_finished = 0;
	struct pci_dev *pci_dev = cd->pci_dev;

	spin_lock_irqsave(&queue->ddcb_lock, flags);

	/* FIXME avoid soft locking CPU */
	while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {

		struct ddcb *pddcb;
		struct ddcb_requ *req;
		u16 vcrc, vcrc_16, retc_16;

		pddcb = &queue->ddcb_vaddr[queue->ddcb_act];

		if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
		    0x00000000)
			goto go_home; /* not completed, continue waiting */

		/* Note: DDCB could be purged */

		req = queue->ddcb_req[queue->ddcb_act];
		if (req == NULL) {
			/* this occurs if DDCB is purged, not an error */
			/* Move active DDCB further; Nothing to do anymore. */
			goto pick_next_one;
		}

		/*
		 * HSI=0x44 (fetched and completed), but RETC is
		 * 0x101, or even worse 0x000.
		 *
		 * In case of seeing the queue in inconsistent state
		 * we read the errcnts and the queue status to provide
		 * a trigger for our PCIe analyzer stop capturing.
		 */
		retc_16 = be16_to_cpu(pddcb->retc_16);
		if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
			u64 errcnts, status;
			u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;

			errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
			status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

			dev_err(&pci_dev->dev,
				"[%s] SEQN=%04x HSI=%02x RETC=%03x "
				" Q_ERRCNTS=%016llx Q_STATUS=%016llx\n"
				" DDCB_DMA_ADDR=%016llx\n",
				__func__, be16_to_cpu(pddcb->seqnum_16),
				pddcb->hsi, retc_16, errcnts, status,
				queue->ddcb_daddr + ddcb_offs);
		}

		copy_ddcb_results(req, queue->ddcb_act);
		queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */

		dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

		ddcb_mark_finished(pddcb);

		/* calculate CRC_16 to see if VCRC is correct */
		vcrc = genwqe_crc16(pddcb->asv,
				    VCRC_LENGTH(req->cmd.asv_length),
				    0xffff);
		vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
		if (vcrc != vcrc_16) {
			printk_ratelimited(KERN_ERR
				"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d "
				"bytes vcrc_data=%04x is not vcrc_card=%04x\n",
				GENWQE_DEVNAME, dev_name(&pci_dev->dev),
				pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
				vcrc, vcrc_16);
		}

		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
		queue->ddcbs_completed++;
		queue->ddcbs_in_flight--;

		/* wake up process waiting for this DDCB */
		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);

pick_next_one:
		queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
		ddcbs_finished++;
	}

go_home:
	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
	return ddcbs_finished;
}
| 461 | |||
| 462 | /** | ||
| 463 | * __genwqe_wait_ddcb(): Waits until DDCB is completed | ||
| 464 | * @cd: pointer to genwqe device descriptor | ||
| 465 | * @req: pointer to requsted DDCB parameters | ||
| 466 | * | ||
| 467 | * The Service Layer will update the RETC in DDCB when processing is | ||
| 468 | * pending or done. | ||
| 469 | * | ||
| 470 | * Return: > 0 remaining jiffies, DDCB completed | ||
| 471 | * -ETIMEDOUT when timeout | ||
| 472 | * -ERESTARTSYS when ^C | ||
| 473 | * -EINVAL when unknown error condition | ||
| 474 | * | ||
| 475 | * When an error is returned the called needs to ensure that | ||
| 476 | * purge_ddcb() is being called to get the &req removed from the | ||
| 477 | * queue. | ||
| 478 | */ | ||
| 479 | int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) | ||
| 480 | { | ||
| 481 | int rc; | ||
| 482 | unsigned int ddcb_no; | ||
| 483 | struct ddcb_queue *queue; | ||
| 484 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 485 | |||
| 486 | if (req == NULL) | ||
| 487 | return -EINVAL; | ||
| 488 | |||
| 489 | queue = req->queue; | ||
| 490 | if (queue == NULL) | ||
| 491 | return -EINVAL; | ||
| 492 | |||
| 493 | ddcb_no = req->num; | ||
| 494 | if (ddcb_no >= queue->ddcb_max) | ||
| 495 | return -EINVAL; | ||
| 496 | |||
| 497 | rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no], | ||
| 498 | ddcb_requ_finished(cd, req), | ||
| 499 | genwqe_ddcb_software_timeout * HZ); | ||
| 500 | |||
| 501 | /* | ||
| 502 | * We need to distinguish 3 cases here: | ||
| 503 | * 1. rc == 0 timeout occured | ||
| 504 | * 2. rc == -ERESTARTSYS signal received | ||
| 505 | * 3. rc > 0 remaining jiffies condition is true | ||
| 506 | */ | ||
| 507 | if (rc == 0) { | ||
| 508 | struct ddcb_queue *queue = req->queue; | ||
| 509 | struct ddcb *pddcb; | ||
| 510 | |||
| 511 | /* | ||
| 512 | * Timeout may be caused by long task switching time. | ||
| 513 | * When timeout happens, check if the request has | ||
| 514 | * meanwhile completed. | ||
| 515 | */ | ||
| 516 | genwqe_check_ddcb_queue(cd, req->queue); | ||
| 517 | if (ddcb_requ_finished(cd, req)) | ||
| 518 | return rc; | ||
| 519 | |||
| 520 | dev_err(&pci_dev->dev, | ||
| 521 | "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n", | ||
| 522 | __func__, req->num, rc, ddcb_requ_get_state(req), | ||
| 523 | req); | ||
| 524 | dev_err(&pci_dev->dev, | ||
| 525 | "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__, | ||
| 526 | __genwqe_readq(cd, queue->IO_QUEUE_STATUS)); | ||
| 527 | |||
| 528 | pddcb = &queue->ddcb_vaddr[req->num]; | ||
| 529 | genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); | ||
| 530 | |||
| 531 | print_ddcb_info(cd, req->queue); | ||
| 532 | return -ETIMEDOUT; | ||
| 533 | |||
| 534 | } else if (rc == -ERESTARTSYS) { | ||
| 535 | return rc; | ||
| 536 | /* | ||
| 537 | * EINTR: Stops the application | ||
| 538 | * ERESTARTSYS: Restartable systemcall; called again | ||
| 539 | */ | ||
| 540 | |||
| 541 | } else if (rc < 0) { | ||
| 542 | dev_err(&pci_dev->dev, | ||
| 543 | "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n", | ||
| 544 | __func__, req->num, rc, ddcb_requ_get_state(req)); | ||
| 545 | return -EINVAL; | ||
| 546 | } | ||
| 547 | |||
| 548 | /* Severe error occured. Driver is forced to stop operation */ | ||
| 549 | if (cd->card_state != GENWQE_CARD_USED) { | ||
| 550 | dev_err(&pci_dev->dev, | ||
| 551 | "[%s] err: DDCB#%d forced to stop (rc=%d)\n", | ||
| 552 | __func__, req->num, rc); | ||
| 553 | return -EIO; | ||
| 554 | } | ||
| 555 | return rc; | ||
| 556 | } | ||
| 557 | |||
/**
 * get_next_ddcb() - Get next available DDCB
 * @cd:    pointer to genwqe device descriptor
 * @queue: queue to take the DDCB from
 * @num:   output; receives the internal number of the returned DDCB
 *
 * DDCB's content is completely cleared but presets for PRE and
 * SEQNUM. This function must only be called when ddcb_lock is held.
 *
 * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
 */
static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
				  struct ddcb_queue *queue,
				  int *num)
{
	u64 *pu64;
	struct ddcb *pddcb;

	if (queue_free_ddcbs(queue) == 0) /* queue is full */
		return NULL;

	/* find new ddcb */
	pddcb = &queue->ddcb_vaddr[queue->ddcb_next];

	/* if it is not completed, we are not allowed to use it */
	/* barrier(); */
	if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
		return NULL;

	*num = queue->ddcb_next;	/* internal DDCB number */
	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;

	/* clear important DDCB fields */
	pu64 = (u64 *)pddcb;
	pu64[0] = 0ULL;		/* offs 0x00 (ICRC,HSI,SHI,...) */
	pu64[1] = 0ULL;		/* offs 0x01 (ACFUNC,CMD...) */

	/* destroy previous results in ASV */
	pu64[0x80/8] = 0ULL;	/* offs 0x80 (ASV + 0) */
	pu64[0x88/8] = 0ULL;	/* offs 0x88 (ASV + 0x08) */
	pu64[0x90/8] = 0ULL;	/* offs 0x90 (ASV + 0x10) */
	pu64[0x98/8] = 0ULL;	/* offs 0x98 (ASV + 0x18) */
	pu64[0xd0/8] = 0ULL;	/* offs 0xd0 (RETC,ATTN...) */

	pddcb->pre = DDCB_PRESET_PRE;	/* 128 */
	pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
	return pddcb;
}
| 604 | |||
/**
 * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
 * @cd:  genwqe device descriptor
 * @req: DDCB request
 *
 * This will fail when the request was already FETCHED. In this case
 * we need to wait until it is finished. Else the DDCB can be
 * reused. This function also ensures that the request data structure
 * is removed from ddcb_req[].
 *
 * Do not forget to call this function when genwqe_wait_ddcb() fails,
 * such that the request gets really removed from ddcb_req[].
 *
 * Return: 0 success
 */
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
	struct ddcb *pddcb = NULL;
	unsigned int t;
	unsigned long flags;
	struct ddcb_queue *queue = req->queue;
	struct pci_dev *pci_dev = cd->pci_dev;
	u64 queue_status;
	__be32 icrc_hsi_shi = 0x0000;
	__be32 old, new;

	if (genwqe_ddcb_software_timeout <= 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: software timeout is not set!\n", __func__);
		return -EFAULT;
	}

	pddcb = &queue->ddcb_vaddr[req->num];

	/* msleep(100) per iteration; *10 makes the loop span roughly
	   genwqe_ddcb_software_timeout seconds in total */
	for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {

		spin_lock_irqsave(&queue->ddcb_lock, flags);

		/* Check if req was meanwhile finished */
		if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
			goto go_home;

		/* try to set PURGE bit if FETCHED/COMPLETED are not set */
		old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
		if ((old & DDCB_FETCHED_BE32) == 0x00000000) {

			new = (old | DDCB_PURGE_BE32);
			icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
					       old, new);
			if (icrc_hsi_shi == old)
				goto finish_ddcb;
		}

		/* normal finish with HSI bit */
		barrier();
		icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
		if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
			goto finish_ddcb;

		spin_unlock_irqrestore(&queue->ddcb_lock, flags);

		/*
		 * Here the check_ddcb() function will most likely
		 * discover this DDCB to be finished some point in
		 * time. It will mark the req finished and free it up
		 * in the list.
		 */

		copy_ddcb_results(req, req->num); /* for the failing case */
		msleep(100); /* sleep for 1/10 second and try again */
		continue;

finish_ddcb:
		copy_ddcb_results(req, req->num);
		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
		queue->ddcbs_in_flight--;
		queue->ddcb_req[req->num] = NULL; /* delete from array */
		ddcb_mark_cleared(pddcb);

		/* Move active DDCB further; Nothing to do here anymore. */

		/*
		 * We need to ensure that there is at least one free
		 * DDCB in the queue. To do that, we must update
		 * ddcb_act only if the COMPLETED bit is set for the
		 * DDCB we are working on else we treat that DDCB even
		 * if we PURGED it as occupied (hardware is supposed
		 * to set the COMPLETED bit yet!).
		 */
		icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
		if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
		    (queue->ddcb_act == req->num)) {
			queue->ddcb_act = ((queue->ddcb_act + 1) %
					   queue->ddcb_max);
		}
go_home:
		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
		return 0;
	}

	/*
	 * If the card is dead and the queue is forced to stop, we
	 * might see this in the queue status register.
	 */
	queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

	dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
	genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

	dev_err(&pci_dev->dev,
		"[%s] err: DDCB#%d not purged and not completed "
		"after %d seconds QSTAT=%016llx!!\n",
		__func__, req->num, genwqe_ddcb_software_timeout,
		queue_status);

	print_ddcb_info(cd, req->queue);

	return -EFAULT;
}
| 725 | |||
| 726 | int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) | ||
| 727 | { | ||
| 728 | int len; | ||
| 729 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 730 | |||
| 731 | if (d == NULL) { | ||
| 732 | dev_err(&pci_dev->dev, | ||
| 733 | "[%s] err: invalid memory for debug data!\n", | ||
| 734 | __func__); | ||
| 735 | return -EFAULT; | ||
| 736 | } | ||
| 737 | |||
| 738 | len = sizeof(d->driver_version); | ||
| 739 | snprintf(d->driver_version, len, "%s", DRV_VERS_STRING); | ||
| 740 | d->slu_unitcfg = cd->slu_unitcfg; | ||
| 741 | d->app_unitcfg = cd->app_unitcfg; | ||
| 742 | return 0; | ||
| 743 | } | ||
| 744 | |||
/**
 * __genwqe_enqueue_ddcb() - Enqueue a DDCB
 * @cd:  pointer to genwqe device descriptor
 * @req: pointer to DDCB execution request
 *
 * Return: 0 if enqueuing succeeded
 *         -EIO if card is unusable/PCIe problems
 *         -EBUSY if enqueuing failed
 *         -EFAULT if the picked DDCB slot still holds a request
 */
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
	struct ddcb *pddcb;
	unsigned long flags;
	struct ddcb_queue *queue;
	struct pci_dev *pci_dev = cd->pci_dev;
	u16 icrc;

	if (cd->card_state != GENWQE_CARD_USED) {
		printk_ratelimited(KERN_ERR
			"%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
			GENWQE_DEVNAME, dev_name(&pci_dev->dev),
			__func__, req->num);
		return -EIO;
	}

	queue = req->queue = &cd->queue;

	/* FIXME circumvention to improve performance when no irq is
	 * there.
	 */
	if (genwqe_polling_enabled)
		genwqe_check_ddcb_queue(cd, queue);

	/*
	 * It must be ensured to process all DDCBs in successive
	 * order. Use a lock here in order to prevent nested DDCB
	 * enqueuing.
	 */
	spin_lock_irqsave(&queue->ddcb_lock, flags);

	pddcb = get_next_ddcb(cd, queue, &req->num);	/* get ptr and num */
	if (pddcb == NULL) {
		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
		queue->busy++;
		return -EBUSY;
	}

	if (queue->ddcb_req[req->num] != NULL) {
		spin_unlock_irqrestore(&queue->ddcb_lock, flags);

		dev_err(&pci_dev->dev,
			"[%s] picked DDCB %d with req=%p still in use!!\n",
			__func__, req->num, req);
		return -EFAULT;
	}
	ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
	queue->ddcb_req[req->num] = req;

	pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
	pddcb->cmd = req->cmd.cmd;
	pddcb->acfunc = req->cmd.acfunc;	/* functional unit */

	/*
	 * We know that we can get retc 0x104 with CRC error, do not
	 * stop the queue in those cases for this command. XDIR = 1
	 * does not work for old SLU versions.
	 *
	 * Last bitstream with the old XDIR behavior had SLU_ID
	 * 0x34199.
	 */
	if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
		pddcb->xdir = 0x1;
	else
		pddcb->xdir = 0x0;


	pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
		      ((req->cmd.asv_length / 8)));
	pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);

	/*
	 * If copying the whole DDCB_ASIV_LENGTH is impacting
	 * performance we need to change it to
	 * req->cmd.asiv_length. But simulation benefits from some
	 * non-architectured bits behind the architectured content.
	 *
	 * How much data is copied depends on the availability of the
	 * ATS field, which was introduced late. If the ATS field is
	 * supported ASIV is 8 bytes shorter than it used to be. Since
	 * the ATS field is copied too, the code should do exactly
	 * what it did before, but I wanted to make copying of the ATS
	 * field very explicit.
	 */
	if (genwqe_get_slu_id(cd) <= 0x2) {
		memcpy(&pddcb->__asiv[0],	/* destination */
		       &req->cmd.__asiv[0],	/* source */
		       DDCB_ASIV_LENGTH);	/* req->cmd.asiv_length */
	} else {
		pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
		memcpy(&pddcb->n.asiv[0],	/* destination */
		       &req->cmd.asiv[0],	/* source */
		       DDCB_ASIV_LENGTH_ATS);	/* req->cmd.asiv_length */
	}

	pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */

	/*
	 * Calculate CRC_16 for corresponding range PSP(7:4). Include
	 * empty 4 bytes prior to the data.
	 */
	icrc = genwqe_crc16((const u8 *)pddcb,
			    ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
	pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);

	/* enable DDCB completion irq */
	if (!genwqe_polling_enabled)
		pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;

	dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
	genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

	if (ddcb_requ_collect_debug_data(req)) {
		/* use the kernel copy of debug data. copying back to
		   user buffer happens later */

		genwqe_init_debug_data(cd, &req->debug_data);
		memcpy(&req->debug_data.ddcb_before, pddcb,
		       sizeof(req->debug_data.ddcb_before));
	}

	enqueue_ddcb(cd, queue, pddcb, req->num);
	queue->ddcbs_in_flight++;

	if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
		queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;

	ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
	wake_up_interruptible(&cd->queue_waitq);

	return 0;
}
| 887 | |||
| 888 | /** | ||
| 889 | * __genwqe_execute_raw_ddcb() - Setup and execute DDCB | ||
| 890 | * @cd: pointer to genwqe device descriptor | ||
| 891 | * @req: user provided DDCB request | ||
| 892 | */ | ||
| 893 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, | ||
| 894 | struct genwqe_ddcb_cmd *cmd) | ||
| 895 | { | ||
| 896 | int rc = 0; | ||
| 897 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 898 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); | ||
| 899 | |||
| 900 | if (cmd->asiv_length > DDCB_ASIV_LENGTH) { | ||
| 901 | dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n", | ||
| 902 | __func__, cmd->asiv_length); | ||
| 903 | return -EINVAL; | ||
| 904 | } | ||
| 905 | if (cmd->asv_length > DDCB_ASV_LENGTH) { | ||
| 906 | dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", | ||
| 907 | __func__, cmd->asiv_length); | ||
| 908 | return -EINVAL; | ||
| 909 | } | ||
| 910 | rc = __genwqe_enqueue_ddcb(cd, req); | ||
| 911 | if (rc != 0) | ||
| 912 | return rc; | ||
| 913 | |||
| 914 | rc = __genwqe_wait_ddcb(cd, req); | ||
| 915 | if (rc < 0) /* error or signal interrupt */ | ||
| 916 | goto err_exit; | ||
| 917 | |||
| 918 | if (ddcb_requ_collect_debug_data(req)) { | ||
| 919 | if (copy_to_user((struct genwqe_debug_data __user *) | ||
| 920 | (unsigned long)cmd->ddata_addr, | ||
| 921 | &req->debug_data, | ||
| 922 | sizeof(struct genwqe_debug_data))) | ||
| 923 | return -EFAULT; | ||
| 924 | } | ||
| 925 | |||
| 926 | /* | ||
| 927 | * Higher values than 0x102 indicate completion with faults, | ||
| 928 | * lower values than 0x102 indicate processing faults. Note | ||
| 929 | * that DDCB might have been purged. E.g. Cntl+C. | ||
| 930 | */ | ||
| 931 | if (cmd->retc != DDCB_RETC_COMPLETE) { | ||
| 932 | /* This might happen e.g. flash read, and needs to be | ||
| 933 | handled by the upper layer code. */ | ||
| 934 | rc = -EBADMSG; /* not processed/error retc */ | ||
| 935 | } | ||
| 936 | |||
| 937 | return rc; | ||
| 938 | |||
| 939 | err_exit: | ||
| 940 | __genwqe_purge_ddcb(cd, req); | ||
| 941 | |||
| 942 | if (ddcb_requ_collect_debug_data(req)) { | ||
| 943 | if (copy_to_user((struct genwqe_debug_data __user *) | ||
| 944 | (unsigned long)cmd->ddata_addr, | ||
| 945 | &req->debug_data, | ||
| 946 | sizeof(struct genwqe_debug_data))) | ||
| 947 | return -EFAULT; | ||
| 948 | } | ||
| 949 | return rc; | ||
| 950 | } | ||
| 951 | |||
| 952 | /** | ||
| 953 | * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished | ||
| 954 | * | ||
| 955 | * We use this as condition for our wait-queue code. | ||
| 956 | */ | ||
| 957 | static int genwqe_next_ddcb_ready(struct genwqe_dev *cd) | ||
| 958 | { | ||
| 959 | unsigned long flags; | ||
| 960 | struct ddcb *pddcb; | ||
| 961 | struct ddcb_queue *queue = &cd->queue; | ||
| 962 | |||
| 963 | spin_lock_irqsave(&queue->ddcb_lock, flags); | ||
| 964 | |||
| 965 | if (queue_empty(queue)) { /* emtpy queue */ | ||
| 966 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | ||
| 967 | return 0; | ||
| 968 | } | ||
| 969 | |||
| 970 | pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; | ||
| 971 | if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */ | ||
| 972 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | ||
| 973 | return 1; | ||
| 974 | } | ||
| 975 | |||
| 976 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | ||
| 977 | return 0; | ||
| 978 | } | ||
| 979 | |||
| 980 | /** | ||
| 981 | * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight | ||
| 982 | * | ||
| 983 | * Keep track on the number of DDCBs which ware currently in the | ||
| 984 | * queue. This is needed for statistics as well as conditon if we want | ||
| 985 | * to wait or better do polling in case of no interrupts available. | ||
| 986 | */ | ||
| 987 | int genwqe_ddcbs_in_flight(struct genwqe_dev *cd) | ||
| 988 | { | ||
| 989 | unsigned long flags; | ||
| 990 | int ddcbs_in_flight = 0; | ||
| 991 | struct ddcb_queue *queue = &cd->queue; | ||
| 992 | |||
| 993 | spin_lock_irqsave(&queue->ddcb_lock, flags); | ||
| 994 | ddcbs_in_flight += queue->ddcbs_in_flight; | ||
| 995 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | ||
| 996 | |||
| 997 | return ddcbs_in_flight; | ||
| 998 | } | ||
| 999 | |||
| 1000 | static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) | ||
| 1001 | { | ||
| 1002 | int rc, i; | ||
| 1003 | struct ddcb *pddcb; | ||
| 1004 | u64 val64; | ||
| 1005 | unsigned int queue_size; | ||
| 1006 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 1007 | |||
| 1008 | if (genwqe_ddcb_max < 2) | ||
| 1009 | return -EINVAL; | ||
| 1010 | |||
| 1011 | queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE); | ||
| 1012 | |||
| 1013 | queue->ddcbs_in_flight = 0; /* statistics */ | ||
| 1014 | queue->ddcbs_max_in_flight = 0; | ||
| 1015 | queue->ddcbs_completed = 0; | ||
| 1016 | queue->busy = 0; | ||
| 1017 | |||
| 1018 | queue->ddcb_seq = 0x100; /* start sequence number */ | ||
| 1019 | queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ | ||
| 1020 | queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, | ||
| 1021 | &queue->ddcb_daddr); | ||
| 1022 | if (queue->ddcb_vaddr == NULL) { | ||
| 1023 | dev_err(&pci_dev->dev, | ||
| 1024 | "[%s] **err: could not allocate DDCB **\n", __func__); | ||
| 1025 | return -ENOMEM; | ||
| 1026 | } | ||
| 1027 | memset(queue->ddcb_vaddr, 0, queue_size); | ||
| 1028 | |||
| 1029 | queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) * | ||
| 1030 | queue->ddcb_max, GFP_KERNEL); | ||
| 1031 | if (!queue->ddcb_req) { | ||
| 1032 | rc = -ENOMEM; | ||
| 1033 | goto free_ddcbs; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) * | ||
| 1037 | queue->ddcb_max, GFP_KERNEL); | ||
| 1038 | if (!queue->ddcb_waitqs) { | ||
| 1039 | rc = -ENOMEM; | ||
| 1040 | goto free_requs; | ||
| 1041 | } | ||
| 1042 | |||
| 1043 | for (i = 0; i < queue->ddcb_max; i++) { | ||
| 1044 | pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */ | ||
| 1045 | pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32; | ||
| 1046 | pddcb->retc_16 = cpu_to_be16(0xfff); | ||
| 1047 | |||
| 1048 | queue->ddcb_req[i] = NULL; /* requests */ | ||
| 1049 | init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | queue->ddcb_act = 0; | ||
| 1053 | queue->ddcb_next = 0; /* queue is empty */ | ||
| 1054 | |||
| 1055 | spin_lock_init(&queue->ddcb_lock); | ||
| 1056 | init_waitqueue_head(&queue->ddcb_waitq); | ||
| 1057 | |||
| 1058 | val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ | ||
| 1059 | __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ | ||
| 1060 | __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); | ||
| 1061 | __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); | ||
| 1062 | __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64); | ||
| 1063 | return 0; | ||
| 1064 | |||
| 1065 | free_requs: | ||
| 1066 | kfree(queue->ddcb_req); | ||
| 1067 | queue->ddcb_req = NULL; | ||
| 1068 | free_ddcbs: | ||
| 1069 | __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, | ||
| 1070 | queue->ddcb_daddr); | ||
| 1071 | queue->ddcb_vaddr = NULL; | ||
| 1072 | queue->ddcb_daddr = 0ull; | ||
| 1073 | return -ENODEV; | ||
| 1074 | |||
| 1075 | } | ||
| 1076 | |||
| 1077 | static int ddcb_queue_initialized(struct ddcb_queue *queue) | ||
| 1078 | { | ||
| 1079 | return queue->ddcb_vaddr != NULL; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) | ||
| 1083 | { | ||
| 1084 | unsigned int queue_size; | ||
| 1085 | |||
| 1086 | queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); | ||
| 1087 | |||
| 1088 | kfree(queue->ddcb_req); | ||
| 1089 | queue->ddcb_req = NULL; | ||
| 1090 | |||
| 1091 | if (queue->ddcb_vaddr) { | ||
| 1092 | __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, | ||
| 1093 | queue->ddcb_daddr); | ||
| 1094 | queue->ddcb_vaddr = NULL; | ||
| 1095 | queue->ddcb_daddr = 0ull; | ||
| 1096 | } | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | static irqreturn_t genwqe_pf_isr(int irq, void *dev_id) | ||
| 1100 | { | ||
| 1101 | u64 gfir; | ||
| 1102 | struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; | ||
| 1103 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 1104 | |||
| 1105 | /* | ||
| 1106 | * In case of fatal FIR error the queue is stopped, such that | ||
| 1107 | * we can safely check it without risking anything. | ||
| 1108 | */ | ||
| 1109 | cd->irqs_processed++; | ||
| 1110 | wake_up_interruptible(&cd->queue_waitq); | ||
| 1111 | |||
| 1112 | /* | ||
| 1113 | * Checking for errors before kicking the queue might be | ||
| 1114 | * safer, but slower for the good-case ... See above. | ||
| 1115 | */ | ||
| 1116 | gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); | ||
| 1117 | if ((gfir & GFIR_ERR_TRIGGER) != 0x0) { | ||
| 1118 | |||
| 1119 | wake_up_interruptible(&cd->health_waitq); | ||
| 1120 | |||
| 1121 | /* | ||
| 1122 | * By default GFIRs causes recovery actions. This | ||
| 1123 | * count is just for debug when recovery is masked. | ||
| 1124 | */ | ||
| 1125 | printk_ratelimited(KERN_ERR | ||
| 1126 | "%s %s: [%s] GFIR=%016llx\n", | ||
| 1127 | GENWQE_DEVNAME, dev_name(&pci_dev->dev), | ||
| 1128 | __func__, gfir); | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | return IRQ_HANDLED; | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | static irqreturn_t genwqe_vf_isr(int irq, void *dev_id) | ||
| 1135 | { | ||
| 1136 | struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; | ||
| 1137 | |||
| 1138 | cd->irqs_processed++; | ||
| 1139 | wake_up_interruptible(&cd->queue_waitq); | ||
| 1140 | |||
| 1141 | return IRQ_HANDLED; | ||
| 1142 | } | ||
| 1143 | |||
/**
 * genwqe_card_thread() - Work thread for the DDCB queue
 * @data: pointer to the genwqe device descriptor (struct genwqe_dev *)
 *
 * The idea is to check if there are DDCBs in processing. If there are
 * some finished DDCBs, we process them and wakeup the
 * requestors. Otherwise we give other processes time using
 * cond_resched().
 *
 * Return: always 0; the thread only terminates via kthread_stop().
 */
static int genwqe_card_thread(void *data)
{
	int should_stop = 0, rc = 0;
	struct genwqe_dev *cd = (struct genwqe_dev *)data;

	while (!kthread_should_stop()) {

		genwqe_check_ddcb_queue(cd, &cd->queue);

		if (genwqe_polling_enabled) {
			/* Polling mode: rescan every jiffy, or sooner if
			   DDCBs are in flight. */
			rc = wait_event_interruptible_timeout(
				cd->queue_waitq,
				genwqe_ddcbs_in_flight(cd) ||
				(should_stop = kthread_should_stop()), 1);
		} else {
			/* IRQ mode: the ISRs wake us; 1s timeout as a
			   safety net. */
			rc = wait_event_interruptible_timeout(
				cd->queue_waitq,
				genwqe_next_ddcb_ready(cd) ||
				(should_stop = kthread_should_stop()), HZ);
		}
		/* should_stop is set as a side effect of the wait
		   conditions above. */
		if (should_stop)
			break;

		/*
		 * Avoid soft lockups on heavy loads; we do not want
		 * to disable our interrupts.
		 */
		cond_resched();
	}
	return 0;
}
| 1183 | |||
/**
 * genwqe_setup_service_layer() - Setup DDCB queue
 * @cd: pointer to genwqe device descriptor
 *
 * Allocate DDCBs. Configure Service Layer Controller (SLC). Starts
 * the queue worker thread and installs the interrupt handler.
 *
 * Return: 0 success, negative errno otherwise
 */
int genwqe_setup_service_layer(struct genwqe_dev *cd)
{
	int rc;
	struct ddcb_queue *queue;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* Only the privileged PF resets the card before queue setup. */
	if (genwqe_is_privileged(cd)) {
		rc = genwqe_card_reset(cd);
		if (rc < 0) {
			dev_err(&pci_dev->dev,
				"[%s] err: reset failed.\n", __func__);
			return rc;
		}
		genwqe_read_softreset(cd);
	}

	/* SLC register offsets used to drive this queue */
	queue = &cd->queue;
	queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
	queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
	queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
	queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
	queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
	queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
	queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
	queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
	queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;

	rc = setup_ddcb_queue(cd, queue);
	if (rc != 0) {
		rc = -ENODEV;
		goto err_out;
	}

	init_waitqueue_head(&cd->queue_waitq);
	cd->card_thread = kthread_run(genwqe_card_thread, cd,
				      GENWQE_DEVNAME "%d_thread",
				      cd->card_idx);
	if (IS_ERR(cd->card_thread)) {
		rc = PTR_ERR(cd->card_thread);
		cd->card_thread = NULL;
		goto stop_free_queue;
	}

	/*
	 * NOTE(review): a positive return value appears to mean "only
	 * that many vectors available", hence the retry with the
	 * reduced count - confirm against
	 * genwqe_set_interrupt_capability().
	 */
	rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
	if (rc > 0)
		rc = genwqe_set_interrupt_capability(cd, rc);
	if (rc != 0) {
		rc = -ENODEV;
		goto stop_kthread;
	}

	/*
	 * We must have all wait-queues initialized when we enable the
	 * interrupts. Otherwise we might crash if we get an early
	 * irq.
	 */
	init_waitqueue_head(&cd->health_waitq);

	if (genwqe_is_privileged(cd)) {
		rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
				 GENWQE_DEVNAME, cd);
	} else {
		rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
				 GENWQE_DEVNAME, cd);
	}
	if (rc < 0) {
		dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
		goto stop_irq_cap;
	}

	cd->card_state = GENWQE_CARD_USED;
	return 0;

	/* Error unwind: release resources in reverse acquisition order */
 stop_irq_cap:
	genwqe_reset_interrupt_capability(cd);
 stop_kthread:
	kthread_stop(cd->card_thread);
	cd->card_thread = NULL;
 stop_free_queue:
	free_ddcb_queue(cd, queue);
 err_out:
	return rc;
}
| 1275 | |||
| 1276 | /** | ||
| 1277 | * queue_wake_up_all() - Handles fatal error case | ||
| 1278 | * | ||
| 1279 | * The PCI device got unusable and we have to stop all pending | ||
| 1280 | * requests as fast as we can. The code after this must purge the | ||
| 1281 | * DDCBs in question and ensure that all mappings are freed. | ||
| 1282 | */ | ||
| 1283 | static int queue_wake_up_all(struct genwqe_dev *cd) | ||
| 1284 | { | ||
| 1285 | unsigned int i; | ||
| 1286 | unsigned long flags; | ||
| 1287 | struct ddcb_queue *queue = &cd->queue; | ||
| 1288 | |||
| 1289 | spin_lock_irqsave(&queue->ddcb_lock, flags); | ||
| 1290 | |||
| 1291 | for (i = 0; i < queue->ddcb_max; i++) | ||
| 1292 | wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); | ||
| 1293 | |||
| 1294 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | ||
| 1295 | |||
| 1296 | return 0; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | /** | ||
| 1300 | * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces | ||
| 1301 | * | ||
| 1302 | * Relies on the pre-condition that there are no users of the card | ||
| 1303 | * device anymore e.g. with open file-descriptors. | ||
| 1304 | * | ||
| 1305 | * This function must be robust enough to be called twice. | ||
| 1306 | */ | ||
| 1307 | int genwqe_finish_queue(struct genwqe_dev *cd) | ||
| 1308 | { | ||
| 1309 | int i, rc, in_flight; | ||
| 1310 | int waitmax = genwqe_ddcb_software_timeout; | ||
| 1311 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 1312 | struct ddcb_queue *queue = &cd->queue; | ||
| 1313 | |||
| 1314 | if (!ddcb_queue_initialized(queue)) | ||
| 1315 | return 0; | ||
| 1316 | |||
| 1317 | /* Do not wipe out the error state. */ | ||
| 1318 | if (cd->card_state == GENWQE_CARD_USED) | ||
| 1319 | cd->card_state = GENWQE_CARD_UNUSED; | ||
| 1320 | |||
| 1321 | /* Wake up all requests in the DDCB queue such that they | ||
| 1322 | should be removed nicely. */ | ||
| 1323 | queue_wake_up_all(cd); | ||
| 1324 | |||
| 1325 | /* We must wait to get rid of the DDCBs in flight */ | ||
| 1326 | for (i = 0; i < waitmax; i++) { | ||
| 1327 | in_flight = genwqe_ddcbs_in_flight(cd); | ||
| 1328 | |||
| 1329 | if (in_flight == 0) | ||
| 1330 | break; | ||
| 1331 | |||
| 1332 | dev_dbg(&pci_dev->dev, | ||
| 1333 | " DEBUG [%d/%d] waiting for queue to get empty: " | ||
| 1334 | "%d requests!\n", i, waitmax, in_flight); | ||
| 1335 | |||
| 1336 | /* | ||
| 1337 | * Severe severe error situation: The card itself has | ||
| 1338 | * 16 DDCB queues, each queue has e.g. 32 entries, | ||
| 1339 | * each DDBC has a hardware timeout of currently 250 | ||
| 1340 | * msec but the PFs have a hardware timeout of 8 sec | ||
| 1341 | * ... so I take something large. | ||
| 1342 | */ | ||
| 1343 | msleep(1000); | ||
| 1344 | } | ||
| 1345 | if (i == waitmax) { | ||
| 1346 | dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n", | ||
| 1347 | __func__); | ||
| 1348 | rc = -EIO; | ||
| 1349 | } | ||
| 1350 | return rc; | ||
| 1351 | } | ||
| 1352 | |||
/**
 * genwqe_release_service_layer() - Shutdown DDCB queue
 * @cd: genwqe device descriptor
 *
 * Undoes genwqe_setup_service_layer(): releases the irq and the
 * interrupt capability, stops the queue worker thread and frees the
 * DDCB queue memory.
 *
 * This function must be robust enough to be called twice.
 *
 * Return: 1 if the queue was never initialized (nothing to do),
 *         0 after a successful teardown.
 */
int genwqe_release_service_layer(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!ddcb_queue_initialized(&cd->queue))
		return 1;

	/* Silence the hardware before stopping the worker thread. */
	free_irq(pci_dev->irq, cd);
	genwqe_reset_interrupt_capability(cd);

	if (cd->card_thread != NULL) {
		kthread_stop(cd->card_thread);
		cd->card_thread = NULL;
	}

	free_ddcb_queue(cd, &cd->queue);
	return 0;
}
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h new file mode 100644 index 000000000000..c4f26720753e --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.h | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | #ifndef __CARD_DDCB_H__ | ||
| 2 | #define __CARD_DDCB_H__ | ||
| 3 | |||
| 4 | /** | ||
| 5 | * IBM Accelerator Family 'GenWQE' | ||
| 6 | * | ||
| 7 | * (C) Copyright IBM Corp. 2013 | ||
| 8 | * | ||
| 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 11 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 12 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 13 | * | ||
| 14 | * This program is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License as published by | ||
| 16 | * the Free Software Foundation; either version 2, or (at your option) | ||
| 17 | * any later version. | ||
| 18 | * | ||
| 19 | * This program is distributed in the hope that it will be useful, | ||
| 20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 22 | * GNU General Public License for more details. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/types.h> | ||
| 26 | #include <asm/byteorder.h> | ||
| 27 | |||
| 28 | #include "genwqe_driver.h" | ||
| 29 | #include "card_base.h" | ||
| 30 | |||
/**
 * struct ddcb - Device Driver Control Block DDCB
 * @hsi: Hardware software interlock
 * @shi: Software hardware interlock. Hsi and shi are used to interlock
 *       software and hardware activities. We are using a compare and
 *       swap operation to ensure that there are no races when
 *       activating new DDCBs on the queue, or when we need to
 *       purge a DDCB from a running queue.
 * @acfunc: Accelerator function addresses a unit within the chip
 * @cmd: Command to work on
 * @cmdopts_16: Options for the command
 * @asiv: Input data
 * @asv: Output data
 *
 * The DDCB data format is big endian. Multiple consecutive DDCBs form
 * a DDCB queue.
 */
#define ASIV_LENGTH 104 /* Old specification without ATS field */
#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */
#define ASV_LENGTH 64

struct ddcb {
	union {
		__be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */
		struct {
			__be16 icrc_16;
			u8 hsi;
			u8 shi;
		};
	};
	u8 pre;			/* Preamble */
	u8 xdir;		/* Execution Directives */
	__be16 seqnum_16;	/* Sequence Number */

	u8 acfunc;		/* Accelerator Function.. */
	u8 cmd;			/* Command. */
	__be16 cmdopts_16;	/* Command Options */
	u8 sur;			/* Status Update Rate */
	u8 psp;			/* Protection Section Pointer */
	__be16 rsvd_0e_16;	/* Reserved invariant */

	__be64 fwiv_64;		/* Firmware Invariant. */

	union {
		struct {
			__be64 ats_64;	/* Address Translation Spec */
			u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */
		} n;
		u8 __asiv[ASIV_LENGTH];	/* obsolete */
	};
	u8 asv[ASV_LENGTH];	/* Appl Spec Variant */

	__be16 rsvd_c0_16;	/* Reserved Variant */
	__be16 vcrc_16;		/* Variant CRC */
	__be32 rsvd_32;		/* Reserved unprotected */

	__be64 deque_ts_64;	/* Deque Time Stamp. */

	__be16 retc_16;		/* Return Code */
	__be16 attn_16;		/* Attention/Extended Error Codes */
	__be32 progress_32;	/* Progress indicator. */

	__be64 cmplt_ts_64;	/* Completion Time Stamp. */

	/* The following layout matches the new service layer format */
	__be32 ibdc_32;		/* Inbound Data Count (* 256) */
	__be32 obdc_32;		/* Outbound Data Count (* 256) */

	__be64 rsvd_SLH_64;	/* Reserved for hardware */
	union {			/* private data for driver */
		u8 priv[8];
		__be64 priv_64;
	};
	__be64 disp_ts_64;	/* Dispatch TimeStamp */
} __attribute__((__packed__));
| 106 | |||
| 107 | /* CRC polynomials for DDCB */ | ||
| 108 | #define CRC16_POLYNOMIAL 0x1021 | ||
| 109 | |||
| 110 | /* | ||
| 111 | * SHI: Software to Hardware Interlock | ||
| 112 | * This 1 byte field is written by software to interlock the | ||
| 113 | * movement of one queue entry to another with the hardware in the | ||
| 114 | * chip. | ||
| 115 | */ | ||
| 116 | #define DDCB_SHI_INTR 0x04 /* Bit 2 */ | ||
| 117 | #define DDCB_SHI_PURGE 0x02 /* Bit 1 */ | ||
| 118 | #define DDCB_SHI_NEXT 0x01 /* Bit 0 */ | ||
| 119 | |||
| 120 | /* | ||
| 121 | * HSI: Hardware to Software interlock | ||
| 122 | * This 1 byte field is written by hardware to interlock the movement | ||
| 123 | * of one queue entry to another with the software in the chip. | ||
| 124 | */ | ||
| 125 | #define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */ | ||
| 126 | #define DDCB_HSI_FETCHED 0x04 /* Bit 2 */ | ||
| 127 | |||
/*
 * Accessing HSI/SHI is done 32-bit wide
 * Normally 16-bit access would work too, but on some platforms the
 * 16-bit compare and swap operation is not supported. Therefore
 * switching to 32-bit such that those platforms will work too.
 *
 * iCRC HSI/SHI
 */
| 136 | #define DDCB_INTR_BE32 cpu_to_be32(0x00000004) | ||
| 137 | #define DDCB_PURGE_BE32 cpu_to_be32(0x00000002) | ||
| 138 | #define DDCB_NEXT_BE32 cpu_to_be32(0x00000001) | ||
| 139 | #define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000) | ||
| 140 | #define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400) | ||
| 141 | |||
| 142 | /* Definitions of DDCB presets */ | ||
| 143 | #define DDCB_PRESET_PRE 0x80 | ||
| 144 | #define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */ | ||
| 145 | #define VCRC_LENGTH(n) ((n)) /* used ASV */ | ||
| 146 | |||
| 147 | /* | ||
| 148 | * Genwqe Scatter Gather list | ||
| 149 | * Each element has up to 8 entries. | ||
| 150 | * The chaining element is element 0 cause of prefetching needs. | ||
| 151 | */ | ||
| 152 | |||
| 153 | /* | ||
| 154 | * 0b0110 Chained descriptor. The descriptor is describing the next | ||
| 155 | * descriptor list. | ||
| 156 | */ | ||
| 157 | #define SG_CHAINED (0x6) | ||
| 158 | |||
| 159 | /* | ||
| 160 | * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty | ||
| 161 | * condition. | ||
| 162 | */ | ||
| 163 | #define SG_DATA (0x2) | ||
| 164 | |||
/*
 * 0b0000 Early terminator. This is the last entry on the list
 * regardless of the length indicated.
 */
| 169 | #define SG_END_LIST (0x0) | ||
| 170 | |||
/**
 * struct sg_entry - Scatter gather list entry
 * @target_addr: Either a dma addr of memory to work on or a
 *               dma addr or a subsequent sglist block.
 * @len: Length of the data block.
 * @flags: See above (SG_CHAINED, SG_DATA, SG_END_LIST).
 *
 * Depending on the command the GenWQE card can use a scatter gather
 * list to describe the memory it works on. Always 8 sg_entry's form
 * a block.
 */
struct sg_entry {
	__be64 target_addr;
	__be32 len;
	__be32 flags;
};
| 187 | |||
| 188 | #endif /* __CARD_DDCB_H__ */ | ||
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c new file mode 100644 index 000000000000..3bfdc07a7248 --- /dev/null +++ b/drivers/misc/genwqe/card_debugfs.c | |||
| @@ -0,0 +1,500 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Debugfs interfaces for the GenWQE card. Help to debug potential | ||
| 23 | * problems. Dump internal chip state for debugging and failure | ||
| 24 | * determination. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/kernel.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/debugfs.h> | ||
| 31 | #include <linux/seq_file.h> | ||
| 32 | #include <linux/uaccess.h> | ||
| 33 | |||
| 34 | #include "card_base.h" | ||
| 35 | #include "card_ddcb.h" | ||
| 36 | |||
/*
 * GENWQE_DEBUGFS_RO() - Define a read-only, seq_file backed debugfs
 * attribute: generates the single_open() handler plus the matching
 * file_operations instance.
 * @_name:   base name used in the generated identifiers
 * @_showfn: seq_file show callback rendering the attribute's content
 */
#define GENWQE_DEBUGFS_RO(_name, _showfn) \
	static int genwqe_debugfs_##_name##_open(struct inode *inode, \
						 struct file *file) \
	{ \
		return single_open(file, _showfn, inode->i_private); \
	} \
	static const struct file_operations genwqe_##_name##_fops = { \
		.open = genwqe_debugfs_##_name##_open, \
		.read = seq_read, \
		.llseek = seq_lseek, \
		.release = single_release, \
	}
| 49 | |||
| 50 | static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, | ||
| 51 | int entries) | ||
| 52 | { | ||
| 53 | unsigned int i; | ||
| 54 | u32 v_hi, v_lo; | ||
| 55 | |||
| 56 | for (i = 0; i < entries; i++) { | ||
| 57 | v_hi = (regs[i].val >> 32) & 0xffffffff; | ||
| 58 | v_lo = (regs[i].val) & 0xffffffff; | ||
| 59 | |||
| 60 | seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n", | ||
| 61 | regs[i].addr, regs[i].idx, v_hi, v_lo); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid) | ||
| 66 | { | ||
| 67 | struct genwqe_dev *cd = s->private; | ||
| 68 | int entries; | ||
| 69 | struct genwqe_reg *regs; | ||
| 70 | |||
| 71 | entries = genwqe_ffdc_buff_size(cd, uid); | ||
| 72 | if (entries < 0) | ||
| 73 | return -EINVAL; | ||
| 74 | |||
| 75 | if (entries == 0) | ||
| 76 | return 0; | ||
| 77 | |||
| 78 | regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); | ||
| 79 | if (regs == NULL) | ||
| 80 | return -ENOMEM; | ||
| 81 | |||
| 82 | genwqe_stop_traps(cd); /* halt the traps while dumping data */ | ||
| 83 | genwqe_ffdc_buff_read(cd, uid, regs, entries); | ||
| 84 | genwqe_start_traps(cd); | ||
| 85 | |||
| 86 | dbg_uidn_show(s, regs, entries); | ||
| 87 | kfree(regs); | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
/* seq_file show callbacks: current FFDC dump for units 0, 1 and 2. */
static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused)
{
	return curr_dbg_uidn_show(s, unused, 0);
}

GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show);

static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused)
{
	return curr_dbg_uidn_show(s, unused, 1);
}

GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show);

static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused)
{
	return curr_dbg_uidn_show(s, unused, 2);
}

GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show);
| 111 | |||
/* Print the FFDC dump of unit @uid captured at the previous failure. */
static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
{
	struct genwqe_dev *cd = s->private;

	dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
	return 0;
}

/* seq_file show callbacks: previous FFDC dump for units 0, 1 and 2. */
static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused)
{
	return prev_dbg_uidn_show(s, unused, 0);
}

GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show);

static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused)
{
	return prev_dbg_uidn_show(s, unused, 1);
}

GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show);

static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused)
{
	return prev_dbg_uidn_show(s, unused, 2);
}

GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show);
| 140 | |||
| 141 | static int genwqe_curr_regs_show(struct seq_file *s, void *unused) | ||
| 142 | { | ||
| 143 | struct genwqe_dev *cd = s->private; | ||
| 144 | unsigned int i; | ||
| 145 | struct genwqe_reg *regs; | ||
| 146 | |||
| 147 | regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL); | ||
| 148 | if (regs == NULL) | ||
| 149 | return -ENOMEM; | ||
| 150 | |||
| 151 | genwqe_stop_traps(cd); | ||
| 152 | genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1); | ||
| 153 | genwqe_start_traps(cd); | ||
| 154 | |||
| 155 | for (i = 0; i < GENWQE_FFDC_REGS; i++) { | ||
| 156 | if (regs[i].addr == 0xffffffff) | ||
| 157 | break; /* invalid entries */ | ||
| 158 | |||
| 159 | if (regs[i].val == 0x0ull) | ||
| 160 | continue; /* do not print 0x0 FIRs */ | ||
| 161 | |||
| 162 | seq_printf(s, " 0x%08x 0x%016llx\n", | ||
| 163 | regs[i].addr, regs[i].val); | ||
| 164 | } | ||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show); | ||
| 169 | |||
| 170 | static int genwqe_prev_regs_show(struct seq_file *s, void *unused) | ||
| 171 | { | ||
| 172 | struct genwqe_dev *cd = s->private; | ||
| 173 | unsigned int i; | ||
| 174 | struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs; | ||
| 175 | |||
| 176 | if (regs == NULL) | ||
| 177 | return -EINVAL; | ||
| 178 | |||
| 179 | for (i = 0; i < GENWQE_FFDC_REGS; i++) { | ||
| 180 | if (regs[i].addr == 0xffffffff) | ||
| 181 | break; /* invalid entries */ | ||
| 182 | |||
| 183 | if (regs[i].val == 0x0ull) | ||
| 184 | continue; /* do not print 0x0 FIRs */ | ||
| 185 | |||
| 186 | seq_printf(s, " 0x%08x 0x%016llx\n", | ||
| 187 | regs[i].addr, regs[i].val); | ||
| 188 | } | ||
| 189 | return 0; | ||
| 190 | } | ||
| 191 | |||
| 192 | GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show); | ||
| 193 | |||
| 194 | static int genwqe_jtimer_show(struct seq_file *s, void *unused) | ||
| 195 | { | ||
| 196 | struct genwqe_dev *cd = s->private; | ||
| 197 | unsigned int vf_num; | ||
| 198 | u64 jtimer; | ||
| 199 | |||
| 200 | jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0); | ||
| 201 | seq_printf(s, " PF 0x%016llx %d msec\n", jtimer, | ||
| 202 | genwqe_pf_jobtimeout_msec); | ||
| 203 | |||
| 204 | for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { | ||
| 205 | jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, | ||
| 206 | vf_num + 1); | ||
| 207 | seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer, | ||
| 208 | cd->vf_jobtimeout_msec[vf_num]); | ||
| 209 | } | ||
| 210 | return 0; | ||
| 211 | } | ||
| 212 | |||
| 213 | GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show); | ||
| 214 | |||
| 215 | static int genwqe_queue_working_time_show(struct seq_file *s, void *unused) | ||
| 216 | { | ||
| 217 | struct genwqe_dev *cd = s->private; | ||
| 218 | unsigned int vf_num; | ||
| 219 | u64 t; | ||
| 220 | |||
| 221 | t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0); | ||
| 222 | seq_printf(s, " PF 0x%016llx\n", t); | ||
| 223 | |||
| 224 | for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { | ||
| 225 | t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1); | ||
| 226 | seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t); | ||
| 227 | } | ||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show); | ||
| 232 | |||
/*
 * genwqe_ddcb_info_show() - Dump DDCB queue state
 *
 * Prints the software bookkeeping (queue geometry and in-flight
 * statistics), the SLC queue registers, and one summary line per
 * DDCB slot.
 */
static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
{
	struct genwqe_dev *cd = s->private;
	unsigned int i;
	struct ddcb_queue *queue;
	struct ddcb *pddcb;

	queue = &cd->queue;
	seq_puts(s, "DDCB QUEUE:\n");
	seq_printf(s, " ddcb_max: %d\n"
		   " ddcb_daddr: %016llx - %016llx\n"
		   " ddcb_vaddr: %016llx\n"
		   " ddcbs_in_flight: %u\n"
		   " ddcbs_max_in_flight: %u\n"
		   " ddcbs_completed: %u\n"
		   " busy: %u\n"
		   " irqs_processed: %u\n",
		   queue->ddcb_max, (long long)queue->ddcb_daddr,
		   (long long)queue->ddcb_daddr +
		   (queue->ddcb_max * DDCB_LENGTH),
		   (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,
		   queue->ddcbs_max_in_flight, queue->ddcbs_completed,
		   queue->busy, cd->irqs_processed);

	/* Hardware State */
	seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n"
		   " 0x%08x 0x%016llx IO_QUEUE_STATUS\n"
		   " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n"
		   " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n"
		   " 0x%08x 0x%016llx IO_QUEUE_WRAP\n"
		   " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n"
		   " 0x%08x 0x%016llx IO_QUEUE_WTIME\n"
		   " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n"
		   " 0x%08x 0x%016llx IO_QUEUE_LRW\n",
		   queue->IO_QUEUE_CONFIG,
		   __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
		   queue->IO_QUEUE_STATUS,
		   __genwqe_readq(cd, queue->IO_QUEUE_STATUS),
		   queue->IO_QUEUE_SEGMENT,
		   __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
		   queue->IO_QUEUE_INITSQN,
		   __genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
		   queue->IO_QUEUE_WRAP,
		   __genwqe_readq(cd, queue->IO_QUEUE_WRAP),
		   queue->IO_QUEUE_OFFSET,
		   __genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
		   queue->IO_QUEUE_WTIME,
		   __genwqe_readq(cd, queue->IO_QUEUE_WTIME),
		   queue->IO_QUEUE_ERRCNTS,
		   __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
		   queue->IO_QUEUE_LRW,
		   __genwqe_readq(cd, queue->IO_QUEUE_LRW));

	seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n",
		   queue->ddcb_act, queue->ddcb_next);

	/* One summary line per DDCB slot */
	pddcb = queue->ddcb_vaddr;
	for (i = 0; i < queue->ddcb_max; i++) {
		seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ",
			   i, be16_to_cpu(pddcb->retc_16),
			   be16_to_cpu(pddcb->seqnum_16),
			   pddcb->hsi, pddcb->shi);
		seq_printf(s, "PRIV=%06llx CMD=%02x\n",
			   be64_to_cpu(pddcb->priv_64), pddcb->cmd);
		pddcb++;
	}
	return 0;
}
| 301 | |||
| 302 | GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); | ||
| 303 | |||
| 304 | static int genwqe_info_show(struct seq_file *s, void *unused) | ||
| 305 | { | ||
| 306 | struct genwqe_dev *cd = s->private; | ||
| 307 | u16 val16, type; | ||
| 308 | u64 app_id, slu_id, bitstream = -1; | ||
| 309 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 310 | |||
| 311 | slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); | ||
| 312 | app_id = __genwqe_readq(cd, IO_APP_UNITCFG); | ||
| 313 | |||
| 314 | if (genwqe_is_privileged(cd)) | ||
| 315 | bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); | ||
| 316 | |||
| 317 | val16 = (u16)(slu_id & 0x0fLLU); | ||
| 318 | type = (u16)((slu_id >> 20) & 0xffLLU); | ||
| 319 | |||
| 320 | seq_printf(s, "%s driver version: %s\n" | ||
| 321 | " Device Name/Type: %s %s CardIdx: %d\n" | ||
| 322 | " SLU/APP Config : 0x%016llx/0x%016llx\n" | ||
| 323 | " Build Date : %u/%x/%u\n" | ||
| 324 | " Base Clock : %u MHz\n" | ||
| 325 | " Arch/SVN Release: %u/%llx\n" | ||
| 326 | " Bitstream : %llx\n", | ||
| 327 | GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev), | ||
| 328 | genwqe_is_privileged(cd) ? | ||
| 329 | "Physical" : "Virtual or no SR-IOV", | ||
| 330 | cd->card_idx, slu_id, app_id, | ||
| 331 | (u16)((slu_id >> 12) & 0x0fLLU), /* month */ | ||
| 332 | (u16)((slu_id >> 4) & 0xffLLU), /* day */ | ||
| 333 | (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */ | ||
| 334 | genwqe_base_clock_frequency(cd), | ||
| 335 | (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40, | ||
| 336 | bitstream); | ||
| 337 | |||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | GENWQE_DEBUGFS_RO(info, genwqe_info_show); | ||
| 342 | |||
/**
 * genwqe_init_debugfs() - Create the card's debugfs directory and entries
 * @cd: genwqe device descriptor
 *
 * Creates "<GENWQE_DEVNAME><idx>_card" under the driver-wide debugfs
 * root (cd->debugfs_genwqe).  Non-privileged entries are always
 * created; the register/debug-UID dumps and per-VF timeout knobs are
 * only exposed when genwqe_is_privileged() says so.
 *
 * Return: 0 on success; -ENOMEM if any entry could not be created, in
 * which case the whole directory is removed again before returning.
 */
int genwqe_init_debugfs(struct genwqe_dev *cd)
{
	struct dentry *root;
	struct dentry *file;
	int ret;
	char card_name[64];
	char name[64];
	unsigned int i;

	sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);

	root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
	if (!root) {
		ret = -ENOMEM;
		goto err0;
	}

	/* non privileged interfaces are done here */
	file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
				   &genwqe_ddcb_info_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("info", S_IRUGO, root, cd,
				   &genwqe_info_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	/* writable knobs (0666): fault injection and timeout tuning */
	file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_u32("ddcb_software_timeout", 0666, root,
				  &cd->ddcb_software_timeout);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_u32("kill_timeout", 0666, root,
				  &cd->kill_timeout);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	/* privileged interfaces follow here */
	if (!genwqe_is_privileged(cd)) {
		cd->debugfs_root = root;
		return 0;
	}

	file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
				   &genwqe_curr_regs_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
				   &genwqe_curr_dbg_uid0_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
				   &genwqe_curr_dbg_uid1_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
				   &genwqe_curr_dbg_uid2_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
				   &genwqe_prev_regs_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
				   &genwqe_prev_dbg_uid0_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
				   &genwqe_prev_dbg_uid1_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
				   &genwqe_prev_dbg_uid2_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	/* one job-timeout knob per possible virtual function */
	for (i = 0; i < GENWQE_MAX_VFS; i++) {
		sprintf(name, "vf%d_jobtimeout_msec", i);

		file = debugfs_create_u32(name, 0666, root,
					  &cd->vf_jobtimeout_msec[i]);
		if (!file) {
			ret = -ENOMEM;
			goto err1;
		}
	}

	file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
				   &genwqe_jtimer_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
				   &genwqe_queue_working_time_fops);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	file = debugfs_create_u32("skip_recovery", 0666, root,
				  &cd->skip_recovery);
	if (!file) {
		ret = -ENOMEM;
		goto err1;
	}

	cd->debugfs_root = root;
	return 0;
err1:
	/* removes the directory and every entry created so far */
	debugfs_remove_recursive(root);
err0:
	return ret;
}
| 496 | |||
/**
 * genqwe_exit_debugfs() - Tear down the card's debugfs tree
 * @cd: genwqe device descriptor
 *
 * Removes everything genwqe_init_debugfs() created.
 *
 * NOTE(review): "genqwe" looks like a typo for "genwqe"; the name is
 * kept as-is because external callers reference this symbol.
 */
void genqwe_exit_debugfs(struct genwqe_dev *cd)
{
	debugfs_remove_recursive(cd->debugfs_root);
}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c new file mode 100644 index 000000000000..8f8a6b327cdb --- /dev/null +++ b/drivers/misc/genwqe/card_dev.c | |||
| @@ -0,0 +1,1414 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Character device representation of the GenWQE device. This allows | ||
| 23 | * user-space applications to communicate with the card. | ||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/kernel.h> | ||
| 27 | #include <linux/types.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/pci.h> | ||
| 30 | #include <linux/string.h> | ||
| 31 | #include <linux/fs.h> | ||
| 32 | #include <linux/sched.h> | ||
| 33 | #include <linux/wait.h> | ||
| 34 | #include <linux/delay.h> | ||
| 35 | #include <linux/atomic.h> | ||
| 36 | |||
| 37 | #include "card_base.h" | ||
| 38 | #include "card_ddcb.h" | ||
| 39 | |||
| 40 | static int genwqe_open_files(struct genwqe_dev *cd) | ||
| 41 | { | ||
| 42 | int rc; | ||
| 43 | unsigned long flags; | ||
| 44 | |||
| 45 | spin_lock_irqsave(&cd->file_lock, flags); | ||
| 46 | rc = list_empty(&cd->file_list); | ||
| 47 | spin_unlock_irqrestore(&cd->file_lock, flags); | ||
| 48 | return !rc; | ||
| 49 | } | ||
| 50 | |||
| 51 | static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) | ||
| 52 | { | ||
| 53 | unsigned long flags; | ||
| 54 | |||
| 55 | cfile->owner = current; | ||
| 56 | spin_lock_irqsave(&cd->file_lock, flags); | ||
| 57 | list_add(&cfile->list, &cd->file_list); | ||
| 58 | spin_unlock_irqrestore(&cd->file_lock, flags); | ||
| 59 | } | ||
| 60 | |||
| 61 | static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) | ||
| 62 | { | ||
| 63 | unsigned long flags; | ||
| 64 | |||
| 65 | spin_lock_irqsave(&cd->file_lock, flags); | ||
| 66 | list_del(&cfile->list); | ||
| 67 | spin_unlock_irqrestore(&cd->file_lock, flags); | ||
| 68 | |||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m) | ||
| 73 | { | ||
| 74 | unsigned long flags; | ||
| 75 | |||
| 76 | spin_lock_irqsave(&cfile->pin_lock, flags); | ||
| 77 | list_add(&m->pin_list, &cfile->pin_list); | ||
| 78 | spin_unlock_irqrestore(&cfile->pin_lock, flags); | ||
| 79 | } | ||
| 80 | |||
| 81 | static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m) | ||
| 82 | { | ||
| 83 | unsigned long flags; | ||
| 84 | |||
| 85 | spin_lock_irqsave(&cfile->pin_lock, flags); | ||
| 86 | list_del(&m->pin_list); | ||
| 87 | spin_unlock_irqrestore(&cfile->pin_lock, flags); | ||
| 88 | |||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	If non-NULL, updated with the kernel virtual address
 *		corresponding to @u_addr inside the found pinning
 *
 * Walks the per-file pin list under pin_lock and returns the first
 * pinning that fully covers [u_addr, u_addr + size).
 *
 * Return: Pointer to the corresponding mapping NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					    unsigned long u_addr,
					    unsigned int size,
					    void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* translate by the offset into the pinned region */
			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}
| 126 | |||
| 127 | static void __genwqe_add_mapping(struct genwqe_file *cfile, | ||
| 128 | struct dma_mapping *dma_map) | ||
| 129 | { | ||
| 130 | unsigned long flags; | ||
| 131 | |||
| 132 | spin_lock_irqsave(&cfile->map_lock, flags); | ||
| 133 | list_add(&dma_map->card_list, &cfile->map_list); | ||
| 134 | spin_unlock_irqrestore(&cfile->map_lock, flags); | ||
| 135 | } | ||
| 136 | |||
| 137 | static void __genwqe_del_mapping(struct genwqe_file *cfile, | ||
| 138 | struct dma_mapping *dma_map) | ||
| 139 | { | ||
| 140 | unsigned long flags; | ||
| 141 | |||
| 142 | spin_lock_irqsave(&cfile->map_lock, flags); | ||
| 143 | list_del(&dma_map->card_list); | ||
| 144 | spin_unlock_irqrestore(&cfile->map_lock, flags); | ||
| 145 | } | ||
| 146 | |||
| 147 | |||
/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	descriptor of opened file
 * @u_addr:	user virtual address
 * @size:	size of buffer
 * @dma_addr:	if non-NULL, updated with the DMA address corresponding
 *		to @u_addr inside the found mapping
 * @virt_addr:	if non-NULL, updated with the kernel virtual address
 *		corresponding to @u_addr inside the found mapping
 *
 * Unlike genwqe_search_pin(), a failed lookup is logged here, since
 * callers expect the mapping to exist.
 *
 * Return: Pointer to the corresponding mapping NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: current is as expected and
			   addr is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}
| 194 | |||
/**
 * genwqe_remove_mappings() - Free leftover DMA mappings at file close
 * @cfile: descriptor of the file being released
 *
 * Any entry still on the map list at this point indicates a cleanup
 * bug elsewhere, hence the dev_err() per entry.  The list is walked
 * without map_lock; presumably safe because the file is being torn
 * down and no other user remains — NOTE(review): confirm no
 * concurrent access is possible here.
 */
static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via mmunmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p "
			"u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
			dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						 dma_map->k_vaddr,
						 dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map, NULL);
		}
	}
}
| 233 | |||
| 234 | static void genwqe_remove_pinnings(struct genwqe_file *cfile) | ||
| 235 | { | ||
| 236 | struct list_head *node, *next; | ||
| 237 | struct dma_mapping *dma_map; | ||
| 238 | struct genwqe_dev *cd = cfile->cd; | ||
| 239 | |||
| 240 | list_for_each_safe(node, next, &cfile->pin_list) { | ||
| 241 | dma_map = list_entry(node, struct dma_mapping, pin_list); | ||
| 242 | |||
| 243 | /* | ||
| 244 | * This is not a bug, because a killed processed might | ||
| 245 | * not call the unpin ioctl, which is supposed to free | ||
| 246 | * the resources. | ||
| 247 | * | ||
| 248 | * Pinnings are dymically allocated and need to be | ||
| 249 | * deleted. | ||
| 250 | */ | ||
| 251 | list_del_init(&dma_map->pin_list); | ||
| 252 | genwqe_user_vunmap(cd, dma_map, NULL); | ||
| 253 | kfree(dma_map); | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | /** | ||
| 258 | * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files | ||
| 259 | * | ||
| 260 | * E.g. genwqe_send_signal(cd, SIGIO); | ||
| 261 | */ | ||
| 262 | static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) | ||
| 263 | { | ||
| 264 | unsigned int files = 0; | ||
| 265 | unsigned long flags; | ||
| 266 | struct genwqe_file *cfile; | ||
| 267 | |||
| 268 | spin_lock_irqsave(&cd->file_lock, flags); | ||
| 269 | list_for_each_entry(cfile, &cd->file_list, list) { | ||
| 270 | if (cfile->async_queue) | ||
| 271 | kill_fasync(&cfile->async_queue, sig, POLL_HUP); | ||
| 272 | files++; | ||
| 273 | } | ||
| 274 | spin_unlock_irqrestore(&cd->file_lock, flags); | ||
| 275 | return files; | ||
| 276 | } | ||
| 277 | |||
| 278 | static int genwqe_force_sig(struct genwqe_dev *cd, int sig) | ||
| 279 | { | ||
| 280 | unsigned int files = 0; | ||
| 281 | unsigned long flags; | ||
| 282 | struct genwqe_file *cfile; | ||
| 283 | |||
| 284 | spin_lock_irqsave(&cd->file_lock, flags); | ||
| 285 | list_for_each_entry(cfile, &cd->file_list, list) { | ||
| 286 | force_sig(sig, cfile->owner); | ||
| 287 | files++; | ||
| 288 | } | ||
| 289 | spin_unlock_irqrestore(&cd->file_lock, flags); | ||
| 290 | return files; | ||
| 291 | } | ||
| 292 | |||
| 293 | /** | ||
| 294 | * genwqe_open() - file open | ||
| 295 | * @inode: file system information | ||
| 296 | * @filp: file handle | ||
| 297 | * | ||
| 298 | * This function is executed whenever an application calls | ||
| 299 | * open("/dev/genwqe",..). | ||
| 300 | * | ||
| 301 | * Return: 0 if successful or <0 if errors | ||
| 302 | */ | ||
| 303 | static int genwqe_open(struct inode *inode, struct file *filp) | ||
| 304 | { | ||
| 305 | struct genwqe_dev *cd; | ||
| 306 | struct genwqe_file *cfile; | ||
| 307 | struct pci_dev *pci_dev; | ||
| 308 | |||
| 309 | cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); | ||
| 310 | if (cfile == NULL) | ||
| 311 | return -ENOMEM; | ||
| 312 | |||
| 313 | cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); | ||
| 314 | pci_dev = cd->pci_dev; | ||
| 315 | cfile->cd = cd; | ||
| 316 | cfile->filp = filp; | ||
| 317 | cfile->client = NULL; | ||
| 318 | |||
| 319 | spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */ | ||
| 320 | INIT_LIST_HEAD(&cfile->map_list); | ||
| 321 | |||
| 322 | spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */ | ||
| 323 | INIT_LIST_HEAD(&cfile->pin_list); | ||
| 324 | |||
| 325 | filp->private_data = cfile; | ||
| 326 | |||
| 327 | genwqe_add_file(cd, cfile); | ||
| 328 | return 0; | ||
| 329 | } | ||
| 330 | |||
| 331 | /** | ||
| 332 | * genwqe_fasync() - Setup process to receive SIGIO. | ||
| 333 | * @fd: file descriptor | ||
| 334 | * @filp: file handle | ||
| 335 | * @mode: file mode | ||
| 336 | * | ||
| 337 | * Sending a signal is working as following: | ||
| 338 | * | ||
| 339 | * if (cdev->async_queue) | ||
| 340 | * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); | ||
| 341 | * | ||
| 342 | * Some devices also implement asynchronous notification to indicate | ||
| 343 | * when the device can be written; in this case, of course, | ||
| 344 | * kill_fasync must be called with a mode of POLL_OUT. | ||
| 345 | */ | ||
| 346 | static int genwqe_fasync(int fd, struct file *filp, int mode) | ||
| 347 | { | ||
| 348 | struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; | ||
| 349 | return fasync_helper(fd, filp, mode, &cdev->async_queue); | ||
| 350 | } | ||
| 351 | |||
| 352 | |||
/**
 * genwqe_release() - file close
 * @inode: file system information
 * @filp: file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified filp's */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work we must not release cd when this cfile is
	 * not yet released, otherwise the list entry is invalid,
	 * because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}
| 383 | |||
/**
 * genwqe_vma_open() - VMA open callback
 *
 * Intentionally empty; required so the vm_operations_struct is
 * complete alongside genwqe_vma_close().
 */
static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}
| 388 | |||
/**
 * genwqe_vma_close() - Called each time when vma is unmapped
 * @vma: the virtual memory area being closed
 *
 * Free memory which got allocated by GenWQE mmap(). Looks up the
 * dma_mapping recorded in genwqe_mmap() via the per-file map list,
 * removes it, and frees the consistent DMA buffer backing the VMA.
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	/* vm_file is the /dev/genwqe file the region was mmap()ed from */
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					    cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					 &d_addr, NULL);
	if (dma_map == NULL) {
		/* should not happen: every mmap()ed VMA was recorded */
		dev_err(&pci_dev->dev,
			"  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}
| 419 | |||
/* VMA callbacks for mmap()ed DMA buffers; close frees the buffer */
static struct vm_operations_struct genwqe_vma_ops = {
	.open	= genwqe_vma_open,
	.close	= genwqe_vma_close,
};
| 424 | |||
/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp: file handle
 * @vma: virtual memory area describing the requested region
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we lookup our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 *
 * Return: 0 on success, negative errno on failure
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	/* reject sizes the page allocator could never satisfy */
	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	/* debug aid: expose the DMA address at the start of the buffer
	   for privileged users */
	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				dma_map->k_vaddr,
				dma_map->dma_addr);
free_dma_map:
	kfree(dma_map);
	return rc;
}
| 494 | |||
/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile: descriptor of opened file (provides the genwqe device)
 * @load: details about image load; load->size, retc, attn and
 *        progress are updated as the transfer proceeds
 *
 * Copies the user image in FLASH_BLOCK-sized chunks into a consistent
 * DMA buffer and issues one SLCMD_MOVE_FLASH DDCB per chunk.
 *
 * Return: 0 if successful
 */

#define FLASH_BLOCK 0x40000	/* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':		/* cmdopts = 0x0c (VPD) */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values; layout differs by SLU id */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24]	       = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length  = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		/* advance flash offset and user buffer to next chunk */
		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}
| 638 | |||
/**
 * do_flash_read() - Read back a flash partition into a user buffer
 * @cfile: descriptor of opened file (provides the genwqe device)
 * @load: details about the requested read; load->size, retc, attn and
 *        progress are updated as the transfer proceeds
 *
 * Mirror of do_flash_update(): issues one SLCMD_MOVE_FLASH upload DDCB
 * per FLASH_BLOCK chunk and copies each chunk out to user space.
 *
 * Return: 0 if successful, negative errno otherwise
 */
static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values; layout differs by SLU id */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length  = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		/* -EBADMSG is tolerated here; see CRC comment below */
		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {  /* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* advance flash offset and user buffer to next chunk */
		load->size  -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}
| 765 | |||
| 766 | static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) | ||
| 767 | { | ||
| 768 | int rc; | ||
| 769 | struct genwqe_dev *cd = cfile->cd; | ||
| 770 | struct pci_dev *pci_dev = cfile->cd->pci_dev; | ||
| 771 | struct dma_mapping *dma_map; | ||
| 772 | unsigned long map_addr; | ||
| 773 | unsigned long map_size; | ||
| 774 | |||
| 775 | if ((m->addr == 0x0) || (m->size == 0)) | ||
| 776 | return -EINVAL; | ||
| 777 | |||
| 778 | map_addr = (m->addr & PAGE_MASK); | ||
| 779 | map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); | ||
| 780 | |||
| 781 | dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); | ||
| 782 | if (dma_map == NULL) | ||
| 783 | return -ENOMEM; | ||
| 784 | |||
| 785 | genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED); | ||
| 786 | rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL); | ||
| 787 | if (rc != 0) { | ||
| 788 | dev_err(&pci_dev->dev, | ||
| 789 | "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); | ||
| 790 | return rc; | ||
| 791 | } | ||
| 792 | |||
| 793 | genwqe_add_pin(cfile, dma_map); | ||
| 794 | return 0; | ||
| 795 | } | ||
| 796 | |||
| 797 | static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) | ||
| 798 | { | ||
| 799 | struct genwqe_dev *cd = cfile->cd; | ||
| 800 | struct dma_mapping *dma_map; | ||
| 801 | unsigned long map_addr; | ||
| 802 | unsigned long map_size; | ||
| 803 | |||
| 804 | if (m->addr == 0x0) | ||
| 805 | return -EINVAL; | ||
| 806 | |||
| 807 | map_addr = (m->addr & PAGE_MASK); | ||
| 808 | map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); | ||
| 809 | |||
| 810 | dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL); | ||
| 811 | if (dma_map == NULL) | ||
| 812 | return -ENOENT; | ||
| 813 | |||
| 814 | genwqe_del_pin(cfile, dma_map); | ||
| 815 | genwqe_user_vunmap(cd, dma_map, NULL); | ||
| 816 | kfree(dma_map); | ||
| 817 | return 0; | ||
| 818 | } | ||
| 819 | |||
| 820 | /** | ||
| 821 | * ddcb_cmd_cleanup() - Remove dynamically created fixup entries | ||
| 822 | * | ||
| 823 | * Only if there are any. Pinnings are not removed. | ||
| 824 | */ | ||
| 825 | static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) | ||
| 826 | { | ||
| 827 | unsigned int i; | ||
| 828 | struct dma_mapping *dma_map; | ||
| 829 | struct genwqe_dev *cd = cfile->cd; | ||
| 830 | |||
| 831 | for (i = 0; i < DDCB_FIXUPS; i++) { | ||
| 832 | dma_map = &req->dma_mappings[i]; | ||
| 833 | |||
| 834 | if (dma_mapping_used(dma_map)) { | ||
| 835 | __genwqe_del_mapping(cfile, dma_map); | ||
| 836 | genwqe_user_vunmap(cd, dma_map, req); | ||
| 837 | } | ||
| 838 | if (req->sgl[i] != NULL) { | ||
| 839 | genwqe_free_sgl(cd, req->sgl[i], | ||
| 840 | req->sgl_dma_addr[i], | ||
| 841 | req->sgl_size[i]); | ||
| 842 | req->sgl[i] = NULL; | ||
| 843 | req->sgl_dma_addr[i] = 0x0; | ||
| 844 | req->sgl_size[i] = 0; | ||
| 845 | } | ||
| 846 | |||
| 847 | } | ||
| 848 | return 0; | ||
| 849 | } | ||
| 850 | |||
/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 *
 * @cfile: descriptor of opened file
 * @req:   request whose ASIV area is patched in place
 *
 * Return: 0 on success; negative errno on failure, in which case all
 *         fixups established so far are undone via ddcb_cmd_cleanup().
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;
	const char *type = "UNKNOWN";

	/* Walk the ASIV area in 8-byte steps (offsets 0x00..0x58); each
	   entry's treatment is encoded as an ATS flag in cmd->ats. */
	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* plain data, nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			/* Entry holds big-endian user addr + size pair */
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			/* Flat entries must reference an already
			   established (contiguous) mapping. */
			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						    &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			/* Replace user address with its DMA address */
			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs, nr_pages, offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			/* Reuse a pinned area when it covers the range,
			   else create a temporary mapping for this req. */
			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				type = "PINNING";
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				type = "MAPPING";
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);
				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size, req);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			offs = offset_in_page(u_addr);
			nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);

			/* create genwqe style scatter gather list */
			req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
						       &req->sgl_dma_addr[i],
						       &req->sgl_size[i]);
			if (req->sgl[i] == NULL) {
				rc = -ENOMEM;
				goto err_out;
			}
			genwqe_setup_sgl(cd, offs, u_size,
					 req->sgl[i],
					 req->sgl_dma_addr[i],
					 req->sgl_size[i],
					 m->dma_list,
					 page_offs,
					 nr_pages);

			/* Hardware gets the DMA address of the sgl */
			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgl_dma_addr[i]);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	/* Undo partially established fixups before reporting failure */
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}
| 988 | |||
| 989 | /** | ||
| 990 | * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups | ||
| 991 | * | ||
| 992 | * The code will build up the translation tables or lookup the | ||
| 993 | * contignous memory allocation table to find the right translations | ||
| 994 | * and DMA addresses. | ||
| 995 | */ | ||
| 996 | static int genwqe_execute_ddcb(struct genwqe_file *cfile, | ||
| 997 | struct genwqe_ddcb_cmd *cmd) | ||
| 998 | { | ||
| 999 | int rc; | ||
| 1000 | struct genwqe_dev *cd = cfile->cd; | ||
| 1001 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); | ||
| 1002 | |||
| 1003 | rc = ddcb_cmd_fixups(cfile, req); | ||
| 1004 | if (rc != 0) | ||
| 1005 | return rc; | ||
| 1006 | |||
| 1007 | rc = __genwqe_execute_raw_ddcb(cd, cmd); | ||
| 1008 | ddcb_cmd_cleanup(cfile, req); | ||
| 1009 | return rc; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | static int do_execute_ddcb(struct genwqe_file *cfile, | ||
| 1013 | unsigned long arg, int raw) | ||
| 1014 | { | ||
| 1015 | int rc; | ||
| 1016 | struct genwqe_ddcb_cmd *cmd; | ||
| 1017 | struct ddcb_requ *req; | ||
| 1018 | struct genwqe_dev *cd = cfile->cd; | ||
| 1019 | |||
| 1020 | cmd = ddcb_requ_alloc(); | ||
| 1021 | if (cmd == NULL) | ||
| 1022 | return -ENOMEM; | ||
| 1023 | |||
| 1024 | req = container_of(cmd, struct ddcb_requ, cmd); | ||
| 1025 | |||
| 1026 | if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) { | ||
| 1027 | ddcb_requ_free(cmd); | ||
| 1028 | return -EFAULT; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | if (!raw) | ||
| 1032 | rc = genwqe_execute_ddcb(cfile, cmd); | ||
| 1033 | else | ||
| 1034 | rc = __genwqe_execute_raw_ddcb(cd, cmd); | ||
| 1035 | |||
| 1036 | /* Copy back only the modifed fields. Do not copy ASIV | ||
| 1037 | back since the copy got modified by the driver. */ | ||
| 1038 | if (copy_to_user((void __user *)arg, cmd, | ||
| 1039 | sizeof(*cmd) - DDCB_ASIV_LENGTH)) { | ||
| 1040 | ddcb_requ_free(cmd); | ||
| 1041 | return -EFAULT; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | ddcb_requ_free(cmd); | ||
| 1045 | return rc; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | /** | ||
| 1049 | * genwqe_ioctl() - IO control | ||
| 1050 | * @filp: file handle | ||
| 1051 | * @cmd: command identifier (passed from user) | ||
| 1052 | * @arg: argument (passed from user) | ||
| 1053 | * | ||
| 1054 | * Return: 0 success | ||
| 1055 | */ | ||
| 1056 | static long genwqe_ioctl(struct file *filp, unsigned int cmd, | ||
| 1057 | unsigned long arg) | ||
| 1058 | { | ||
| 1059 | int rc = 0; | ||
| 1060 | struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; | ||
| 1061 | struct genwqe_dev *cd = cfile->cd; | ||
| 1062 | struct genwqe_reg_io __user *io; | ||
| 1063 | u64 val; | ||
| 1064 | u32 reg_offs; | ||
| 1065 | |||
| 1066 | if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) | ||
| 1067 | return -EINVAL; | ||
| 1068 | |||
| 1069 | switch (cmd) { | ||
| 1070 | |||
| 1071 | case GENWQE_GET_CARD_STATE: | ||
| 1072 | put_user(cd->card_state, (enum genwqe_card_state __user *)arg); | ||
| 1073 | return 0; | ||
| 1074 | |||
| 1075 | /* Register access */ | ||
| 1076 | case GENWQE_READ_REG64: { | ||
| 1077 | io = (struct genwqe_reg_io __user *)arg; | ||
| 1078 | |||
| 1079 | if (get_user(reg_offs, &io->num)) | ||
| 1080 | return -EFAULT; | ||
| 1081 | |||
| 1082 | if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) | ||
| 1083 | return -EINVAL; | ||
| 1084 | |||
| 1085 | val = __genwqe_readq(cd, reg_offs); | ||
| 1086 | put_user(val, &io->val64); | ||
| 1087 | return 0; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | case GENWQE_WRITE_REG64: { | ||
| 1091 | io = (struct genwqe_reg_io __user *)arg; | ||
| 1092 | |||
| 1093 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1094 | return -EPERM; | ||
| 1095 | |||
| 1096 | if ((filp->f_flags & O_ACCMODE) == O_RDONLY) | ||
| 1097 | return -EPERM; | ||
| 1098 | |||
| 1099 | if (get_user(reg_offs, &io->num)) | ||
| 1100 | return -EFAULT; | ||
| 1101 | |||
| 1102 | if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) | ||
| 1103 | return -EINVAL; | ||
| 1104 | |||
| 1105 | if (get_user(val, &io->val64)) | ||
| 1106 | return -EFAULT; | ||
| 1107 | |||
| 1108 | __genwqe_writeq(cd, reg_offs, val); | ||
| 1109 | return 0; | ||
| 1110 | } | ||
| 1111 | |||
| 1112 | case GENWQE_READ_REG32: { | ||
| 1113 | io = (struct genwqe_reg_io __user *)arg; | ||
| 1114 | |||
| 1115 | if (get_user(reg_offs, &io->num)) | ||
| 1116 | return -EFAULT; | ||
| 1117 | |||
| 1118 | if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) | ||
| 1119 | return -EINVAL; | ||
| 1120 | |||
| 1121 | val = __genwqe_readl(cd, reg_offs); | ||
| 1122 | put_user(val, &io->val64); | ||
| 1123 | return 0; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | case GENWQE_WRITE_REG32: { | ||
| 1127 | io = (struct genwqe_reg_io __user *)arg; | ||
| 1128 | |||
| 1129 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1130 | return -EPERM; | ||
| 1131 | |||
| 1132 | if ((filp->f_flags & O_ACCMODE) == O_RDONLY) | ||
| 1133 | return -EPERM; | ||
| 1134 | |||
| 1135 | if (get_user(reg_offs, &io->num)) | ||
| 1136 | return -EFAULT; | ||
| 1137 | |||
| 1138 | if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) | ||
| 1139 | return -EINVAL; | ||
| 1140 | |||
| 1141 | if (get_user(val, &io->val64)) | ||
| 1142 | return -EFAULT; | ||
| 1143 | |||
| 1144 | __genwqe_writel(cd, reg_offs, val); | ||
| 1145 | return 0; | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | /* Flash update/reading */ | ||
| 1149 | case GENWQE_SLU_UPDATE: { | ||
| 1150 | struct genwqe_bitstream load; | ||
| 1151 | |||
| 1152 | if (!genwqe_is_privileged(cd)) | ||
| 1153 | return -EPERM; | ||
| 1154 | |||
| 1155 | if ((filp->f_flags & O_ACCMODE) == O_RDONLY) | ||
| 1156 | return -EPERM; | ||
| 1157 | |||
| 1158 | if (copy_from_user(&load, (void __user *)arg, | ||
| 1159 | sizeof(load))) | ||
| 1160 | return -EFAULT; | ||
| 1161 | |||
| 1162 | rc = do_flash_update(cfile, &load); | ||
| 1163 | |||
| 1164 | if (copy_to_user((void __user *)arg, &load, sizeof(load))) | ||
| 1165 | return -EFAULT; | ||
| 1166 | |||
| 1167 | return rc; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | case GENWQE_SLU_READ: { | ||
| 1171 | struct genwqe_bitstream load; | ||
| 1172 | |||
| 1173 | if (!genwqe_is_privileged(cd)) | ||
| 1174 | return -EPERM; | ||
| 1175 | |||
| 1176 | if (genwqe_flash_readback_fails(cd)) | ||
| 1177 | return -ENOSPC; /* known to fail for old versions */ | ||
| 1178 | |||
| 1179 | if (copy_from_user(&load, (void __user *)arg, sizeof(load))) | ||
| 1180 | return -EFAULT; | ||
| 1181 | |||
| 1182 | rc = do_flash_read(cfile, &load); | ||
| 1183 | |||
| 1184 | if (copy_to_user((void __user *)arg, &load, sizeof(load))) | ||
| 1185 | return -EFAULT; | ||
| 1186 | |||
| 1187 | return rc; | ||
| 1188 | } | ||
| 1189 | |||
| 1190 | /* memory pinning and unpinning */ | ||
| 1191 | case GENWQE_PIN_MEM: { | ||
| 1192 | struct genwqe_mem m; | ||
| 1193 | |||
| 1194 | if (copy_from_user(&m, (void __user *)arg, sizeof(m))) | ||
| 1195 | return -EFAULT; | ||
| 1196 | |||
| 1197 | return genwqe_pin_mem(cfile, &m); | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | case GENWQE_UNPIN_MEM: { | ||
| 1201 | struct genwqe_mem m; | ||
| 1202 | |||
| 1203 | if (copy_from_user(&m, (void __user *)arg, sizeof(m))) | ||
| 1204 | return -EFAULT; | ||
| 1205 | |||
| 1206 | return genwqe_unpin_mem(cfile, &m); | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | /* launch an DDCB and wait for completion */ | ||
| 1210 | case GENWQE_EXECUTE_DDCB: | ||
| 1211 | return do_execute_ddcb(cfile, arg, 0); | ||
| 1212 | |||
| 1213 | case GENWQE_EXECUTE_RAW_DDCB: { | ||
| 1214 | |||
| 1215 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1216 | return -EPERM; | ||
| 1217 | |||
| 1218 | return do_execute_ddcb(cfile, arg, 1); | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | default: | ||
| 1222 | return -EINVAL; | ||
| 1223 | } | ||
| 1224 | |||
| 1225 | return rc; | ||
| 1226 | } | ||
| 1227 | |||
#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:        file pointer.
 * @cmd:         command.
 * @arg:         user argument.
 * Return:       zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	/* NOTE(review): forwards directly to the native handler, which
	 * presumes all genwqe ioctl payload structs have identical
	 * layout for 32- and 64-bit userspace — TODO confirm. */
	return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */
| 1246 | |||
/* File operations for /dev/genwqe<n>_card; the ioctl entry points are
 * the main interface, see genwqe_ioctl() above. */
static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl   = genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};
| 1258 | |||
| 1259 | static int genwqe_device_initialized(struct genwqe_dev *cd) | ||
| 1260 | { | ||
| 1261 | return cd->dev != NULL; | ||
| 1262 | } | ||
| 1263 | |||
/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 *
 * Return: 0 on success or negative errno; on failure everything set
 * up so far is unwound in reverse order via the goto labels.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
	 */
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	rc = genwqe_init_debugfs(cd);
	if (rc != 0)
		goto err_debugfs;

	return 0;

	/* Error unwind: strictly reverse order of the setup above */
 err_debugfs:
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;	/* keeps genwqe_device_initialized() false */
	return rc;
}
| 1330 | |||
/* Ask (then force) processes with open file descriptors to terminate.
 * First sends SIGIO and polls once per second for up to
 * genwqe_kill_timeout seconds; if files remain open, escalates to
 * SIGKILL and waits the same amount of time again. Always returns 0. */
static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;	/* nothing to do, nobody has us open */

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
		/* give kill_timeout seconds to close file descriptors ... */
		for (i = 0; (i < genwqe_kill_timeout) &&
			     genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, "  %d sec ...", i);

			cond_resched();
			msleep(1000);
		}

		/* if no open files we can safely continue, else ... */
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
		if (rc) {
			/* Give kill_timout more seconds to end processes */
			for (i = 0; (i < genwqe_kill_timeout) &&
				     genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, "  %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}
| 1374 | |||
/**
 * genwqe_device_remove() - Remove genwqe's char device
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 *
 * Return: 0 when the device was removed, 1 when it was never
 * initialized (nothing to do).
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);

	/*
	 * We currently do wait until all filedescriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */
	/* NOTE(review): peeking at kobj.kref.refcount reaches into
	 * kobject internals; panicking here is a last-resort guard
	 * against freeing resources with live references. */
	rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	/* Tear down in reverse order of genwqe_device_create() */
	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;	/* allows a safe second invocation */

	return 0;
}
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c new file mode 100644 index 000000000000..a72a99266c3c --- /dev/null +++ b/drivers/misc/genwqe/card_sysfs.c | |||
| @@ -0,0 +1,288 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Sysfs interfaces for the GenWQE card. There are attributes to query | ||
| 23 | * the version of the bitstream as well as some for the driver. For | ||
| 24 | * debugging, please also see the debugfs interfaces of this driver. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/version.h> | ||
| 28 | #include <linux/kernel.h> | ||
| 29 | #include <linux/types.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <linux/pci.h> | ||
| 32 | #include <linux/string.h> | ||
| 33 | #include <linux/fs.h> | ||
| 34 | #include <linux/sysfs.h> | ||
| 35 | #include <linux/ctype.h> | ||
| 36 | #include <linux/device.h> | ||
| 37 | |||
| 38 | #include "card_base.h" | ||
| 39 | #include "card_ddcb.h" | ||
| 40 | |||
/* Human-readable card type names, indexed by the value returned from
 * genwqe_card_type(); consumed by type_show() below. */
static const char * const genwqe_types[] = {
	[GENWQE_TYPE_ALTERA_230] = "GenWQE4-230",
	[GENWQE_TYPE_ALTERA_530] = "GenWQE4-530",
	[GENWQE_TYPE_ALTERA_A4]  = "GenWQE5-A4",
	[GENWQE_TYPE_ALTERA_A7]  = "GenWQE5-A7",
};
| 47 | |||
| 48 | static ssize_t status_show(struct device *dev, struct device_attribute *attr, | ||
| 49 | char *buf) | ||
| 50 | { | ||
| 51 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 52 | const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" }; | ||
| 53 | |||
| 54 | return sprintf(buf, "%s\n", cs[cd->card_state]); | ||
| 55 | } | ||
| 56 | static DEVICE_ATTR_RO(status); | ||
| 57 | |||
| 58 | static ssize_t appid_show(struct device *dev, struct device_attribute *attr, | ||
| 59 | char *buf) | ||
| 60 | { | ||
| 61 | char app_name[5]; | ||
| 62 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 63 | |||
| 64 | genwqe_read_app_id(cd, app_name, sizeof(app_name)); | ||
| 65 | return sprintf(buf, "%s\n", app_name); | ||
| 66 | } | ||
| 67 | static DEVICE_ATTR_RO(appid); | ||
| 68 | |||
| 69 | static ssize_t version_show(struct device *dev, struct device_attribute *attr, | ||
| 70 | char *buf) | ||
| 71 | { | ||
| 72 | u64 slu_id, app_id; | ||
| 73 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 74 | |||
| 75 | slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); | ||
| 76 | app_id = __genwqe_readq(cd, IO_APP_UNITCFG); | ||
| 77 | |||
| 78 | return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id); | ||
| 79 | } | ||
| 80 | static DEVICE_ATTR_RO(version); | ||
| 81 | |||
| 82 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, | ||
| 83 | char *buf) | ||
| 84 | { | ||
| 85 | u8 card_type; | ||
| 86 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 87 | |||
| 88 | card_type = genwqe_card_type(cd); | ||
| 89 | return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ? | ||
| 90 | "invalid" : genwqe_types[card_type]); | ||
| 91 | } | ||
| 92 | static DEVICE_ATTR_RO(type); | ||
| 93 | |||
/* Show the version of this driver module (not of the card firmware). */
static ssize_t driver_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%s\n", DRV_VERS_STRING);
}
static DEVICE_ATTR_RO(driver);
| 100 | |||
| 101 | static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, | ||
| 102 | char *buf) | ||
| 103 | { | ||
| 104 | u64 tempsens; | ||
| 105 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 106 | |||
| 107 | tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR); | ||
| 108 | return sprintf(buf, "%016llx\n", tempsens); | ||
| 109 | } | ||
| 110 | static DEVICE_ATTR_RO(tempsens); | ||
| 111 | |||
| 112 | static ssize_t freerunning_timer_show(struct device *dev, | ||
| 113 | struct device_attribute *attr, | ||
| 114 | char *buf) | ||
| 115 | { | ||
| 116 | u64 t; | ||
| 117 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 118 | |||
| 119 | t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER); | ||
| 120 | return sprintf(buf, "%016llx\n", t); | ||
| 121 | } | ||
| 122 | static DEVICE_ATTR_RO(freerunning_timer); | ||
| 123 | |||
| 124 | static ssize_t queue_working_time_show(struct device *dev, | ||
| 125 | struct device_attribute *attr, | ||
| 126 | char *buf) | ||
| 127 | { | ||
| 128 | u64 t; | ||
| 129 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 130 | |||
| 131 | t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME); | ||
| 132 | return sprintf(buf, "%016llx\n", t); | ||
| 133 | } | ||
| 134 | static DEVICE_ATTR_RO(queue_working_time); | ||
| 135 | |||
| 136 | static ssize_t base_clock_show(struct device *dev, | ||
| 137 | struct device_attribute *attr, | ||
| 138 | char *buf) | ||
| 139 | { | ||
| 140 | u64 base_clock; | ||
| 141 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 142 | |||
| 143 | base_clock = genwqe_base_clock_frequency(cd); | ||
| 144 | return sprintf(buf, "%lld\n", base_clock); | ||
| 145 | } | ||
| 146 | static DEVICE_ATTR_RO(base_clock); | ||
| 147 | |||
| 148 | /** | ||
| 149 | * curr_bitstream_show() - Show the current bitstream id | ||
| 150 | * | ||
| 151 | * There is a bug in some old versions of the CPLD which selects the | ||
| 152 | * bitstream, which causes the IO_SLU_BITSTREAM register to report | ||
| 153 | * unreliable data in very rare cases. This makes this sysfs | ||
| 154 | * unreliable up to the point were a new CPLD version is being used. | ||
| 155 | * | ||
| 156 | * Unfortunately there is no automatic way yet to query the CPLD | ||
| 157 | * version, such that you need to manually ensure via programming | ||
| 158 | * tools that you have a recent version of the CPLD software. | ||
| 159 | * | ||
| 160 | * The proposed circumvention is to use a special recovery bitstream | ||
| 161 | * on the backup partition (0) to identify problems while loading the | ||
| 162 | * image. | ||
| 163 | */ | ||
| 164 | static ssize_t curr_bitstream_show(struct device *dev, | ||
| 165 | struct device_attribute *attr, char *buf) | ||
| 166 | { | ||
| 167 | int curr_bitstream; | ||
| 168 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 169 | |||
| 170 | curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; | ||
| 171 | return sprintf(buf, "%d\n", curr_bitstream); | ||
| 172 | } | ||
| 173 | static DEVICE_ATTR_RO(curr_bitstream); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * next_bitstream_show() - Show the next activated bitstream | ||
| 177 | * | ||
| 178 | * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF. | ||
| 179 | */ | ||
| 180 | static ssize_t next_bitstream_show(struct device *dev, | ||
| 181 | struct device_attribute *attr, char *buf) | ||
| 182 | { | ||
| 183 | int next_bitstream; | ||
| 184 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 185 | |||
| 186 | switch ((cd->softreset & 0xc) >> 2) { | ||
| 187 | case 0x2: | ||
| 188 | next_bitstream = 0; | ||
| 189 | break; | ||
| 190 | case 0x3: | ||
| 191 | next_bitstream = 1; | ||
| 192 | break; | ||
| 193 | default: | ||
| 194 | next_bitstream = -1; | ||
| 195 | break; /* error */ | ||
| 196 | } | ||
| 197 | return sprintf(buf, "%d\n", next_bitstream); | ||
| 198 | } | ||
| 199 | |||
| 200 | static ssize_t next_bitstream_store(struct device *dev, | ||
| 201 | struct device_attribute *attr, | ||
| 202 | const char *buf, size_t count) | ||
| 203 | { | ||
| 204 | int partition; | ||
| 205 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 206 | |||
| 207 | if (kstrtoint(buf, 0, &partition) < 0) | ||
| 208 | return -EINVAL; | ||
| 209 | |||
| 210 | switch (partition) { | ||
| 211 | case 0x0: | ||
| 212 | cd->softreset = 0x78; | ||
| 213 | break; | ||
| 214 | case 0x1: | ||
| 215 | cd->softreset = 0x7c; | ||
| 216 | break; | ||
| 217 | default: | ||
| 218 | return -EINVAL; | ||
| 219 | } | ||
| 220 | |||
| 221 | __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); | ||
| 222 | return count; | ||
| 223 | } | ||
| 224 | static DEVICE_ATTR_RW(next_bitstream); | ||
| 225 | |||
/*
 * Create device_attribute structures / params: name, mode, show, store
 * additional flag if valid in VF
 *
 * Full attribute set as seen by the privileged PF; VFs get the
 * reduced set in genwqe_normal_attributes via genwqe_is_visible().
 */
static struct attribute *genwqe_attributes[] = {
	&dev_attr_tempsens.attr,
	&dev_attr_next_bitstream.attr,
	&dev_attr_curr_bitstream.attr,
	&dev_attr_base_clock.attr,
	&dev_attr_driver.attr,
	&dev_attr_type.attr,
	&dev_attr_version.attr,
	&dev_attr_appid.attr,
	&dev_attr_status.attr,
	&dev_attr_freerunning_timer.attr,
	&dev_attr_queue_working_time.attr,
	NULL,
};
| 244 | |||
/*
 * Subset of attributes which remain visible on unprivileged (VF)
 * functions; compared against in genwqe_is_visible().
 */
static struct attribute *genwqe_normal_attributes[] = {
	&dev_attr_driver.attr,
	&dev_attr_type.attr,
	&dev_attr_version.attr,
	&dev_attr_appid.attr,
	&dev_attr_status.attr,
	&dev_attr_freerunning_timer.attr,
	&dev_attr_queue_working_time.attr,
	NULL,
};
| 255 | |||
| 256 | /** | ||
| 257 | * genwqe_is_visible() - Determine if sysfs attribute should be visible or not | ||
| 258 | * | ||
| 259 | * VFs have restricted mmio capabilities, so not all sysfs entries | ||
| 260 | * are allowed in VFs. | ||
| 261 | */ | ||
| 262 | static umode_t genwqe_is_visible(struct kobject *kobj, | ||
| 263 | struct attribute *attr, int n) | ||
| 264 | { | ||
| 265 | unsigned int j; | ||
| 266 | struct device *dev = container_of(kobj, struct device, kobj); | ||
| 267 | struct genwqe_dev *cd = dev_get_drvdata(dev); | ||
| 268 | umode_t mode = attr->mode; | ||
| 269 | |||
| 270 | if (genwqe_is_privileged(cd)) | ||
| 271 | return mode; | ||
| 272 | |||
| 273 | for (j = 0; genwqe_normal_attributes[j] != NULL; j++) | ||
| 274 | if (genwqe_normal_attributes[j] == attr) | ||
| 275 | return mode; | ||
| 276 | |||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | |||
/* Attribute group with runtime (PF vs. VF) visibility filtering */
static struct attribute_group genwqe_attribute_group = {
	.is_visible = genwqe_is_visible,
	.attrs = genwqe_attributes,
};

/* NULL-terminated group list as handed to the driver core */
const struct attribute_group *genwqe_attribute_groups[] = {
	&genwqe_attribute_group,
	NULL,
};
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c new file mode 100644 index 000000000000..6b1a6ef9f1a8 --- /dev/null +++ b/drivers/misc/genwqe/card_utils.c | |||
| @@ -0,0 +1,944 @@ | |||
| 1 | /** | ||
| 2 | * IBM Accelerator Family 'GenWQE' | ||
| 3 | * | ||
| 4 | * (C) Copyright IBM Corp. 2013 | ||
| 5 | * | ||
| 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 8 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 9 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 13 | * as published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
 * Miscellaneous functionality used in the other GenWQE driver parts.
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/dma-mapping.h> | ||
| 27 | #include <linux/sched.h> | ||
| 28 | #include <linux/vmalloc.h> | ||
| 29 | #include <linux/page-flags.h> | ||
| 30 | #include <linux/scatterlist.h> | ||
| 31 | #include <linux/hugetlb.h> | ||
| 32 | #include <linux/iommu.h> | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/pci.h> | ||
| 35 | #include <linux/dma-mapping.h> | ||
| 36 | #include <linux/ctype.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | #include <linux/platform_device.h> | ||
| 39 | #include <linux/delay.h> | ||
| 40 | #include <asm/pgtable.h> | ||
| 41 | |||
| 42 | #include "genwqe_driver.h" | ||
| 43 | #include "card_base.h" | ||
| 44 | #include "card_ddcb.h" | ||
| 45 | |||
| 46 | /** | ||
| 47 | * __genwqe_writeq() - Write 64-bit register | ||
| 48 | * @cd: genwqe device descriptor | ||
| 49 | * @byte_offs: byte offset within BAR | ||
| 50 | * @val: 64-bit value | ||
| 51 | * | ||
| 52 | * Return: 0 if success; < 0 if error | ||
| 53 | */ | ||
| 54 | int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val) | ||
| 55 | { | ||
| 56 | if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) | ||
| 57 | return -EIO; | ||
| 58 | |||
| 59 | if (cd->mmio == NULL) | ||
| 60 | return -EIO; | ||
| 61 | |||
| 62 | __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs); | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | /** | ||
| 67 | * __genwqe_readq() - Read 64-bit register | ||
| 68 | * @cd: genwqe device descriptor | ||
| 69 | * @byte_offs: offset within BAR | ||
| 70 | * | ||
| 71 | * Return: value from register | ||
| 72 | */ | ||
| 73 | u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs) | ||
| 74 | { | ||
| 75 | if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) | ||
| 76 | return 0xffffffffffffffffull; | ||
| 77 | |||
| 78 | if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) && | ||
| 79 | (byte_offs == IO_SLC_CFGREG_GFIR)) | ||
| 80 | return 0x000000000000ffffull; | ||
| 81 | |||
| 82 | if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) && | ||
| 83 | (byte_offs == IO_SLC_CFGREG_GFIR)) | ||
| 84 | return 0x00000000ffff0000ull; | ||
| 85 | |||
| 86 | if (cd->mmio == NULL) | ||
| 87 | return 0xffffffffffffffffull; | ||
| 88 | |||
| 89 | return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs)); | ||
| 90 | } | ||
| 91 | |||
| 92 | /** | ||
| 93 | * __genwqe_writel() - Write 32-bit register | ||
| 94 | * @cd: genwqe device descriptor | ||
| 95 | * @byte_offs: byte offset within BAR | ||
| 96 | * @val: 32-bit value | ||
| 97 | * | ||
| 98 | * Return: 0 if success; < 0 if error | ||
| 99 | */ | ||
| 100 | int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val) | ||
| 101 | { | ||
| 102 | if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) | ||
| 103 | return -EIO; | ||
| 104 | |||
| 105 | if (cd->mmio == NULL) | ||
| 106 | return -EIO; | ||
| 107 | |||
| 108 | __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs); | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 112 | /** | ||
| 113 | * __genwqe_readl() - Read 32-bit register | ||
| 114 | * @cd: genwqe device descriptor | ||
| 115 | * @byte_offs: offset within BAR | ||
| 116 | * | ||
| 117 | * Return: Value from register | ||
| 118 | */ | ||
| 119 | u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs) | ||
| 120 | { | ||
| 121 | if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) | ||
| 122 | return 0xffffffff; | ||
| 123 | |||
| 124 | if (cd->mmio == NULL) | ||
| 125 | return 0xffffffff; | ||
| 126 | |||
| 127 | return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs)); | ||
| 128 | } | ||
| 129 | |||
| 130 | /** | ||
| 131 | * genwqe_read_app_id() - Extract app_id | ||
| 132 | * | ||
| 133 | * app_unitcfg need to be filled with valid data first | ||
| 134 | */ | ||
| 135 | int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len) | ||
| 136 | { | ||
| 137 | int i, j; | ||
| 138 | u32 app_id = (u32)cd->app_unitcfg; | ||
| 139 | |||
| 140 | memset(app_name, 0, len); | ||
| 141 | for (i = 0, j = 0; j < min(len, 4); j++) { | ||
| 142 | char ch = (char)((app_id >> (24 - j*8)) & 0xff); | ||
| 143 | if (ch == ' ') | ||
| 144 | continue; | ||
| 145 | app_name[i++] = isprint(ch) ? ch : 'X'; | ||
| 146 | } | ||
| 147 | return i; | ||
| 148 | } | ||
| 149 | |||
| 150 | /** | ||
| 151 | * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations | ||
| 152 | * | ||
| 153 | * Existing kernel functions seem to use a different polynom, | ||
| 154 | * therefore we could not use them here. | ||
| 155 | * | ||
| 156 | * Genwqe's Polynomial = 0x20044009 | ||
| 157 | */ | ||
| 158 | #define CRC32_POLYNOMIAL 0x20044009 | ||
| 159 | static u32 crc32_tab[256]; /* crc32 lookup table */ | ||
| 160 | |||
| 161 | void genwqe_init_crc32(void) | ||
| 162 | { | ||
| 163 | int i, j; | ||
| 164 | u32 crc; | ||
| 165 | |||
| 166 | for (i = 0; i < 256; i++) { | ||
| 167 | crc = i << 24; | ||
| 168 | for (j = 0; j < 8; j++) { | ||
| 169 | if (crc & 0x80000000) | ||
| 170 | crc = (crc << 1) ^ CRC32_POLYNOMIAL; | ||
| 171 | else | ||
| 172 | crc = (crc << 1); | ||
| 173 | } | ||
| 174 | crc32_tab[i] = crc; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | |||
| 178 | /** | ||
| 179 | * genwqe_crc32() - Generate 32-bit crc as required for DDCBs | ||
| 180 | * @buff: pointer to data buffer | ||
| 181 | * @len: length of data for calculation | ||
| 182 | * @init: initial crc (0xffffffff at start) | ||
| 183 | * | ||
| 184 | * polynomial = x^32 * + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009) | ||
| 185 | |||
| 186 | * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should | ||
| 187 | * result in a crc32 of 0xf33cb7d3. | ||
| 188 | * | ||
| 189 | * The existing kernel crc functions did not cover this polynom yet. | ||
| 190 | * | ||
| 191 | * Return: crc32 checksum. | ||
| 192 | */ | ||
| 193 | u32 genwqe_crc32(u8 *buff, size_t len, u32 init) | ||
| 194 | { | ||
| 195 | int i; | ||
| 196 | u32 crc; | ||
| 197 | |||
| 198 | crc = init; | ||
| 199 | while (len--) { | ||
| 200 | i = ((crc >> 24) ^ *buff++) & 0xFF; | ||
| 201 | crc = (crc << 8) ^ crc32_tab[i]; | ||
| 202 | } | ||
| 203 | return crc; | ||
| 204 | } | ||
| 205 | |||
/*
 * Allocate DMA-coherent memory for device use; refuses requests the
 * page allocator could never satisfy.
 *
 * NOTE(review): "get_order(size) > MAX_ORDER" looks off by one --
 * valid allocation orders are 0..MAX_ORDER-1; confirm whether ">="
 * was intended.
 */
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle)
{
	if (get_order(size) > MAX_ORDER)
		return NULL;

	return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
}
| 214 | |||
| 215 | void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, | ||
| 216 | void *vaddr, dma_addr_t dma_handle) | ||
| 217 | { | ||
| 218 | if (vaddr == NULL) | ||
| 219 | return; | ||
| 220 | |||
| 221 | pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle); | ||
| 222 | } | ||
| 223 | |||
| 224 | static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list, | ||
| 225 | int num_pages) | ||
| 226 | { | ||
| 227 | int i; | ||
| 228 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 229 | |||
| 230 | for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) { | ||
| 231 | pci_unmap_page(pci_dev, dma_list[i], | ||
| 232 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
| 233 | dma_list[i] = 0x0; | ||
| 234 | } | ||
| 235 | } | ||
| 236 | |||
| 237 | static int genwqe_map_pages(struct genwqe_dev *cd, | ||
| 238 | struct page **page_list, int num_pages, | ||
| 239 | dma_addr_t *dma_list) | ||
| 240 | { | ||
| 241 | int i; | ||
| 242 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 243 | |||
| 244 | /* establish DMA mapping for requested pages */ | ||
| 245 | for (i = 0; i < num_pages; i++) { | ||
| 246 | dma_addr_t daddr; | ||
| 247 | |||
| 248 | dma_list[i] = 0x0; | ||
| 249 | daddr = pci_map_page(pci_dev, page_list[i], | ||
| 250 | 0, /* map_offs */ | ||
| 251 | PAGE_SIZE, | ||
| 252 | PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */ | ||
| 253 | |||
| 254 | if (pci_dma_mapping_error(pci_dev, daddr)) { | ||
| 255 | dev_err(&pci_dev->dev, | ||
| 256 | "[%s] err: no dma addr daddr=%016llx!\n", | ||
| 257 | __func__, (long long)daddr); | ||
| 258 | goto err; | ||
| 259 | } | ||
| 260 | |||
| 261 | dma_list[i] = daddr; | ||
| 262 | } | ||
| 263 | return 0; | ||
| 264 | |||
| 265 | err: | ||
| 266 | genwqe_unmap_pages(cd, dma_list, num_pages); | ||
| 267 | return -EIO; | ||
| 268 | } | ||
| 269 | |||
| 270 | static int genwqe_sgl_size(int num_pages) | ||
| 271 | { | ||
| 272 | int len, num_tlb = num_pages / 7; | ||
| 273 | |||
| 274 | len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1); | ||
| 275 | return roundup(len, PAGE_SIZE); | ||
| 276 | } | ||
| 277 | |||
| 278 | struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, | ||
| 279 | dma_addr_t *dma_addr, size_t *sgl_size) | ||
| 280 | { | ||
| 281 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 282 | struct sg_entry *sgl; | ||
| 283 | |||
| 284 | *sgl_size = genwqe_sgl_size(num_pages); | ||
| 285 | if (get_order(*sgl_size) > MAX_ORDER) { | ||
| 286 | dev_err(&pci_dev->dev, | ||
| 287 | "[%s] err: too much memory requested!\n", __func__); | ||
| 288 | return NULL; | ||
| 289 | } | ||
| 290 | |||
| 291 | sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr); | ||
| 292 | if (sgl == NULL) { | ||
| 293 | dev_err(&pci_dev->dev, | ||
| 294 | "[%s] err: no memory available!\n", __func__); | ||
| 295 | return NULL; | ||
| 296 | } | ||
| 297 | |||
| 298 | return sgl; | ||
| 299 | } | ||
| 300 | |||
/**
 * genwqe_setup_sgl() - Fill a preallocated sgl with page addresses
 * @cd: genwqe device descriptor
 * @offs: byte offset of the user buffer within its first page
 * @size: number of bytes to cover
 * @sgl: sgl memory from genwqe_alloc_sgl()
 * @dma_addr: bus address of @sgl itself (needed for chaining entries)
 * @sgl_size: size of @sgl in bytes
 * @dma_list: per-page bus addresses from genwqe_map_pages()
 * @page_offs: index of the first page in @dma_list to use
 * @num_pages: number of pages to cover
 *
 * The list is built in blocks of 8 sg_entries; entry 0 of each block
 * is provisionally written as a chain pointer to the next block and
 * physically adjacent pages are merged into a single entry. The tail
 * of the final block is fixed up afterwards (elements shifted one up,
 * terminated with SG_END_LIST).
 *
 * Return: 0 on success, -EFAULT if @offs or @sgl_size are inconsistent.
 */
int genwqe_setup_sgl(struct genwqe_dev *cd,
		     unsigned long offs,
		     unsigned long size,
		     struct sg_entry *sgl,
		     dma_addr_t dma_addr, size_t sgl_size,
		     dma_addr_t *dma_list, int page_offs, int num_pages)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;

	/* sanity checks */
	if (offs > PAGE_SIZE) {
		dev_err(&pci_dev->dev,
			"[%s] too large start offs %08lx\n", __func__, offs);
		return -EFAULT;
	}
	if (sgl_size < genwqe_sgl_size(num_pages)) {
		dev_err(&pci_dev->dev,
			"[%s] sgl_size too small %08lx for %d pages\n",
			__func__, sgl_size, num_pages);
		return -EFAULT;
	}

	dma_offs = 128;		/* next block if needed/dma_offset */
	map_offs = offs;	/* offset in first page */

	s = &sgl[0];		/* first set of 8 entries */
	p = 0;			/* page */
	while (p < num_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
		s[j].len = cpu_to_be32(128);
		s[j].flags = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);
			daddr = dma_list[page_offs + p] + map_offs;
			size -= size_to_map;
			/* only the first page carries a non-zero offset */
			map_offs = 0;

			/* contiguous with previous entry: extend it */
			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/* pr_info("daddr combining: "
					"%016llx/%08x -> %016llx\n",
					prev_daddr, prev_len, daddr); */

				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == num_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len = cpu_to_be32(size_to_map);
			s[j].flags = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++; /* process next page */
			if (p == num_pages)
				goto fixup; /* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	/* drop the provisional chain entry by shifting the block up */
	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	/* terminate the list */
	s[i].target_addr = cpu_to_be64(0);
	s[i].len = cpu_to_be32(0);
	s[i].flags = cpu_to_be32(SG_END_LIST);
	return 0;
}
| 397 | |||
/* Counterpart of genwqe_alloc_sgl(): release the DMA-coherent sgl memory */
void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
		     dma_addr_t dma_addr, size_t size)
{
	__genwqe_free_consistent(cd, size, sg_list, dma_addr);
}
| 403 | |||
| 404 | /** | ||
| 405 | * free_user_pages() - Give pinned pages back | ||
| 406 | * | ||
| 407 | * Documentation of get_user_pages is in mm/memory.c: | ||
| 408 | * | ||
| 409 | * If the page is written to, set_page_dirty (or set_page_dirty_lock, | ||
| 410 | * as appropriate) must be called after the page is finished with, and | ||
| 411 | * before put_page is called. | ||
| 412 | * | ||
| 413 | * FIXME Could be of use to others and might belong in the generic | ||
| 414 | * code, if others agree. E.g. | ||
| 415 | * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c | ||
| 416 | * ceph_put_page_vector in net/ceph/pagevec.c | ||
| 417 | * maybe more? | ||
| 418 | */ | ||
| 419 | static int free_user_pages(struct page **page_list, unsigned int nr_pages, | ||
| 420 | int dirty) | ||
| 421 | { | ||
| 422 | unsigned int i; | ||
| 423 | |||
| 424 | for (i = 0; i < nr_pages; i++) { | ||
| 425 | if (page_list[i] != NULL) { | ||
| 426 | if (dirty) | ||
| 427 | set_page_dirty_lock(page_list[i]); | ||
| 428 | put_page(page_list[i]); | ||
| 429 | } | ||
| 430 | } | ||
| 431 | return 0; | ||
| 432 | } | ||
| 433 | |||
| 434 | /** | ||
| 435 | * genwqe_user_vmap() - Map user-space memory to virtual kernel memory | ||
| 436 | * @cd: pointer to genwqe device | ||
| 437 | * @m: mapping params | ||
| 438 | * @uaddr: user virtual address | ||
| 439 | * @size: size of memory to be mapped | ||
| 440 | * | ||
| 441 | * We need to think about how we could speed this up. Of course it is | ||
| 442 | * not a good idea to do this over and over again, like we are | ||
| 443 | * currently doing it. Nevertheless, I am curious where on the path | ||
| 444 | * the performance is spend. Most probably within the memory | ||
| 445 | * allocation functions, but maybe also in the DMA mapping code. | ||
| 446 | * | ||
| 447 | * Restrictions: The maximum size of the possible mapping currently depends | ||
| 448 | * on the amount of memory we can get using kzalloc() for the | ||
| 449 | * page_list and pci_alloc_consistent for the sg_list. | ||
| 450 | * The sg_list is currently itself not scattered, which could | ||
| 451 | * be fixed with some effort. The page_list must be split into | ||
| 452 | * PAGE_SIZE chunks too. All that will make the complicated | ||
| 453 | * code more complicated. | ||
| 454 | * | ||
| 455 | * Return: 0 if success | ||
| 456 | */ | ||
| 457 | int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, | ||
| 458 | unsigned long size, struct ddcb_requ *req) | ||
| 459 | { | ||
| 460 | int rc = -EINVAL; | ||
| 461 | unsigned long data, offs; | ||
| 462 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 463 | |||
| 464 | if ((uaddr == NULL) || (size == 0)) { | ||
| 465 | m->size = 0; /* mark unused and not added */ | ||
| 466 | return -EINVAL; | ||
| 467 | } | ||
| 468 | m->u_vaddr = uaddr; | ||
| 469 | m->size = size; | ||
| 470 | |||
| 471 | /* determine space needed for page_list. */ | ||
| 472 | data = (unsigned long)uaddr; | ||
| 473 | offs = offset_in_page(data); | ||
| 474 | m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); | ||
| 475 | |||
| 476 | m->page_list = kcalloc(m->nr_pages, | ||
| 477 | sizeof(struct page *) + sizeof(dma_addr_t), | ||
| 478 | GFP_KERNEL); | ||
| 479 | if (!m->page_list) { | ||
| 480 | dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); | ||
| 481 | m->nr_pages = 0; | ||
| 482 | m->u_vaddr = NULL; | ||
| 483 | m->size = 0; /* mark unused and not added */ | ||
| 484 | return -ENOMEM; | ||
| 485 | } | ||
| 486 | m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); | ||
| 487 | |||
| 488 | /* pin user pages in memory */ | ||
| 489 | rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ | ||
| 490 | m->nr_pages, | ||
| 491 | 1, /* write by caller */ | ||
| 492 | m->page_list); /* ptrs to pages */ | ||
| 493 | |||
| 494 | /* assumption: get_user_pages can be killed by signals. */ | ||
| 495 | if (rc < m->nr_pages) { | ||
| 496 | free_user_pages(m->page_list, rc, 0); | ||
| 497 | rc = -EFAULT; | ||
| 498 | goto fail_get_user_pages; | ||
| 499 | } | ||
| 500 | |||
| 501 | rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); | ||
| 502 | if (rc != 0) | ||
| 503 | goto fail_free_user_pages; | ||
| 504 | |||
| 505 | return 0; | ||
| 506 | |||
| 507 | fail_free_user_pages: | ||
| 508 | free_user_pages(m->page_list, m->nr_pages, 0); | ||
| 509 | |||
| 510 | fail_get_user_pages: | ||
| 511 | kfree(m->page_list); | ||
| 512 | m->page_list = NULL; | ||
| 513 | m->dma_list = NULL; | ||
| 514 | m->nr_pages = 0; | ||
| 515 | m->u_vaddr = NULL; | ||
| 516 | m->size = 0; /* mark unused and not added */ | ||
| 517 | return rc; | ||
| 518 | } | ||
| 519 | |||
| 520 | /** | ||
| 521 | * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel | ||
| 522 | * memory | ||
| 523 | * @cd: pointer to genwqe device | ||
| 524 | * @m: mapping params | ||
| 525 | */ | ||
| 526 | int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, | ||
| 527 | struct ddcb_requ *req) | ||
| 528 | { | ||
| 529 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 530 | |||
| 531 | if (!dma_mapping_used(m)) { | ||
| 532 | dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n", | ||
| 533 | __func__, m); | ||
| 534 | return -EINVAL; | ||
| 535 | } | ||
| 536 | |||
| 537 | if (m->dma_list) | ||
| 538 | genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); | ||
| 539 | |||
| 540 | if (m->page_list) { | ||
| 541 | free_user_pages(m->page_list, m->nr_pages, 1); | ||
| 542 | |||
| 543 | kfree(m->page_list); | ||
| 544 | m->page_list = NULL; | ||
| 545 | m->dma_list = NULL; | ||
| 546 | m->nr_pages = 0; | ||
| 547 | } | ||
| 548 | |||
| 549 | m->u_vaddr = NULL; | ||
| 550 | m->size = 0; /* mark as unused and not added */ | ||
| 551 | return 0; | ||
| 552 | } | ||
| 553 | |||
| 554 | /** | ||
| 555 | * genwqe_card_type() - Get chip type SLU Configuration Register | ||
| 556 | * @cd: pointer to the genwqe device descriptor | ||
| 557 | * Return: 0: Altera Stratix-IV 230 | ||
| 558 | * 1: Altera Stratix-IV 530 | ||
| 559 | * 2: Altera Stratix-V A4 | ||
| 560 | * 3: Altera Stratix-V A7 | ||
| 561 | */ | ||
| 562 | u8 genwqe_card_type(struct genwqe_dev *cd) | ||
| 563 | { | ||
| 564 | u64 card_type = cd->slu_unitcfg; | ||
| 565 | return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); | ||
| 566 | } | ||
| 567 | |||
/**
 * genwqe_card_reset() - Reset the card
 * @cd: pointer to the genwqe device descriptor
 *
 * Performs a hard reset followed by an error reset, clearing the FIR
 * registers in between. Only the privileged PF may touch
 * IO_SLC_CFGREG_SOFTRESET.
 *
 * Return: 0 on success, -ENODEV when called on an unprivileged (VF)
 * function.
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* new SL */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);	/* give the hard reset time to complete */
	/* reads of the *_FIR_CLR registers, results discarded --
	 * presumably read-to-clear; confirm against the HW spec */
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits
	 *
	 * For SL >= 039, Stealth WE bit allows removing
	 * the read-modify-wrote.
	 * r-m-w may require a mask 0x3C to avoid hitting hard
	 * reset again for error reset (should be 0, chicken).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}
| 608 | |||
| 609 | int genwqe_read_softreset(struct genwqe_dev *cd) | ||
| 610 | { | ||
| 611 | u64 bitstream; | ||
| 612 | |||
| 613 | if (!genwqe_is_privileged(cd)) | ||
| 614 | return -ENODEV; | ||
| 615 | |||
| 616 | bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; | ||
| 617 | cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull; | ||
| 618 | return 0; | ||
| 619 | } | ||
| 620 | |||
| 621 | /** | ||
| 622 | * genwqe_set_interrupt_capability() - Configure MSI capability structure | ||
| 623 | * @cd: pointer to the device | ||
| 624 | * Return: 0 if no error | ||
| 625 | */ | ||
| 626 | int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) | ||
| 627 | { | ||
| 628 | int rc; | ||
| 629 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 630 | |||
| 631 | rc = pci_enable_msi_block(pci_dev, count); | ||
| 632 | if (rc == 0) | ||
| 633 | cd->flags |= GENWQE_FLAG_MSI_ENABLED; | ||
| 634 | return rc; | ||
| 635 | } | ||
| 636 | |||
| 637 | /** | ||
| 638 | * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability() | ||
| 639 | * @cd: pointer to the device | ||
| 640 | */ | ||
| 641 | void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) | ||
| 642 | { | ||
| 643 | struct pci_dev *pci_dev = cd->pci_dev; | ||
| 644 | |||
| 645 | if (cd->flags & GENWQE_FLAG_MSI_ENABLED) { | ||
| 646 | pci_disable_msi(pci_dev); | ||
| 647 | cd->flags &= ~GENWQE_FLAG_MSI_ENABLED; | ||
| 648 | } | ||
| 649 | } | ||
| 650 | |||
| 651 | /** | ||
| 652 | * set_reg_idx() - Fill array with data. Ignore illegal offsets. | ||
| 653 | * @cd: card device | ||
| 654 | * @r: debug register array | ||
| 655 | * @i: index to desired entry | ||
| 656 | * @m: maximum possible entries | ||
| 657 | * @addr: addr which is read | ||
| 658 | * @index: index in debug array | ||
| 659 | * @val: read value | ||
| 660 | */ | ||
| 661 | static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r, | ||
| 662 | unsigned int *i, unsigned int m, u32 addr, u32 idx, | ||
| 663 | u64 val) | ||
| 664 | { | ||
| 665 | if (WARN_ON_ONCE(*i >= m)) | ||
| 666 | return -EFAULT; | ||
| 667 | |||
| 668 | r[*i].addr = addr; | ||
| 669 | r[*i].idx = idx; | ||
| 670 | r[*i].val = val; | ||
| 671 | ++*i; | ||
| 672 | return 0; | ||
| 673 | } | ||
| 674 | |||
/* Convenience wrapper of set_reg_idx() for entries without a 2nd index */
static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}
| 680 | |||
/**
 * genwqe_read_ffdc_regs() - Collect FIR/FEC debug registers
 * @cd: card device
 * @regs: output array to fill
 * @max_regs: capacity of @regs
 * @all: if non-zero, dump secondary FIR/FEC registers even when the
 *       corresponding primary FIR bit is not set
 *
 * Reads the global FIR, both unit configs, and per-unit FIR/FEC plus
 * their secondary registers. Unused tail entries of @regs are marked
 * with all-ones values.
 *
 * Return: number of valid entries written to @regs
 */
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/* wherever there is a primary 1, read the 2ndary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}
| 735 | |||
| 736 | /** | ||
| 737 | * genwqe_ffdc_buff_size() - Calculates the number of dump registers | ||
| 738 | */ | ||
| 739 | int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid) | ||
| 740 | { | ||
| 741 | int entries = 0, ring, traps, traces, trace_entries; | ||
| 742 | u32 eevptr_addr, l_addr, d_len, d_type; | ||
| 743 | u64 eevptr, val, addr; | ||
| 744 | |||
| 745 | eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; | ||
| 746 | eevptr = __genwqe_readq(cd, eevptr_addr); | ||
| 747 | |||
| 748 | if ((eevptr != 0x0) && (eevptr != -1ull)) { | ||
| 749 | l_addr = GENWQE_UID_OFFS(uid) | eevptr; | ||
| 750 | |||
| 751 | while (1) { | ||
| 752 | val = __genwqe_readq(cd, l_addr); | ||
| 753 | |||
| 754 | if ((val == 0x0) || (val == -1ull)) | ||
| 755 | break; | ||
| 756 | |||
| 757 | /* 38:24 */ | ||
| 758 | d_len = (val & 0x0000007fff000000ull) >> 24; | ||
| 759 | |||
| 760 | /* 39 */ | ||
| 761 | d_type = (val & 0x0000008000000000ull) >> 36; | ||
| 762 | |||
| 763 | if (d_type) { /* repeat */ | ||
| 764 | entries += d_len; | ||
| 765 | } else { /* size in bytes! */ | ||
| 766 | entries += d_len >> 3; | ||
| 767 | } | ||
| 768 | |||
| 769 | l_addr += 8; | ||
| 770 | } | ||
| 771 | } | ||
| 772 | |||
| 773 | for (ring = 0; ring < 8; ring++) { | ||
| 774 | addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); | ||
| 775 | val = __genwqe_readq(cd, addr); | ||
| 776 | |||
| 777 | if ((val == 0x0ull) || (val == -1ull)) | ||
| 778 | continue; | ||
| 779 | |||
| 780 | traps = (val >> 24) & 0xff; | ||
| 781 | traces = (val >> 16) & 0xff; | ||
| 782 | trace_entries = val & 0xffff; | ||
| 783 | |||
| 784 | entries += traps + (traces * trace_entries); | ||
| 785 | } | ||
| 786 | return entries; | ||
| 787 | } | ||
| 788 | |||
| 789 | /** | ||
| 790 | * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure | ||
| 791 | */ | ||
| 792 | int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid, | ||
| 793 | struct genwqe_reg *regs, unsigned int max_regs) | ||
| 794 | { | ||
| 795 | int i, traps, traces, trace, trace_entries, trace_entry, ring; | ||
| 796 | unsigned int idx = 0; | ||
| 797 | u32 eevptr_addr, l_addr, d_addr, d_len, d_type; | ||
| 798 | u64 eevptr, e, val, addr; | ||
| 799 | |||
| 800 | eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; | ||
| 801 | eevptr = __genwqe_readq(cd, eevptr_addr); | ||
| 802 | |||
| 803 | if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) { | ||
| 804 | l_addr = GENWQE_UID_OFFS(uid) | eevptr; | ||
| 805 | while (1) { | ||
| 806 | e = __genwqe_readq(cd, l_addr); | ||
| 807 | if ((e == 0x0) || (e == 0xffffffffffffffffull)) | ||
| 808 | break; | ||
| 809 | |||
| 810 | d_addr = (e & 0x0000000000ffffffull); /* 23:0 */ | ||
| 811 | d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */ | ||
| 812 | d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */ | ||
| 813 | d_addr |= GENWQE_UID_OFFS(uid); | ||
| 814 | |||
| 815 | if (d_type) { | ||
| 816 | for (i = 0; i < (int)d_len; i++) { | ||
| 817 | val = __genwqe_readq(cd, d_addr); | ||
| 818 | set_reg_idx(cd, regs, &idx, max_regs, | ||
| 819 | d_addr, i, val); | ||
| 820 | } | ||
| 821 | } else { | ||
| 822 | d_len >>= 3; /* Size in bytes! */ | ||
| 823 | for (i = 0; i < (int)d_len; i++, d_addr += 8) { | ||
| 824 | val = __genwqe_readq(cd, d_addr); | ||
| 825 | set_reg_idx(cd, regs, &idx, max_regs, | ||
| 826 | d_addr, 0, val); | ||
| 827 | } | ||
| 828 | } | ||
| 829 | l_addr += 8; | ||
| 830 | } | ||
| 831 | } | ||
| 832 | |||
| 833 | /* | ||
| 834 | * To save time, there are only 6 traces populated on Uid=2, | ||
| 835 | * Ring=1, each with iters=512. | ||
| 836 | */ | ||
| 837 | for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds, | ||
| 838 | 2...7 are ASI rings */ | ||
| 839 | addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); | ||
| 840 | val = __genwqe_readq(cd, addr); | ||
| 841 | |||
| 842 | if ((val == 0x0ull) || (val == -1ull)) | ||
| 843 | continue; | ||
| 844 | |||
| 845 | traps = (val >> 24) & 0xff; /* Number of Traps */ | ||
| 846 | traces = (val >> 16) & 0xff; /* Number of Traces */ | ||
| 847 | trace_entries = val & 0xffff; /* Entries per trace */ | ||
| 848 | |||
| 849 | /* Note: This is a combined loop that dumps both the traps */ | ||
| 850 | /* (for the trace == 0 case) as well as the traces 1 to */ | ||
| 851 | /* 'traces'. */ | ||
| 852 | for (trace = 0; trace <= traces; trace++) { | ||
| 853 | u32 diag_sel = | ||
| 854 | GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace); | ||
| 855 | |||
| 856 | addr = (GENWQE_UID_OFFS(uid) | | ||
| 857 | IO_EXTENDED_DIAG_SELECTOR); | ||
| 858 | __genwqe_writeq(cd, addr, diag_sel); | ||
| 859 | |||
| 860 | for (trace_entry = 0; | ||
| 861 | trace_entry < (trace ? trace_entries : traps); | ||
| 862 | trace_entry++) { | ||
| 863 | addr = (GENWQE_UID_OFFS(uid) | | ||
| 864 | IO_EXTENDED_DIAG_READ_MBX); | ||
| 865 | val = __genwqe_readq(cd, addr); | ||
| 866 | set_reg_idx(cd, regs, &idx, max_regs, addr, | ||
| 867 | (diag_sel<<16) | trace_entry, val); | ||
| 868 | } | ||
| 869 | } | ||
| 870 | } | ||
| 871 | return 0; | ||
| 872 | } | ||
| 873 | |||
| 874 | /** | ||
| 875 | * genwqe_write_vreg() - Write register in virtual window | ||
| 876 | * | ||
| 877 | * Note, these registers are only accessible to the PF through the | ||
| 878 | * VF-window. It is not intended for the VF to access. | ||
| 879 | */ | ||
| 880 | int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func) | ||
| 881 | { | ||
| 882 | __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); | ||
| 883 | __genwqe_writeq(cd, reg, val); | ||
| 884 | return 0; | ||
| 885 | } | ||
| 886 | |||
| 887 | /** | ||
| 888 | * genwqe_read_vreg() - Read register in virtual window | ||
| 889 | * | ||
| 890 | * Note, these registers are only accessible to the PF through the | ||
| 891 | * VF-window. It is not intended for the VF to access. | ||
| 892 | */ | ||
| 893 | u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func) | ||
| 894 | { | ||
| 895 | __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); | ||
| 896 | return __genwqe_readq(cd, reg); | ||
| 897 | } | ||
| 898 | |||
| 899 | /** | ||
| 900 | * genwqe_base_clock_frequency() - Determine base clock frequency of the card | ||
| 901 | * | ||
| 902 | * Note: From a design perspective it turned out to be a bad idea to | ||
| 903 | * use codes here to specify the frequency/speed values. An old | ||
| 904 | * driver cannot understand new codes and is therefore always a | ||
| 905 | * problem. Better is to measure out the value or put the | ||
| 906 | * speed/frequency directly into a register which is always a valid | ||
| 907 | * value for old as well as for new software. | ||
| 908 | * | ||
| 909 | * Return: Card clock in MHz | ||
| 910 | */ | ||
| 911 | int genwqe_base_clock_frequency(struct genwqe_dev *cd) | ||
| 912 | { | ||
| 913 | u16 speed; /* MHz MHz MHz MHz */ | ||
| 914 | static const int speed_grade[] = { 250, 200, 166, 175 }; | ||
| 915 | |||
| 916 | speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full); | ||
| 917 | if (speed >= ARRAY_SIZE(speed_grade)) | ||
| 918 | return 0; /* illegal value */ | ||
| 919 | |||
| 920 | return speed_grade[speed]; | ||
| 921 | } | ||
| 922 | |||
| 923 | /** | ||
| 924 | * genwqe_stop_traps() - Stop traps | ||
| 925 | * | ||
| 926 | * Before reading out the analysis data, we need to stop the traps. | ||
| 927 | */ | ||
| 928 | void genwqe_stop_traps(struct genwqe_dev *cd) | ||
| 929 | { | ||
| 930 | __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull); | ||
| 931 | } | ||
| 932 | |||
| 933 | /** | ||
| 934 | * genwqe_start_traps() - Start traps | ||
| 935 | * | ||
| 936 | * After having read the data, we can/must enable the traps again. | ||
| 937 | */ | ||
| 938 | void genwqe_start_traps(struct genwqe_dev *cd) | ||
| 939 | { | ||
| 940 | __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull); | ||
| 941 | |||
| 942 | if (genwqe_need_err_masking(cd)) | ||
| 943 | __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); | ||
| 944 | } | ||
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h new file mode 100644 index 000000000000..46e916b36c70 --- /dev/null +++ b/drivers/misc/genwqe/genwqe_driver.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | #ifndef __GENWQE_DRIVER_H__ | ||
| 2 | #define __GENWQE_DRIVER_H__ | ||
| 3 | |||
| 4 | /** | ||
| 5 | * IBM Accelerator Family 'GenWQE' | ||
| 6 | * | ||
| 7 | * (C) Copyright IBM Corp. 2013 | ||
| 8 | * | ||
| 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | ||
| 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | ||
| 11 | * Author: Michael Jung <mijung@de.ibm.com> | ||
| 12 | * Author: Michael Ruettger <michael@ibmra.de> | ||
| 13 | * | ||
| 14 | * This program is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 16 | * as published by the Free Software Foundation. | ||
| 17 | * | ||
| 18 | * This program is distributed in the hope that it will be useful, | ||
| 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | * GNU General Public License for more details. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/types.h> | ||
| 25 | #include <linux/stddef.h> | ||
| 26 | #include <linux/cdev.h> | ||
| 27 | #include <linux/list.h> | ||
| 28 | #include <linux/kthread.h> | ||
| 29 | #include <linux/scatterlist.h> | ||
| 30 | #include <linux/iommu.h> | ||
| 31 | #include <linux/spinlock.h> | ||
| 32 | #include <linux/mutex.h> | ||
| 33 | #include <linux/platform_device.h> | ||
| 34 | #include <linux/printk.h> | ||
| 35 | |||
| 36 | #include <asm/byteorder.h> | ||
| 37 | #include <linux/genwqe/genwqe_card.h> | ||
| 38 | |||
| 39 | #define DRV_VERS_STRING "2.0.0" | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Static minor number assignment, until we decide/implement | ||
| 43 | * something dynamic. | ||
| 44 | */ | ||
| 45 | #define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */ | ||
| 46 | |||
| 47 | /** | ||
| 48 | * genwqe_requ_alloc() - Allocate a new DDCB execution request | ||
| 49 | * | ||
| 50 | * This data structure contains the user visible fields of the DDCB | ||
| 51 | * to be executed. | ||
| 52 | * | ||
| 53 | * Return: ptr to genwqe_ddcb_cmd data structure | ||
| 54 | */ | ||
| 55 | struct genwqe_ddcb_cmd *ddcb_requ_alloc(void); | ||
| 56 | |||
| 57 | /** | ||
| 58 | * ddcb_requ_free() - Free DDCB execution request. | ||
| 59 | * @req: ptr to genwqe_ddcb_cmd data structure. | ||
| 60 | */ | ||
| 61 | void ddcb_requ_free(struct genwqe_ddcb_cmd *req); | ||
| 62 | |||
| 63 | u32 genwqe_crc32(u8 *buff, size_t len, u32 init); | ||
| 64 | |||
| 65 | static inline void genwqe_hexdump(struct pci_dev *pci_dev, | ||
| 66 | const void *buff, unsigned int size) | ||
| 67 | { | ||
| 68 | char prefix[32]; | ||
| 69 | |||
| 70 | scnprintf(prefix, sizeof(prefix), "%s %s: ", | ||
| 71 | GENWQE_DEVNAME, pci_name(pci_dev)); | ||
| 72 | |||
| 73 | print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff, | ||
| 74 | size, true); | ||
| 75 | } | ||
| 76 | |||
| 77 | #endif /* __GENWQE_DRIVER_H__ */ | ||
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index a2edb2ee0921..49c7a23f02fc 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c | |||
| @@ -224,7 +224,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | #ifdef CONFIG_IDE | 226 | #ifdef CONFIG_IDE |
| 227 | int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, | 227 | static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, |
| 228 | struct block_device *bdev, unsigned int cmd, | 228 | struct block_device *bdev, unsigned int cmd, |
| 229 | unsigned long arg) | 229 | unsigned long arg) |
| 230 | { | 230 | { |
| @@ -334,9 +334,10 @@ static void execute_location(void *dst) | |||
| 334 | 334 | ||
| 335 | static void execute_user_location(void *dst) | 335 | static void execute_user_location(void *dst) |
| 336 | { | 336 | { |
| 337 | /* Intentionally crossing kernel/user memory boundary. */ | ||
| 337 | void (*func)(void) = dst; | 338 | void (*func)(void) = dst; |
| 338 | 339 | ||
| 339 | if (copy_to_user(dst, do_nothing, EXEC_SIZE)) | 340 | if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) |
| 340 | return; | 341 | return; |
| 341 | func(); | 342 | func(); |
| 342 | } | 343 | } |
| @@ -408,6 +409,8 @@ static void lkdtm_do_action(enum ctype which) | |||
| 408 | case CT_SPINLOCKUP: | 409 | case CT_SPINLOCKUP: |
| 409 | /* Must be called twice to trigger. */ | 410 | /* Must be called twice to trigger. */ |
| 410 | spin_lock(&lock_me_up); | 411 | spin_lock(&lock_me_up); |
| 412 | /* Let sparse know we intended to exit holding the lock. */ | ||
| 413 | __release(&lock_me_up); | ||
| 411 | break; | 414 | break; |
| 412 | case CT_HUNG_TASK: | 415 | case CT_HUNG_TASK: |
| 413 | set_current_state(TASK_UNINTERRUPTIBLE); | 416 | set_current_state(TASK_UNINTERRUPTIBLE); |
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index d22c6864508b..2fad84432829 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
| @@ -177,7 +177,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 177 | unsigned long timeout; | 177 | unsigned long timeout; |
| 178 | int i; | 178 | int i; |
| 179 | 179 | ||
| 180 | /* Only Posible if we are in timeout */ | 180 | /* Only possible if we are in timeout */ |
| 181 | if (!cl || cl != &dev->iamthif_cl) { | 181 | if (!cl || cl != &dev->iamthif_cl) { |
| 182 | dev_dbg(&dev->pdev->dev, "bad file ext.\n"); | 182 | dev_dbg(&dev->pdev->dev, "bad file ext.\n"); |
| 183 | return -ETIMEDOUT; | 183 | return -ETIMEDOUT; |
| @@ -249,7 +249,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 249 | cb->response_buffer.size); | 249 | cb->response_buffer.size); |
| 250 | dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); | 250 | dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); |
| 251 | 251 | ||
| 252 | /* length is being turncated to PAGE_SIZE, however, | 252 | /* length is being truncated to PAGE_SIZE, however, |
| 253 | * the buf_idx may point beyond */ | 253 | * the buf_idx may point beyond */ |
| 254 | length = min_t(size_t, length, (cb->buf_idx - *offset)); | 254 | length = min_t(size_t, length, (cb->buf_idx - *offset)); |
| 255 | 255 | ||
| @@ -316,6 +316,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) | |||
| 316 | mei_hdr.host_addr = dev->iamthif_cl.host_client_id; | 316 | mei_hdr.host_addr = dev->iamthif_cl.host_client_id; |
| 317 | mei_hdr.me_addr = dev->iamthif_cl.me_client_id; | 317 | mei_hdr.me_addr = dev->iamthif_cl.me_client_id; |
| 318 | mei_hdr.reserved = 0; | 318 | mei_hdr.reserved = 0; |
| 319 | mei_hdr.internal = 0; | ||
| 319 | dev->iamthif_msg_buf_index += mei_hdr.length; | 320 | dev->iamthif_msg_buf_index += mei_hdr.length; |
| 320 | ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); | 321 | ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); |
| 321 | if (ret) | 322 | if (ret) |
| @@ -477,6 +478,7 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 477 | mei_hdr.host_addr = cl->host_client_id; | 478 | mei_hdr.host_addr = cl->host_client_id; |
| 478 | mei_hdr.me_addr = cl->me_client_id; | 479 | mei_hdr.me_addr = cl->me_client_id; |
| 479 | mei_hdr.reserved = 0; | 480 | mei_hdr.reserved = 0; |
| 481 | mei_hdr.internal = 0; | ||
| 480 | 482 | ||
| 481 | if (*slots >= msg_slots) { | 483 | if (*slots >= msg_slots) { |
| 482 | mei_hdr.length = len; | 484 | mei_hdr.length = len; |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 87c96e4669e2..1ee2b9492a82 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -154,7 +154,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) | |||
| 154 | return 0; | 154 | return 0; |
| 155 | } | 155 | } |
| 156 | /** | 156 | /** |
| 157 | * mei_io_cb_alloc_resp_buf - allocate respose buffer | 157 | * mei_io_cb_alloc_resp_buf - allocate response buffer |
| 158 | * | 158 | * |
| 159 | * @cb: io callback structure | 159 | * @cb: io callback structure |
| 160 | * @length: size of the buffer | 160 | * @length: size of the buffer |
| @@ -207,7 +207,7 @@ int mei_cl_flush_queues(struct mei_cl *cl) | |||
| 207 | 207 | ||
| 208 | 208 | ||
| 209 | /** | 209 | /** |
| 210 | * mei_cl_init - initializes intialize cl. | 210 | * mei_cl_init - initializes cl. |
| 211 | * | 211 | * |
| 212 | * @cl: host client to be initialized | 212 | * @cl: host client to be initialized |
| 213 | * @dev: mei device | 213 | * @dev: mei device |
| @@ -263,10 +263,10 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) | |||
| 263 | return NULL; | 263 | return NULL; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | /** mei_cl_link: allocte host id in the host map | 266 | /** mei_cl_link: allocate host id in the host map |
| 267 | * | 267 | * |
| 268 | * @cl - host client | 268 | * @cl - host client |
| 269 | * @id - fixed host id or -1 for genereting one | 269 | * @id - fixed host id or -1 for generic one |
| 270 | * | 270 | * |
| 271 | * returns 0 on success | 271 | * returns 0 on success |
| 272 | * -EINVAL on incorrect values | 272 | * -EINVAL on incorrect values |
| @@ -282,19 +282,19 @@ int mei_cl_link(struct mei_cl *cl, int id) | |||
| 282 | 282 | ||
| 283 | dev = cl->dev; | 283 | dev = cl->dev; |
| 284 | 284 | ||
| 285 | /* If Id is not asigned get one*/ | 285 | /* If Id is not assigned get one*/ |
| 286 | if (id == MEI_HOST_CLIENT_ID_ANY) | 286 | if (id == MEI_HOST_CLIENT_ID_ANY) |
| 287 | id = find_first_zero_bit(dev->host_clients_map, | 287 | id = find_first_zero_bit(dev->host_clients_map, |
| 288 | MEI_CLIENTS_MAX); | 288 | MEI_CLIENTS_MAX); |
| 289 | 289 | ||
| 290 | if (id >= MEI_CLIENTS_MAX) { | 290 | if (id >= MEI_CLIENTS_MAX) { |
| 291 | dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; | 291 | dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); |
| 292 | return -EMFILE; | 292 | return -EMFILE; |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | open_handle_count = dev->open_handle_count + dev->iamthif_open_count; | 295 | open_handle_count = dev->open_handle_count + dev->iamthif_open_count; |
| 296 | if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { | 296 | if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { |
| 297 | dev_err(&dev->pdev->dev, "open_handle_count exceded %d", | 297 | dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", |
| 298 | MEI_MAX_OPEN_HANDLE_COUNT); | 298 | MEI_MAX_OPEN_HANDLE_COUNT); |
| 299 | return -EMFILE; | 299 | return -EMFILE; |
| 300 | } | 300 | } |
| @@ -344,8 +344,6 @@ int mei_cl_unlink(struct mei_cl *cl) | |||
| 344 | 344 | ||
| 345 | cl->state = MEI_FILE_INITIALIZING; | 345 | cl->state = MEI_FILE_INITIALIZING; |
| 346 | 346 | ||
| 347 | list_del_init(&cl->link); | ||
| 348 | |||
| 349 | return 0; | 347 | return 0; |
| 350 | } | 348 | } |
| 351 | 349 | ||
| @@ -372,13 +370,14 @@ void mei_host_client_init(struct work_struct *work) | |||
| 372 | } | 370 | } |
| 373 | 371 | ||
| 374 | dev->dev_state = MEI_DEV_ENABLED; | 372 | dev->dev_state = MEI_DEV_ENABLED; |
| 373 | dev->reset_count = 0; | ||
| 375 | 374 | ||
| 376 | mutex_unlock(&dev->device_lock); | 375 | mutex_unlock(&dev->device_lock); |
| 377 | } | 376 | } |
| 378 | 377 | ||
| 379 | 378 | ||
| 380 | /** | 379 | /** |
| 381 | * mei_cl_disconnect - disconnect host clinet form the me one | 380 | * mei_cl_disconnect - disconnect host client from the me one |
| 382 | * | 381 | * |
| 383 | * @cl: host client | 382 | * @cl: host client |
| 384 | * | 383 | * |
| @@ -457,7 +456,7 @@ free: | |||
| 457 | * | 456 | * |
| 458 | * @cl: private data of the file object | 457 | * @cl: private data of the file object |
| 459 | * | 458 | * |
| 460 | * returns ture if other client is connected, 0 - otherwise. | 459 | * returns true if other client is connected, false - otherwise. |
| 461 | */ | 460 | */ |
| 462 | bool mei_cl_is_other_connecting(struct mei_cl *cl) | 461 | bool mei_cl_is_other_connecting(struct mei_cl *cl) |
| 463 | { | 462 | { |
| @@ -481,7 +480,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl) | |||
| 481 | } | 480 | } |
| 482 | 481 | ||
| 483 | /** | 482 | /** |
| 484 | * mei_cl_connect - connect host clinet to the me one | 483 | * mei_cl_connect - connect host client to the me one |
| 485 | * | 484 | * |
| 486 | * @cl: host client | 485 | * @cl: host client |
| 487 | * | 486 | * |
| @@ -729,6 +728,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 729 | mei_hdr.host_addr = cl->host_client_id; | 728 | mei_hdr.host_addr = cl->host_client_id; |
| 730 | mei_hdr.me_addr = cl->me_client_id; | 729 | mei_hdr.me_addr = cl->me_client_id; |
| 731 | mei_hdr.reserved = 0; | 730 | mei_hdr.reserved = 0; |
| 731 | mei_hdr.internal = cb->internal; | ||
| 732 | 732 | ||
| 733 | if (*slots >= msg_slots) { | 733 | if (*slots >= msg_slots) { |
| 734 | mei_hdr.length = len; | 734 | mei_hdr.length = len; |
| @@ -775,7 +775,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 775 | * @cl: host client | 775 | * @cl: host client |
| 776 | * @cl: write callback with filled data | 776 | * @cl: write callback with filled data |
| 777 | * | 777 | * |
| 778 | * returns numbe of bytes sent on success, <0 on failure. | 778 | * returns number of bytes sent on success, <0 on failure. |
| 779 | */ | 779 | */ |
| 780 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | 780 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) |
| 781 | { | 781 | { |
| @@ -828,6 +828,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
| 828 | mei_hdr.host_addr = cl->host_client_id; | 828 | mei_hdr.host_addr = cl->host_client_id; |
| 829 | mei_hdr.me_addr = cl->me_client_id; | 829 | mei_hdr.me_addr = cl->me_client_id; |
| 830 | mei_hdr.reserved = 0; | 830 | mei_hdr.reserved = 0; |
| 831 | mei_hdr.internal = cb->internal; | ||
| 831 | 832 | ||
| 832 | 833 | ||
| 833 | rets = mei_write_message(dev, &mei_hdr, buf->data); | 834 | rets = mei_write_message(dev, &mei_hdr, buf->data); |
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index e3870f22d238..a3ae154444b2 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c | |||
| @@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, | |||
| 43 | 43 | ||
| 44 | mutex_lock(&dev->device_lock); | 44 | mutex_lock(&dev->device_lock); |
| 45 | 45 | ||
| 46 | /* if the driver is not enabled the list won't b consitent */ | 46 | /* if the driver is not enabled the list won't be consistent */ |
| 47 | if (dev->dev_state != MEI_DEV_ENABLED) | 47 | if (dev->dev_state != MEI_DEV_ENABLED) |
| 48 | goto out; | 48 | goto out; |
| 49 | 49 | ||
| @@ -101,7 +101,7 @@ static const struct file_operations mei_dbgfs_fops_devstate = { | |||
| 101 | 101 | ||
| 102 | /** | 102 | /** |
| 103 | * mei_dbgfs_deregister - Remove the debugfs files and directories | 103 | * mei_dbgfs_deregister - Remove the debugfs files and directories |
| 104 | * @mei - pointer to mei device private dat | 104 | * @mei - pointer to mei device private data |
| 105 | */ | 105 | */ |
| 106 | void mei_dbgfs_deregister(struct mei_device *dev) | 106 | void mei_dbgfs_deregister(struct mei_device *dev) |
| 107 | { | 107 | { |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 9b3a0fb7f265..28cd74c073b9 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -28,9 +28,9 @@ | |||
| 28 | * | 28 | * |
| 29 | * @dev: the device structure | 29 | * @dev: the device structure |
| 30 | * | 30 | * |
| 31 | * returns none. | 31 | * returns 0 on success -ENOMEM on allocation failure |
| 32 | */ | 32 | */ |
| 33 | static void mei_hbm_me_cl_allocate(struct mei_device *dev) | 33 | static int mei_hbm_me_cl_allocate(struct mei_device *dev) |
| 34 | { | 34 | { |
| 35 | struct mei_me_client *clients; | 35 | struct mei_me_client *clients; |
| 36 | int b; | 36 | int b; |
| @@ -44,7 +44,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) | |||
| 44 | dev->me_clients_num++; | 44 | dev->me_clients_num++; |
| 45 | 45 | ||
| 46 | if (dev->me_clients_num == 0) | 46 | if (dev->me_clients_num == 0) |
| 47 | return; | 47 | return 0; |
| 48 | 48 | ||
| 49 | kfree(dev->me_clients); | 49 | kfree(dev->me_clients); |
| 50 | dev->me_clients = NULL; | 50 | dev->me_clients = NULL; |
| @@ -56,12 +56,10 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) | |||
| 56 | sizeof(struct mei_me_client), GFP_KERNEL); | 56 | sizeof(struct mei_me_client), GFP_KERNEL); |
| 57 | if (!clients) { | 57 | if (!clients) { |
| 58 | dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); | 58 | dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); |
| 59 | dev->dev_state = MEI_DEV_RESETTING; | 59 | return -ENOMEM; |
| 60 | mei_reset(dev, 1); | ||
| 61 | return; | ||
| 62 | } | 60 | } |
| 63 | dev->me_clients = clients; | 61 | dev->me_clients = clients; |
| 64 | return; | 62 | return 0; |
| 65 | } | 63 | } |
| 66 | 64 | ||
| 67 | /** | 65 | /** |
| @@ -85,12 +83,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) | |||
| 85 | } | 83 | } |
| 86 | 84 | ||
| 87 | /** | 85 | /** |
| 88 | * same_disconn_addr - tells if they have the same address | 86 | * mei_hbm_cl_addr_equal - tells if they have the same address |
| 89 | * | 87 | * |
| 90 | * @file: private data of the file object. | 88 | * @cl: - client |
| 91 | * @disconn: disconnection request. | 89 | * @buf: buffer with cl header |
| 92 | * | 90 | * |
| 93 | * returns true if addres are same | 91 | * returns true if addresses are the same |
| 94 | */ | 92 | */ |
| 95 | static inline | 93 | static inline |
| 96 | bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) | 94 | bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) |
| @@ -128,6 +126,17 @@ static bool is_treat_specially_client(struct mei_cl *cl, | |||
| 128 | return false; | 126 | return false; |
| 129 | } | 127 | } |
| 130 | 128 | ||
| 129 | /** | ||
| 130 | * mei_hbm_idle - set hbm to idle state | ||
| 131 | * | ||
| 132 | * @dev: the device structure | ||
| 133 | */ | ||
| 134 | void mei_hbm_idle(struct mei_device *dev) | ||
| 135 | { | ||
| 136 | dev->init_clients_timer = 0; | ||
| 137 | dev->hbm_state = MEI_HBM_IDLE; | ||
| 138 | } | ||
| 139 | |||
| 131 | int mei_hbm_start_wait(struct mei_device *dev) | 140 | int mei_hbm_start_wait(struct mei_device *dev) |
| 132 | { | 141 | { |
| 133 | int ret; | 142 | int ret; |
| @@ -137,7 +146,7 @@ int mei_hbm_start_wait(struct mei_device *dev) | |||
| 137 | mutex_unlock(&dev->device_lock); | 146 | mutex_unlock(&dev->device_lock); |
| 138 | ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, | 147 | ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, |
| 139 | dev->hbm_state == MEI_HBM_IDLE || | 148 | dev->hbm_state == MEI_HBM_IDLE || |
| 140 | dev->hbm_state > MEI_HBM_START, | 149 | dev->hbm_state >= MEI_HBM_STARTED, |
| 141 | mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); | 150 | mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); |
| 142 | mutex_lock(&dev->device_lock); | 151 | mutex_lock(&dev->device_lock); |
| 143 | 152 | ||
| @@ -153,12 +162,15 @@ int mei_hbm_start_wait(struct mei_device *dev) | |||
| 153 | * mei_hbm_start_req - sends start request message. | 162 | * mei_hbm_start_req - sends start request message. |
| 154 | * | 163 | * |
| 155 | * @dev: the device structure | 164 | * @dev: the device structure |
| 165 | * | ||
| 166 | * returns 0 on success and < 0 on failure | ||
| 156 | */ | 167 | */ |
| 157 | int mei_hbm_start_req(struct mei_device *dev) | 168 | int mei_hbm_start_req(struct mei_device *dev) |
| 158 | { | 169 | { |
| 159 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | 170 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; |
| 160 | struct hbm_host_version_request *start_req; | 171 | struct hbm_host_version_request *start_req; |
| 161 | const size_t len = sizeof(struct hbm_host_version_request); | 172 | const size_t len = sizeof(struct hbm_host_version_request); |
| 173 | int ret; | ||
| 162 | 174 | ||
| 163 | mei_hbm_hdr(mei_hdr, len); | 175 | mei_hbm_hdr(mei_hdr, len); |
| 164 | 176 | ||
| @@ -170,12 +182,13 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
| 170 | start_req->host_version.minor_version = HBM_MINOR_VERSION; | 182 | start_req->host_version.minor_version = HBM_MINOR_VERSION; |
| 171 | 183 | ||
| 172 | dev->hbm_state = MEI_HBM_IDLE; | 184 | dev->hbm_state = MEI_HBM_IDLE; |
| 173 | if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { | 185 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
| 174 | dev_err(&dev->pdev->dev, "version message write failed\n"); | 186 | if (ret) { |
| 175 | dev->dev_state = MEI_DEV_RESETTING; | 187 | dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", |
| 176 | mei_reset(dev, 1); | 188 | ret); |
| 177 | return -EIO; | 189 | return ret; |
| 178 | } | 190 | } |
| 191 | |||
| 179 | dev->hbm_state = MEI_HBM_START; | 192 | dev->hbm_state = MEI_HBM_START; |
| 180 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; | 193 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; |
| 181 | return 0; | 194 | return 0; |
| @@ -186,13 +199,15 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
| 186 | * | 199 | * |
| 187 | * @dev: the device structure | 200 | * @dev: the device structure |
| 188 | * | 201 | * |
| 189 | * returns none. | 202 | * returns 0 on success and < 0 on failure |
| 190 | */ | 203 | */ |
| 191 | static void mei_hbm_enum_clients_req(struct mei_device *dev) | 204 | static int mei_hbm_enum_clients_req(struct mei_device *dev) |
| 192 | { | 205 | { |
| 193 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | 206 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; |
| 194 | struct hbm_host_enum_request *enum_req; | 207 | struct hbm_host_enum_request *enum_req; |
| 195 | const size_t len = sizeof(struct hbm_host_enum_request); | 208 | const size_t len = sizeof(struct hbm_host_enum_request); |
| 209 | int ret; | ||
| 210 | |||
| 196 | /* enumerate clients */ | 211 | /* enumerate clients */ |
| 197 | mei_hbm_hdr(mei_hdr, len); | 212 | mei_hbm_hdr(mei_hdr, len); |
| 198 | 213 | ||
| @@ -200,14 +215,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev) | |||
| 200 | memset(enum_req, 0, len); | 215 | memset(enum_req, 0, len); |
| 201 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; | 216 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; |
| 202 | 217 | ||
| 203 | if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { | 218 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
| 204 | dev->dev_state = MEI_DEV_RESETTING; | 219 | if (ret) { |
| 205 | dev_err(&dev->pdev->dev, "enumeration request write failed.\n"); | 220 | dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", |
| 206 | mei_reset(dev, 1); | 221 | ret); |
| 222 | return ret; | ||
| 207 | } | 223 | } |
| 208 | dev->hbm_state = MEI_HBM_ENUM_CLIENTS; | 224 | dev->hbm_state = MEI_HBM_ENUM_CLIENTS; |
| 209 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; | 225 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; |
| 210 | return; | 226 | return 0; |
| 211 | } | 227 | } |
| 212 | 228 | ||
| 213 | /** | 229 | /** |
| @@ -215,7 +231,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev) | |||
| 215 | * | 231 | * |
| 216 | * @dev: the device structure | 232 | * @dev: the device structure |
| 217 | * | 233 | * |
| 218 | * returns none. | 234 | * returns 0 on success and < 0 on failure |
| 219 | */ | 235 | */ |
| 220 | 236 | ||
| 221 | static int mei_hbm_prop_req(struct mei_device *dev) | 237 | static int mei_hbm_prop_req(struct mei_device *dev) |
| @@ -226,7 +242,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 226 | const size_t len = sizeof(struct hbm_props_request); | 242 | const size_t len = sizeof(struct hbm_props_request); |
| 227 | unsigned long next_client_index; | 243 | unsigned long next_client_index; |
| 228 | unsigned long client_num; | 244 | unsigned long client_num; |
| 229 | 245 | int ret; | |
| 230 | 246 | ||
| 231 | client_num = dev->me_client_presentation_num; | 247 | client_num = dev->me_client_presentation_num; |
| 232 | 248 | ||
| @@ -253,12 +269,11 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 253 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; | 269 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; |
| 254 | prop_req->address = next_client_index; | 270 | prop_req->address = next_client_index; |
| 255 | 271 | ||
| 256 | if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { | 272 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
| 257 | dev->dev_state = MEI_DEV_RESETTING; | 273 | if (ret) { |
| 258 | dev_err(&dev->pdev->dev, "properties request write failed\n"); | 274 | dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", |
| 259 | mei_reset(dev, 1); | 275 | ret); |
| 260 | 276 | return ret; | |
| 261 | return -EIO; | ||
| 262 | } | 277 | } |
| 263 | 278 | ||
| 264 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; | 279 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; |
| @@ -268,7 +283,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 268 | } | 283 | } |
| 269 | 284 | ||
| 270 | /** | 285 | /** |
| 271 | * mei_hbm_stop_req_prepare - perpare stop request message | 286 | * mei_hbm_stop_req_prepare - prepare stop request message |
| 272 | * | 287 | * |
| 273 | * @dev - mei device | 288 | * @dev - mei device |
| 274 | * @mei_hdr - mei message header | 289 | * @mei_hdr - mei message header |
| @@ -289,7 +304,7 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev, | |||
| 289 | } | 304 | } |
| 290 | 305 | ||
| 291 | /** | 306 | /** |
| 292 | * mei_hbm_cl_flow_control_req - sends flow control requst. | 307 | * mei_hbm_cl_flow_control_req - sends flow control request. |
| 293 | * | 308 | * |
| 294 | * @dev: the device structure | 309 | * @dev: the device structure |
| 295 | * @cl: client info | 310 | * @cl: client info |
| @@ -451,7 +466,7 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) | |||
| 451 | } | 466 | } |
| 452 | 467 | ||
| 453 | /** | 468 | /** |
| 454 | * mei_hbm_cl_connect_res - connect resposne from the ME | 469 | * mei_hbm_cl_connect_res - connect response from the ME |
| 455 | * | 470 | * |
| 456 | * @dev: the device structure | 471 | * @dev: the device structure |
| 457 | * @rs: connect response bus message | 472 | * @rs: connect response bus message |
| @@ -505,8 +520,8 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, | |||
| 505 | 520 | ||
| 506 | 521 | ||
| 507 | /** | 522 | /** |
| 508 | * mei_hbm_fw_disconnect_req - disconnect request initiated by me | 523 | * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware |
| 509 | * host sends disoconnect response | 524 | * host sends disconnect response |
| 510 | * | 525 | * |
| 511 | * @dev: the device structure. | 526 | * @dev: the device structure. |
| 512 | * @disconnect_req: disconnect request bus message from the me | 527 | * @disconnect_req: disconnect request bus message from the me |
| @@ -559,8 +574,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev) | |||
| 559 | * | 574 | * |
| 560 | * @dev: the device structure | 575 | * @dev: the device structure |
| 561 | * @mei_hdr: header of bus message | 576 | * @mei_hdr: header of bus message |
| 577 | * | ||
| 578 | * returns 0 on success and < 0 on failure | ||
| 562 | */ | 579 | */ |
| 563 | void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | 580 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) |
| 564 | { | 581 | { |
| 565 | struct mei_bus_message *mei_msg; | 582 | struct mei_bus_message *mei_msg; |
| 566 | struct mei_me_client *me_client; | 583 | struct mei_me_client *me_client; |
| @@ -577,8 +594,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 577 | mei_read_slots(dev, dev->rd_msg_buf, hdr->length); | 594 | mei_read_slots(dev, dev->rd_msg_buf, hdr->length); |
| 578 | mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; | 595 | mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; |
| 579 | 596 | ||
| 597 | /* ignore spurious message and prevent reset nesting | ||
| 598 | * hbm is put to idle during system reset | ||
| 599 | */ | ||
| 600 | if (dev->hbm_state == MEI_HBM_IDLE) { | ||
| 601 | dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); | ||
| 602 | return 0; | ||
| 603 | } | ||
| 604 | |||
| 580 | switch (mei_msg->hbm_cmd) { | 605 | switch (mei_msg->hbm_cmd) { |
| 581 | case HOST_START_RES_CMD: | 606 | case HOST_START_RES_CMD: |
| 607 | dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); | ||
| 608 | |||
| 609 | dev->init_clients_timer = 0; | ||
| 610 | |||
| 582 | version_res = (struct hbm_host_version_response *)mei_msg; | 611 | version_res = (struct hbm_host_version_response *)mei_msg; |
| 583 | 612 | ||
| 584 | dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", | 613 | dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", |
| @@ -597,73 +626,89 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 597 | } | 626 | } |
| 598 | 627 | ||
| 599 | if (!mei_hbm_version_is_supported(dev)) { | 628 | if (!mei_hbm_version_is_supported(dev)) { |
| 600 | dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n"); | 629 | dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); |
| 601 | 630 | ||
| 602 | dev->hbm_state = MEI_HBM_STOP; | 631 | dev->hbm_state = MEI_HBM_STOPPED; |
| 603 | mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, | 632 | mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, |
| 604 | dev->wr_msg.data); | 633 | dev->wr_msg.data); |
| 605 | mei_write_message(dev, &dev->wr_msg.hdr, | 634 | if (mei_write_message(dev, &dev->wr_msg.hdr, |
| 606 | dev->wr_msg.data); | 635 | dev->wr_msg.data)) { |
| 636 | dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); | ||
| 637 | return -EIO; | ||
| 638 | } | ||
| 639 | break; | ||
| 640 | } | ||
| 607 | 641 | ||
| 608 | return; | 642 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
| 643 | dev->hbm_state != MEI_HBM_START) { | ||
| 644 | dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", | ||
| 645 | dev->dev_state, dev->hbm_state); | ||
| 646 | return -EPROTO; | ||
| 609 | } | 647 | } |
| 610 | 648 | ||
| 611 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS && | 649 | dev->hbm_state = MEI_HBM_STARTED; |
| 612 | dev->hbm_state == MEI_HBM_START) { | 650 | |
| 613 | dev->init_clients_timer = 0; | 651 | if (mei_hbm_enum_clients_req(dev)) { |
| 614 | mei_hbm_enum_clients_req(dev); | 652 | dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); |
| 615 | } else { | 653 | return -EIO; |
| 616 | dev_err(&dev->pdev->dev, "reset: wrong host start response\n"); | ||
| 617 | mei_reset(dev, 1); | ||
| 618 | return; | ||
| 619 | } | 654 | } |
| 620 | 655 | ||
| 621 | wake_up_interruptible(&dev->wait_recvd_msg); | 656 | wake_up_interruptible(&dev->wait_recvd_msg); |
| 622 | dev_dbg(&dev->pdev->dev, "host start response message received.\n"); | ||
| 623 | break; | 657 | break; |
| 624 | 658 | ||
| 625 | case CLIENT_CONNECT_RES_CMD: | 659 | case CLIENT_CONNECT_RES_CMD: |
| 660 | dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); | ||
| 661 | |||
| 626 | connect_res = (struct hbm_client_connect_response *) mei_msg; | 662 | connect_res = (struct hbm_client_connect_response *) mei_msg; |
| 627 | mei_hbm_cl_connect_res(dev, connect_res); | 663 | mei_hbm_cl_connect_res(dev, connect_res); |
| 628 | dev_dbg(&dev->pdev->dev, "client connect response message received.\n"); | ||
| 629 | wake_up(&dev->wait_recvd_msg); | 664 | wake_up(&dev->wait_recvd_msg); |
| 630 | break; | 665 | break; |
| 631 | 666 | ||
| 632 | case CLIENT_DISCONNECT_RES_CMD: | 667 | case CLIENT_DISCONNECT_RES_CMD: |
| 668 | dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); | ||
| 669 | |||
| 633 | disconnect_res = (struct hbm_client_connect_response *) mei_msg; | 670 | disconnect_res = (struct hbm_client_connect_response *) mei_msg; |
| 634 | mei_hbm_cl_disconnect_res(dev, disconnect_res); | 671 | mei_hbm_cl_disconnect_res(dev, disconnect_res); |
| 635 | dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n"); | ||
| 636 | wake_up(&dev->wait_recvd_msg); | 672 | wake_up(&dev->wait_recvd_msg); |
| 637 | break; | 673 | break; |
| 638 | 674 | ||
| 639 | case MEI_FLOW_CONTROL_CMD: | 675 | case MEI_FLOW_CONTROL_CMD: |
| 676 | dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); | ||
| 677 | |||
| 640 | flow_control = (struct hbm_flow_control *) mei_msg; | 678 | flow_control = (struct hbm_flow_control *) mei_msg; |
| 641 | mei_hbm_cl_flow_control_res(dev, flow_control); | 679 | mei_hbm_cl_flow_control_res(dev, flow_control); |
| 642 | dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); | ||
| 643 | break; | 680 | break; |
| 644 | 681 | ||
| 645 | case HOST_CLIENT_PROPERTIES_RES_CMD: | 682 | case HOST_CLIENT_PROPERTIES_RES_CMD: |
| 683 | dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); | ||
| 684 | |||
| 685 | dev->init_clients_timer = 0; | ||
| 686 | |||
| 687 | if (dev->me_clients == NULL) { | ||
| 688 | dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); | ||
| 689 | return -EPROTO; | ||
| 690 | } | ||
| 691 | |||
| 646 | props_res = (struct hbm_props_response *)mei_msg; | 692 | props_res = (struct hbm_props_response *)mei_msg; |
| 647 | me_client = &dev->me_clients[dev->me_client_presentation_num]; | 693 | me_client = &dev->me_clients[dev->me_client_presentation_num]; |
| 648 | 694 | ||
| 649 | if (props_res->status || !dev->me_clients) { | 695 | if (props_res->status) { |
| 650 | dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n"); | 696 | dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", |
| 651 | mei_reset(dev, 1); | 697 | props_res->status); |
| 652 | return; | 698 | return -EPROTO; |
| 653 | } | 699 | } |
| 654 | 700 | ||
| 655 | if (me_client->client_id != props_res->address) { | 701 | if (me_client->client_id != props_res->address) { |
| 656 | dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n"); | 702 | dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", |
| 657 | mei_reset(dev, 1); | 703 | me_client->client_id, props_res->address); |
| 658 | return; | 704 | return -EPROTO; |
| 659 | } | 705 | } |
| 660 | 706 | ||
| 661 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || | 707 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
| 662 | dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { | 708 | dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { |
| 663 | dev_err(&dev->pdev->dev, "reset: unexpected properties response\n"); | 709 | dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", |
| 664 | mei_reset(dev, 1); | 710 | dev->dev_state, dev->hbm_state); |
| 665 | 711 | return -EPROTO; | |
| 666 | return; | ||
| 667 | } | 712 | } |
| 668 | 713 | ||
| 669 | me_client->props = props_res->client_properties; | 714 | me_client->props = props_res->client_properties; |
| @@ -671,49 +716,70 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 671 | dev->me_client_presentation_num++; | 716 | dev->me_client_presentation_num++; |
| 672 | 717 | ||
| 673 | /* request property for the next client */ | 718 | /* request property for the next client */ |
| 674 | mei_hbm_prop_req(dev); | 719 | if (mei_hbm_prop_req(dev)) |
| 720 | return -EIO; | ||
| 675 | 721 | ||
| 676 | break; | 722 | break; |
| 677 | 723 | ||
| 678 | case HOST_ENUM_RES_CMD: | 724 | case HOST_ENUM_RES_CMD: |
| 725 | dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); | ||
| 726 | |||
| 727 | dev->init_clients_timer = 0; | ||
| 728 | |||
| 679 | enum_res = (struct hbm_host_enum_response *) mei_msg; | 729 | enum_res = (struct hbm_host_enum_response *) mei_msg; |
| 680 | BUILD_BUG_ON(sizeof(dev->me_clients_map) | 730 | BUILD_BUG_ON(sizeof(dev->me_clients_map) |
| 681 | < sizeof(enum_res->valid_addresses)); | 731 | < sizeof(enum_res->valid_addresses)); |
| 682 | memcpy(dev->me_clients_map, enum_res->valid_addresses, | 732 | memcpy(dev->me_clients_map, enum_res->valid_addresses, |
| 683 | sizeof(enum_res->valid_addresses)); | 733 | sizeof(enum_res->valid_addresses)); |
| 684 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS && | 734 | |
| 685 | dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { | 735 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
| 686 | dev->init_clients_timer = 0; | 736 | dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { |
| 687 | mei_hbm_me_cl_allocate(dev); | 737 | dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", |
| 688 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; | 738 | dev->dev_state, dev->hbm_state); |
| 689 | 739 | return -EPROTO; | |
| 690 | /* first property reqeust */ | 740 | } |
| 691 | mei_hbm_prop_req(dev); | 741 | |
| 692 | } else { | 742 | if (mei_hbm_me_cl_allocate(dev)) { |
| 693 | dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n"); | 743 | dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); |
| 694 | mei_reset(dev, 1); | 744 | return -ENOMEM; |
| 695 | return; | ||
| 696 | } | 745 | } |
| 746 | |||
| 747 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; | ||
| 748 | |||
| 749 | /* first property request */ | ||
| 750 | if (mei_hbm_prop_req(dev)) | ||
| 751 | return -EIO; | ||
| 752 | |||
| 697 | break; | 753 | break; |
| 698 | 754 | ||
| 699 | case HOST_STOP_RES_CMD: | 755 | case HOST_STOP_RES_CMD: |
| 756 | dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); | ||
| 757 | |||
| 758 | dev->init_clients_timer = 0; | ||
| 700 | 759 | ||
| 701 | if (dev->hbm_state != MEI_HBM_STOP) | 760 | if (dev->hbm_state != MEI_HBM_STOPPED) { |
| 702 | dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n"); | 761 | dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", |
| 703 | dev->dev_state = MEI_DEV_DISABLED; | 762 | dev->dev_state, dev->hbm_state); |
| 704 | dev_info(&dev->pdev->dev, "reset: FW stop response.\n"); | 763 | return -EPROTO; |
| 705 | mei_reset(dev, 1); | 764 | } |
| 765 | |||
| 766 | dev->dev_state = MEI_DEV_POWER_DOWN; | ||
| 767 | dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); | ||
| 768 | /* force the reset */ | ||
| 769 | return -EPROTO; | ||
| 706 | break; | 770 | break; |
| 707 | 771 | ||
| 708 | case CLIENT_DISCONNECT_REQ_CMD: | 772 | case CLIENT_DISCONNECT_REQ_CMD: |
| 709 | /* search for client */ | 773 | dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); |
| 774 | |||
| 710 | disconnect_req = (struct hbm_client_connect_request *)mei_msg; | 775 | disconnect_req = (struct hbm_client_connect_request *)mei_msg; |
| 711 | mei_hbm_fw_disconnect_req(dev, disconnect_req); | 776 | mei_hbm_fw_disconnect_req(dev, disconnect_req); |
| 712 | break; | 777 | break; |
| 713 | 778 | ||
| 714 | case ME_STOP_REQ_CMD: | 779 | case ME_STOP_REQ_CMD: |
| 780 | dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); | ||
| 715 | 781 | ||
| 716 | dev->hbm_state = MEI_HBM_STOP; | 782 | dev->hbm_state = MEI_HBM_STOPPED; |
| 717 | mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, | 783 | mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, |
| 718 | dev->wr_ext_msg.data); | 784 | dev->wr_ext_msg.data); |
| 719 | break; | 785 | break; |
| @@ -722,5 +788,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 722 | break; | 788 | break; |
| 723 | 789 | ||
| 724 | } | 790 | } |
| 791 | return 0; | ||
| 725 | } | 792 | } |
| 726 | 793 | ||
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 4ae2e56e404f..5f92188a5cd7 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h | |||
| @@ -32,13 +32,13 @@ struct mei_cl; | |||
| 32 | enum mei_hbm_state { | 32 | enum mei_hbm_state { |
| 33 | MEI_HBM_IDLE = 0, | 33 | MEI_HBM_IDLE = 0, |
| 34 | MEI_HBM_START, | 34 | MEI_HBM_START, |
| 35 | MEI_HBM_STARTED, | ||
| 35 | MEI_HBM_ENUM_CLIENTS, | 36 | MEI_HBM_ENUM_CLIENTS, |
| 36 | MEI_HBM_CLIENT_PROPERTIES, | 37 | MEI_HBM_CLIENT_PROPERTIES, |
| 37 | MEI_HBM_STARTED, | 38 | MEI_HBM_STOPPED, |
| 38 | MEI_HBM_STOP, | ||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); | 41 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); |
| 42 | 42 | ||
| 43 | static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) | 43 | static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) |
| 44 | { | 44 | { |
| @@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) | |||
| 49 | hdr->reserved = 0; | 49 | hdr->reserved = 0; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | void mei_hbm_idle(struct mei_device *dev); | ||
| 52 | int mei_hbm_start_req(struct mei_device *dev); | 53 | int mei_hbm_start_req(struct mei_device *dev); |
| 53 | int mei_hbm_start_wait(struct mei_device *dev); | 54 | int mei_hbm_start_wait(struct mei_device *dev); |
| 54 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); | 55 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6c0fde55270d..66f411a6e8ea 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
| @@ -109,9 +109,12 @@ | |||
| 109 | #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ | 109 | #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ |
| 110 | #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ | 110 | #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ |
| 111 | 111 | ||
| 112 | #define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ | 112 | #define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */ |
| 113 | #define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ | 113 | #define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ |
| 114 | #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ | 114 | #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ |
| 115 | #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ | ||
| 116 | |||
| 117 | #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ | ||
| 115 | /* | 118 | /* |
| 116 | * MEI HW Section | 119 | * MEI HW Section |
| 117 | */ | 120 | */ |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3412adcdaeb0..6f656c053b14 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
| @@ -185,7 +185,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
| 185 | 185 | ||
| 186 | mei_me_reg_write(hw, H_CSR, hcsr); | 186 | mei_me_reg_write(hw, H_CSR, hcsr); |
| 187 | 187 | ||
| 188 | if (dev->dev_state == MEI_DEV_POWER_DOWN) | 188 | if (intr_enable == false) |
| 189 | mei_me_hw_reset_release(dev); | 189 | mei_me_hw_reset_release(dev); |
| 190 | 190 | ||
| 191 | return 0; | 191 | return 0; |
| @@ -469,7 +469,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
| 469 | struct mei_device *dev = (struct mei_device *) dev_id; | 469 | struct mei_device *dev = (struct mei_device *) dev_id; |
| 470 | struct mei_cl_cb complete_list; | 470 | struct mei_cl_cb complete_list; |
| 471 | s32 slots; | 471 | s32 slots; |
| 472 | int rets; | 472 | int rets = 0; |
| 473 | 473 | ||
| 474 | dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); | 474 | dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); |
| 475 | /* initialize our complete list */ | 475 | /* initialize our complete list */ |
| @@ -482,15 +482,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
| 482 | mei_clear_interrupts(dev); | 482 | mei_clear_interrupts(dev); |
| 483 | 483 | ||
| 484 | /* check if ME wants a reset */ | 484 | /* check if ME wants a reset */ |
| 485 | if (!mei_hw_is_ready(dev) && | 485 | if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { |
| 486 | dev->dev_state != MEI_DEV_RESETTING && | 486 | dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); |
| 487 | dev->dev_state != MEI_DEV_INITIALIZING && | 487 | schedule_work(&dev->reset_work); |
| 488 | dev->dev_state != MEI_DEV_POWER_DOWN && | 488 | goto end; |
| 489 | dev->dev_state != MEI_DEV_POWER_UP) { | ||
| 490 | dev_dbg(&dev->pdev->dev, "FW not ready.\n"); | ||
| 491 | mei_reset(dev, 1); | ||
| 492 | mutex_unlock(&dev->device_lock); | ||
| 493 | return IRQ_HANDLED; | ||
| 494 | } | 489 | } |
| 495 | 490 | ||
| 496 | /* check if we need to start the dev */ | 491 | /* check if we need to start the dev */ |
| @@ -500,15 +495,12 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
| 500 | 495 | ||
| 501 | dev->recvd_hw_ready = true; | 496 | dev->recvd_hw_ready = true; |
| 502 | wake_up_interruptible(&dev->wait_hw_ready); | 497 | wake_up_interruptible(&dev->wait_hw_ready); |
| 503 | |||
| 504 | mutex_unlock(&dev->device_lock); | ||
| 505 | return IRQ_HANDLED; | ||
| 506 | } else { | 498 | } else { |
| 499 | |||
| 507 | dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); | 500 | dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); |
| 508 | mei_me_hw_reset_release(dev); | 501 | mei_me_hw_reset_release(dev); |
| 509 | mutex_unlock(&dev->device_lock); | ||
| 510 | return IRQ_HANDLED; | ||
| 511 | } | 502 | } |
| 503 | goto end; | ||
| 512 | } | 504 | } |
| 513 | /* check slots available for reading */ | 505 | /* check slots available for reading */ |
| 514 | slots = mei_count_full_read_slots(dev); | 506 | slots = mei_count_full_read_slots(dev); |
| @@ -516,21 +508,23 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
| 516 | /* we have urgent data to send so break the read */ | 508 | /* we have urgent data to send so break the read */ |
| 517 | if (dev->wr_ext_msg.hdr.length) | 509 | if (dev->wr_ext_msg.hdr.length) |
| 518 | break; | 510 | break; |
| 519 | dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); | 511 | dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); |
| 520 | dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n"); | ||
| 521 | rets = mei_irq_read_handler(dev, &complete_list, &slots); | 512 | rets = mei_irq_read_handler(dev, &complete_list, &slots); |
| 522 | if (rets) | 513 | if (rets && dev->dev_state != MEI_DEV_RESETTING) { |
| 514 | schedule_work(&dev->reset_work); | ||
| 523 | goto end; | 515 | goto end; |
| 516 | } | ||
| 524 | } | 517 | } |
| 518 | |||
| 525 | rets = mei_irq_write_handler(dev, &complete_list); | 519 | rets = mei_irq_write_handler(dev, &complete_list); |
| 526 | end: | ||
| 527 | dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); | ||
| 528 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | ||
| 529 | 520 | ||
| 530 | mutex_unlock(&dev->device_lock); | 521 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); |
| 531 | 522 | ||
| 532 | mei_irq_compl_handler(dev, &complete_list); | 523 | mei_irq_compl_handler(dev, &complete_list); |
| 533 | 524 | ||
| 525 | end: | ||
| 526 | dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); | ||
| 527 | mutex_unlock(&dev->device_lock); | ||
| 534 | return IRQ_HANDLED; | 528 | return IRQ_HANDLED; |
| 535 | } | 529 | } |
| 536 | static const struct mei_hw_ops mei_me_hw_ops = { | 530 | static const struct mei_hw_ops mei_me_hw_ops = { |
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index cb2f556b4252..dd44e33ad2b6 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h | |||
| @@ -111,7 +111,8 @@ struct mei_msg_hdr { | |||
| 111 | u32 me_addr:8; | 111 | u32 me_addr:8; |
| 112 | u32 host_addr:8; | 112 | u32 host_addr:8; |
| 113 | u32 length:9; | 113 | u32 length:9; |
| 114 | u32 reserved:6; | 114 | u32 reserved:5; |
| 115 | u32 internal:1; | ||
| 115 | u32 msg_complete:1; | 116 | u32 msg_complete:1; |
| 116 | } __packed; | 117 | } __packed; |
| 117 | 118 | ||
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index f7f3abbe12b6..cdd31c2a2a2b 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
| @@ -43,41 +43,119 @@ const char *mei_dev_state_str(int state) | |||
| 43 | #undef MEI_DEV_STATE | 43 | #undef MEI_DEV_STATE |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | void mei_device_init(struct mei_device *dev) | ||
| 47 | { | ||
| 48 | /* setup our list array */ | ||
| 49 | INIT_LIST_HEAD(&dev->file_list); | ||
| 50 | INIT_LIST_HEAD(&dev->device_list); | ||
| 51 | mutex_init(&dev->device_lock); | ||
| 52 | init_waitqueue_head(&dev->wait_hw_ready); | ||
| 53 | init_waitqueue_head(&dev->wait_recvd_msg); | ||
| 54 | init_waitqueue_head(&dev->wait_stop_wd); | ||
| 55 | dev->dev_state = MEI_DEV_INITIALIZING; | ||
| 56 | 46 | ||
| 57 | mei_io_list_init(&dev->read_list); | 47 | /** |
| 58 | mei_io_list_init(&dev->write_list); | 48 | * mei_cancel_work. Cancel mei background jobs |
| 59 | mei_io_list_init(&dev->write_waiting_list); | 49 | * |
| 60 | mei_io_list_init(&dev->ctrl_wr_list); | 50 | * @dev: the device structure |
| 61 | mei_io_list_init(&dev->ctrl_rd_list); | 51 | * |
| 52 | * returns 0 on success or < 0 if the reset hasn't succeeded | ||
| 53 | */ | ||
| 54 | void mei_cancel_work(struct mei_device *dev) | ||
| 55 | { | ||
| 56 | cancel_work_sync(&dev->init_work); | ||
| 57 | cancel_work_sync(&dev->reset_work); | ||
| 62 | 58 | ||
| 63 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); | 59 | cancel_delayed_work(&dev->timer_work); |
| 64 | INIT_WORK(&dev->init_work, mei_host_client_init); | 60 | } |
| 61 | EXPORT_SYMBOL_GPL(mei_cancel_work); | ||
| 65 | 62 | ||
| 66 | INIT_LIST_HEAD(&dev->wd_cl.link); | 63 | /** |
| 67 | INIT_LIST_HEAD(&dev->iamthif_cl.link); | 64 | * mei_reset - resets host and fw. |
| 68 | mei_io_list_init(&dev->amthif_cmd_list); | 65 | * |
| 69 | mei_io_list_init(&dev->amthif_rd_complete_list); | 66 | * @dev: the device structure |
| 67 | */ | ||
| 68 | int mei_reset(struct mei_device *dev) | ||
| 69 | { | ||
| 70 | enum mei_dev_state state = dev->dev_state; | ||
| 71 | bool interrupts_enabled; | ||
| 72 | int ret; | ||
| 70 | 73 | ||
| 71 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); | 74 | if (state != MEI_DEV_INITIALIZING && |
| 72 | dev->open_handle_count = 0; | 75 | state != MEI_DEV_DISABLED && |
| 76 | state != MEI_DEV_POWER_DOWN && | ||
| 77 | state != MEI_DEV_POWER_UP) | ||
| 78 | dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", | ||
| 79 | mei_dev_state_str(state)); | ||
| 73 | 80 | ||
| 74 | /* | 81 | /* we're already in reset, cancel the init timer |
| 75 | * Reserving the first client ID | 82 | * if the reset was called due the hbm protocol error |
| 76 | * 0: Reserved for MEI Bus Message communications | 83 | * we need to call it before hw start |
| 84 | * so the hbm watchdog won't kick in | ||
| 77 | */ | 85 | */ |
| 78 | bitmap_set(dev->host_clients_map, 0, 1); | 86 | mei_hbm_idle(dev); |
| 87 | |||
| 88 | /* enter reset flow */ | ||
| 89 | interrupts_enabled = state != MEI_DEV_POWER_DOWN; | ||
| 90 | dev->dev_state = MEI_DEV_RESETTING; | ||
| 91 | |||
| 92 | dev->reset_count++; | ||
| 93 | if (dev->reset_count > MEI_MAX_CONSEC_RESET) { | ||
| 94 | dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); | ||
| 95 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 96 | return -ENODEV; | ||
| 97 | } | ||
| 98 | |||
| 99 | ret = mei_hw_reset(dev, interrupts_enabled); | ||
| 100 | /* fall through and remove the sw state even if hw reset has failed */ | ||
| 101 | |||
| 102 | /* no need to clean up software state in case of power up */ | ||
| 103 | if (state != MEI_DEV_INITIALIZING && | ||
| 104 | state != MEI_DEV_POWER_UP) { | ||
| 105 | |||
| 106 | /* remove all waiting requests */ | ||
| 107 | mei_cl_all_write_clear(dev); | ||
| 108 | |||
| 109 | mei_cl_all_disconnect(dev); | ||
| 110 | |||
| 111 | /* wake up all readers and writers so they can be interrupted */ | ||
| 112 | mei_cl_all_wakeup(dev); | ||
| 113 | |||
| 114 | /* remove entry if already in list */ | ||
| 115 | dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); | ||
| 116 | mei_cl_unlink(&dev->wd_cl); | ||
| 117 | mei_cl_unlink(&dev->iamthif_cl); | ||
| 118 | mei_amthif_reset_params(dev); | ||
| 119 | memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); | ||
| 120 | } | ||
| 121 | |||
| 122 | |||
| 123 | dev->me_clients_num = 0; | ||
| 124 | dev->rd_msg_hdr = 0; | ||
| 125 | dev->wd_pending = false; | ||
| 126 | |||
| 127 | if (ret) { | ||
| 128 | dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); | ||
| 129 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 130 | return ret; | ||
| 131 | } | ||
| 132 | |||
| 133 | if (state == MEI_DEV_POWER_DOWN) { | ||
| 134 | dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); | ||
| 135 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 136 | return 0; | ||
| 137 | } | ||
| 138 | |||
| 139 | ret = mei_hw_start(dev); | ||
| 140 | if (ret) { | ||
| 141 | dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); | ||
| 142 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 143 | return ret; | ||
| 144 | } | ||
| 145 | |||
| 146 | dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); | ||
| 147 | |||
| 148 | dev->dev_state = MEI_DEV_INIT_CLIENTS; | ||
| 149 | ret = mei_hbm_start_req(dev); | ||
| 150 | if (ret) { | ||
| 151 | dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); | ||
| 152 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 153 | return ret; | ||
| 154 | } | ||
| 155 | |||
| 156 | return 0; | ||
| 79 | } | 157 | } |
| 80 | EXPORT_SYMBOL_GPL(mei_device_init); | 158 | EXPORT_SYMBOL_GPL(mei_reset); |
| 81 | 159 | ||
| 82 | /** | 160 | /** |
| 83 | * mei_start - initializes host and fw to start work. | 161 | * mei_start - initializes host and fw to start work. |
| @@ -90,14 +168,21 @@ int mei_start(struct mei_device *dev) | |||
| 90 | { | 168 | { |
| 91 | mutex_lock(&dev->device_lock); | 169 | mutex_lock(&dev->device_lock); |
| 92 | 170 | ||
| 93 | /* acknowledge interrupt and stop interupts */ | 171 | /* acknowledge interrupt and stop interrupts */ |
| 94 | mei_clear_interrupts(dev); | 172 | mei_clear_interrupts(dev); |
| 95 | 173 | ||
| 96 | mei_hw_config(dev); | 174 | mei_hw_config(dev); |
| 97 | 175 | ||
| 98 | dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); | 176 | dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); |
| 99 | 177 | ||
| 100 | mei_reset(dev, 1); | 178 | dev->dev_state = MEI_DEV_INITIALIZING; |
| 179 | dev->reset_count = 0; | ||
| 180 | mei_reset(dev); | ||
| 181 | |||
| 182 | if (dev->dev_state == MEI_DEV_DISABLED) { | ||
| 183 | dev_err(&dev->pdev->dev, "reset failed"); | ||
| 184 | goto err; | ||
| 185 | } | ||
| 101 | 186 | ||
| 102 | if (mei_hbm_start_wait(dev)) { | 187 | if (mei_hbm_start_wait(dev)) { |
| 103 | dev_err(&dev->pdev->dev, "HBM haven't started"); | 188 | dev_err(&dev->pdev->dev, "HBM haven't started"); |
| @@ -132,101 +217,64 @@ err: | |||
| 132 | EXPORT_SYMBOL_GPL(mei_start); | 217 | EXPORT_SYMBOL_GPL(mei_start); |
| 133 | 218 | ||
| 134 | /** | 219 | /** |
| 135 | * mei_reset - resets host and fw. | 220 | * mei_restart - restart device after suspend |
| 136 | * | 221 | * |
| 137 | * @dev: the device structure | 222 | * @dev: the device structure |
| 138 | * @interrupts_enabled: if interrupt should be enabled after reset. | 223 | * |
| 224 | * returns 0 on success or -ENODEV if the restart hasn't succeeded | ||
| 139 | */ | 225 | */ |
| 140 | void mei_reset(struct mei_device *dev, int interrupts_enabled) | 226 | int mei_restart(struct mei_device *dev) |
| 141 | { | 227 | { |
| 142 | bool unexpected; | 228 | int err; |
| 143 | int ret; | ||
| 144 | |||
| 145 | unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && | ||
| 146 | dev->dev_state != MEI_DEV_DISABLED && | ||
| 147 | dev->dev_state != MEI_DEV_POWER_DOWN && | ||
| 148 | dev->dev_state != MEI_DEV_POWER_UP); | ||
| 149 | 229 | ||
| 150 | if (unexpected) | 230 | mutex_lock(&dev->device_lock); |
| 151 | dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", | ||
| 152 | mei_dev_state_str(dev->dev_state)); | ||
| 153 | |||
| 154 | ret = mei_hw_reset(dev, interrupts_enabled); | ||
| 155 | if (ret) { | ||
| 156 | dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n"); | ||
| 157 | interrupts_enabled = false; | ||
| 158 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 159 | } | ||
| 160 | |||
| 161 | dev->hbm_state = MEI_HBM_IDLE; | ||
| 162 | 231 | ||
| 163 | if (dev->dev_state != MEI_DEV_INITIALIZING && | 232 | mei_clear_interrupts(dev); |
| 164 | dev->dev_state != MEI_DEV_POWER_UP) { | ||
| 165 | if (dev->dev_state != MEI_DEV_DISABLED && | ||
| 166 | dev->dev_state != MEI_DEV_POWER_DOWN) | ||
| 167 | dev->dev_state = MEI_DEV_RESETTING; | ||
| 168 | 233 | ||
| 169 | /* remove all waiting requests */ | 234 | dev->dev_state = MEI_DEV_POWER_UP; |
| 170 | mei_cl_all_write_clear(dev); | 235 | dev->reset_count = 0; |
| 171 | 236 | ||
| 172 | mei_cl_all_disconnect(dev); | 237 | err = mei_reset(dev); |
| 173 | 238 | ||
| 174 | /* wake up all readings so they can be interrupted */ | 239 | mutex_unlock(&dev->device_lock); |
| 175 | mei_cl_all_wakeup(dev); | ||
| 176 | |||
| 177 | /* remove entry if already in list */ | ||
| 178 | dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); | ||
| 179 | mei_cl_unlink(&dev->wd_cl); | ||
| 180 | mei_cl_unlink(&dev->iamthif_cl); | ||
| 181 | mei_amthif_reset_params(dev); | ||
| 182 | memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); | ||
| 183 | } | ||
| 184 | 240 | ||
| 185 | /* we're already in reset, cancel the init timer */ | 241 | if (err || dev->dev_state == MEI_DEV_DISABLED) |
| 186 | dev->init_clients_timer = 0; | 242 | return -ENODEV; |
| 187 | 243 | ||
| 188 | dev->me_clients_num = 0; | 244 | return 0; |
| 189 | dev->rd_msg_hdr = 0; | 245 | } |
| 190 | dev->wd_pending = false; | 246 | EXPORT_SYMBOL_GPL(mei_restart); |
| 191 | 247 | ||
| 192 | if (!interrupts_enabled) { | ||
| 193 | dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n"); | ||
| 194 | return; | ||
| 195 | } | ||
| 196 | 248 | ||
| 197 | ret = mei_hw_start(dev); | 249 | static void mei_reset_work(struct work_struct *work) |
| 198 | if (ret) { | 250 | { |
| 199 | dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n"); | 251 | struct mei_device *dev = |
| 200 | dev->dev_state = MEI_DEV_DISABLED; | 252 | container_of(work, struct mei_device, reset_work); |
| 201 | return; | ||
| 202 | } | ||
| 203 | 253 | ||
| 204 | dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); | 254 | mutex_lock(&dev->device_lock); |
| 205 | /* link is established * start sending messages. */ | ||
| 206 | 255 | ||
| 207 | dev->dev_state = MEI_DEV_INIT_CLIENTS; | 256 | mei_reset(dev); |
| 208 | 257 | ||
| 209 | mei_hbm_start_req(dev); | 258 | mutex_unlock(&dev->device_lock); |
| 210 | 259 | ||
| 260 | if (dev->dev_state == MEI_DEV_DISABLED) | ||
| 261 | dev_err(&dev->pdev->dev, "reset failed"); | ||
| 211 | } | 262 | } |
| 212 | EXPORT_SYMBOL_GPL(mei_reset); | ||
| 213 | 263 | ||
| 214 | void mei_stop(struct mei_device *dev) | 264 | void mei_stop(struct mei_device *dev) |
| 215 | { | 265 | { |
| 216 | dev_dbg(&dev->pdev->dev, "stopping the device.\n"); | 266 | dev_dbg(&dev->pdev->dev, "stopping the device.\n"); |
| 217 | 267 | ||
| 218 | flush_scheduled_work(); | 268 | mei_cancel_work(dev); |
| 219 | 269 | ||
| 220 | mutex_lock(&dev->device_lock); | 270 | mei_nfc_host_exit(dev); |
| 221 | 271 | ||
| 222 | cancel_delayed_work(&dev->timer_work); | 272 | mutex_lock(&dev->device_lock); |
| 223 | 273 | ||
| 224 | mei_wd_stop(dev); | 274 | mei_wd_stop(dev); |
| 225 | 275 | ||
| 226 | mei_nfc_host_exit(); | ||
| 227 | |||
| 228 | dev->dev_state = MEI_DEV_POWER_DOWN; | 276 | dev->dev_state = MEI_DEV_POWER_DOWN; |
| 229 | mei_reset(dev, 0); | 277 | mei_reset(dev); |
| 230 | 278 | ||
| 231 | mutex_unlock(&dev->device_lock); | 279 | mutex_unlock(&dev->device_lock); |
| 232 | 280 | ||
| @@ -236,3 +284,41 @@ EXPORT_SYMBOL_GPL(mei_stop); | |||
| 236 | 284 | ||
| 237 | 285 | ||
| 238 | 286 | ||
| 287 | void mei_device_init(struct mei_device *dev) | ||
| 288 | { | ||
| 289 | /* setup our list array */ | ||
| 290 | INIT_LIST_HEAD(&dev->file_list); | ||
| 291 | INIT_LIST_HEAD(&dev->device_list); | ||
| 292 | mutex_init(&dev->device_lock); | ||
| 293 | init_waitqueue_head(&dev->wait_hw_ready); | ||
| 294 | init_waitqueue_head(&dev->wait_recvd_msg); | ||
| 295 | init_waitqueue_head(&dev->wait_stop_wd); | ||
| 296 | dev->dev_state = MEI_DEV_INITIALIZING; | ||
| 297 | dev->reset_count = 0; | ||
| 298 | |||
| 299 | mei_io_list_init(&dev->read_list); | ||
| 300 | mei_io_list_init(&dev->write_list); | ||
| 301 | mei_io_list_init(&dev->write_waiting_list); | ||
| 302 | mei_io_list_init(&dev->ctrl_wr_list); | ||
| 303 | mei_io_list_init(&dev->ctrl_rd_list); | ||
| 304 | |||
| 305 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); | ||
| 306 | INIT_WORK(&dev->init_work, mei_host_client_init); | ||
| 307 | INIT_WORK(&dev->reset_work, mei_reset_work); | ||
| 308 | |||
| 309 | INIT_LIST_HEAD(&dev->wd_cl.link); | ||
| 310 | INIT_LIST_HEAD(&dev->iamthif_cl.link); | ||
| 311 | mei_io_list_init(&dev->amthif_cmd_list); | ||
| 312 | mei_io_list_init(&dev->amthif_rd_complete_list); | ||
| 313 | |||
| 314 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); | ||
| 315 | dev->open_handle_count = 0; | ||
| 316 | |||
| 317 | /* | ||
| 318 | * Reserving the first client ID | ||
| 319 | * 0: Reserved for MEI Bus Message communications | ||
| 320 | */ | ||
| 321 | bitmap_set(dev->host_clients_map, 0, 1); | ||
| 322 | } | ||
| 323 | EXPORT_SYMBOL_GPL(mei_device_init); | ||
| 324 | |||
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 7a95c07e59a6..f0fbb5179f80 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | 32 | ||
| 33 | /** | 33 | /** |
| 34 | * mei_irq_compl_handler - dispatch complete handelers | 34 | * mei_irq_compl_handler - dispatch complete handlers |
| 35 | * for the completed callbacks | 35 | * for the completed callbacks |
| 36 | * | 36 | * |
| 37 | * @dev - mei device | 37 | * @dev - mei device |
| @@ -301,13 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
| 301 | struct mei_cl_cb *cmpl_list, s32 *slots) | 301 | struct mei_cl_cb *cmpl_list, s32 *slots) |
| 302 | { | 302 | { |
| 303 | struct mei_msg_hdr *mei_hdr; | 303 | struct mei_msg_hdr *mei_hdr; |
| 304 | struct mei_cl *cl_pos = NULL; | 304 | struct mei_cl *cl; |
| 305 | struct mei_cl *cl_next = NULL; | 305 | int ret; |
| 306 | int ret = 0; | ||
| 307 | 306 | ||
| 308 | if (!dev->rd_msg_hdr) { | 307 | if (!dev->rd_msg_hdr) { |
| 309 | dev->rd_msg_hdr = mei_read_hdr(dev); | 308 | dev->rd_msg_hdr = mei_read_hdr(dev); |
| 310 | dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); | ||
| 311 | (*slots)--; | 309 | (*slots)--; |
| 312 | dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); | 310 | dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); |
| 313 | } | 311 | } |
| @@ -315,61 +313,67 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
| 315 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); | 313 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); |
| 316 | 314 | ||
| 317 | if (mei_hdr->reserved || !dev->rd_msg_hdr) { | 315 | if (mei_hdr->reserved || !dev->rd_msg_hdr) { |
| 318 | dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); | 316 | dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", |
| 317 | dev->rd_msg_hdr); | ||
| 319 | ret = -EBADMSG; | 318 | ret = -EBADMSG; |
| 320 | goto end; | 319 | goto end; |
| 321 | } | 320 | } |
| 322 | 321 | ||
| 323 | if (mei_hdr->host_addr || mei_hdr->me_addr) { | 322 | if (mei_slots2data(*slots) < mei_hdr->length) { |
| 324 | list_for_each_entry_safe(cl_pos, cl_next, | 323 | dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", |
| 325 | &dev->file_list, link) { | ||
| 326 | dev_dbg(&dev->pdev->dev, | ||
| 327 | "list_for_each_entry_safe read host" | ||
| 328 | " client = %d, ME client = %d\n", | ||
| 329 | cl_pos->host_client_id, | ||
| 330 | cl_pos->me_client_id); | ||
| 331 | if (mei_cl_hbm_equal(cl_pos, mei_hdr)) | ||
| 332 | break; | ||
| 333 | } | ||
| 334 | |||
| 335 | if (&cl_pos->link == &dev->file_list) { | ||
| 336 | dev_dbg(&dev->pdev->dev, "corrupted message header\n"); | ||
| 337 | ret = -EBADMSG; | ||
| 338 | goto end; | ||
| 339 | } | ||
| 340 | } | ||
| 341 | if (((*slots) * sizeof(u32)) < mei_hdr->length) { | ||
| 342 | dev_err(&dev->pdev->dev, | ||
| 343 | "we can't read the message slots =%08x.\n", | ||
| 344 | *slots); | 324 | *slots); |
| 345 | /* we can't read the message */ | 325 | /* we can't read the message */ |
| 346 | ret = -ERANGE; | 326 | ret = -ERANGE; |
| 347 | goto end; | 327 | goto end; |
| 348 | } | 328 | } |
| 349 | 329 | ||
| 350 | /* decide where to read the message too */ | 330 | /* HBM message */ |
| 351 | if (!mei_hdr->host_addr) { | 331 | if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { |
| 352 | dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n"); | 332 | ret = mei_hbm_dispatch(dev, mei_hdr); |
| 353 | mei_hbm_dispatch(dev, mei_hdr); | 333 | if (ret) { |
| 354 | dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n"); | 334 | dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", |
| 355 | } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && | 335 | ret); |
| 356 | (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && | 336 | goto end; |
| 357 | (dev->iamthif_state == MEI_IAMTHIF_READING)) { | 337 | } |
| 338 | goto reset_slots; | ||
| 339 | } | ||
| 340 | |||
| 341 | /* find recipient cl */ | ||
| 342 | list_for_each_entry(cl, &dev->file_list, link) { | ||
| 343 | if (mei_cl_hbm_equal(cl, mei_hdr)) { | ||
| 344 | cl_dbg(dev, cl, "got a message\n"); | ||
| 345 | break; | ||
| 346 | } | ||
| 347 | } | ||
| 348 | |||
| 349 | /* if no recipient cl was found we assume corrupted header */ | ||
| 350 | if (&cl->link == &dev->file_list) { | ||
| 351 | dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", | ||
| 352 | dev->rd_msg_hdr); | ||
| 353 | ret = -EBADMSG; | ||
| 354 | goto end; | ||
| 355 | } | ||
| 358 | 356 | ||
| 359 | dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n"); | 357 | if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && |
| 360 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); | 358 | MEI_FILE_CONNECTED == dev->iamthif_cl.state && |
| 359 | dev->iamthif_state == MEI_IAMTHIF_READING) { | ||
| 361 | 360 | ||
| 362 | ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); | 361 | ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); |
| 363 | if (ret) | 362 | if (ret) { |
| 363 | dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", | ||
| 364 | ret); | ||
| 364 | goto end; | 365 | goto end; |
| 366 | } | ||
| 365 | } else { | 367 | } else { |
| 366 | dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n"); | ||
| 367 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); | ||
| 368 | ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); | 368 | ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); |
| 369 | if (ret) | 369 | if (ret) { |
| 370 | dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", | ||
| 371 | ret); | ||
| 370 | goto end; | 372 | goto end; |
| 373 | } | ||
| 371 | } | 374 | } |
| 372 | 375 | ||
| 376 | reset_slots: | ||
| 373 | /* reset the number of slots and header */ | 377 | /* reset the number of slots and header */ |
| 374 | *slots = mei_count_full_read_slots(dev); | 378 | *slots = mei_count_full_read_slots(dev); |
| 375 | dev->rd_msg_hdr = 0; | 379 | dev->rd_msg_hdr = 0; |
| @@ -533,7 +537,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler); | |||
| 533 | * | 537 | * |
| 534 | * @work: pointer to the work_struct structure | 538 | * @work: pointer to the work_struct structure |
| 535 | * | 539 | * |
| 536 | * NOTE: This function is called by timer interrupt work | ||
| 537 | */ | 540 | */ |
| 538 | void mei_timer(struct work_struct *work) | 541 | void mei_timer(struct work_struct *work) |
| 539 | { | 542 | { |
| @@ -548,24 +551,30 @@ void mei_timer(struct work_struct *work) | |||
| 548 | 551 | ||
| 549 | 552 | ||
| 550 | mutex_lock(&dev->device_lock); | 553 | mutex_lock(&dev->device_lock); |
| 551 | if (dev->dev_state != MEI_DEV_ENABLED) { | 554 | |
| 552 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS) { | 555 | /* Catch interrupt stalls during HBM init handshake */ |
| 553 | if (dev->init_clients_timer) { | 556 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS && |
| 554 | if (--dev->init_clients_timer == 0) { | 557 | dev->hbm_state != MEI_HBM_IDLE) { |
| 555 | dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n", | 558 | |
| 556 | dev->hbm_state); | 559 | if (dev->init_clients_timer) { |
| 557 | mei_reset(dev, 1); | 560 | if (--dev->init_clients_timer == 0) { |
| 558 | } | 561 | dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", |
| 562 | dev->hbm_state); | ||
| 563 | mei_reset(dev); | ||
| 564 | goto out; | ||
| 559 | } | 565 | } |
| 560 | } | 566 | } |
| 561 | goto out; | ||
| 562 | } | 567 | } |
| 568 | |||
| 569 | if (dev->dev_state != MEI_DEV_ENABLED) | ||
| 570 | goto out; | ||
| 571 | |||
| 563 | /*** connect/disconnect timeouts ***/ | 572 | /*** connect/disconnect timeouts ***/ |
| 564 | list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { | 573 | list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { |
| 565 | if (cl_pos->timer_count) { | 574 | if (cl_pos->timer_count) { |
| 566 | if (--cl_pos->timer_count == 0) { | 575 | if (--cl_pos->timer_count == 0) { |
| 567 | dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n"); | 576 | dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); |
| 568 | mei_reset(dev, 1); | 577 | mei_reset(dev); |
| 569 | goto out; | 578 | goto out; |
| 570 | } | 579 | } |
| 571 | } | 580 | } |
| @@ -573,8 +582,8 @@ void mei_timer(struct work_struct *work) | |||
| 573 | 582 | ||
| 574 | if (dev->iamthif_stall_timer) { | 583 | if (dev->iamthif_stall_timer) { |
| 575 | if (--dev->iamthif_stall_timer == 0) { | 584 | if (--dev->iamthif_stall_timer == 0) { |
| 576 | dev_err(&dev->pdev->dev, "reset: amthif hanged.\n"); | 585 | dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); |
| 577 | mei_reset(dev, 1); | 586 | mei_reset(dev); |
| 578 | dev->iamthif_msg_buf_size = 0; | 587 | dev->iamthif_msg_buf_size = 0; |
| 579 | dev->iamthif_msg_buf_index = 0; | 588 | dev->iamthif_msg_buf_index = 0; |
| 580 | dev->iamthif_canceled = false; | 589 | dev->iamthif_canceled = false; |
| @@ -627,7 +636,8 @@ void mei_timer(struct work_struct *work) | |||
| 627 | } | 636 | } |
| 628 | } | 637 | } |
| 629 | out: | 638 | out: |
| 630 | schedule_delayed_work(&dev->timer_work, 2 * HZ); | 639 | if (dev->dev_state != MEI_DEV_DISABLED) |
| 640 | schedule_delayed_work(&dev->timer_work, 2 * HZ); | ||
| 631 | mutex_unlock(&dev->device_lock); | 641 | mutex_unlock(&dev->device_lock); |
| 632 | } | 642 | } |
| 633 | 643 | ||
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 9661a812f550..5424f8ff3f7f 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
| @@ -48,7 +48,7 @@ | |||
| 48 | * | 48 | * |
| 49 | * @inode: pointer to inode structure | 49 | * @inode: pointer to inode structure |
| 50 | * @file: pointer to file structure | 50 | * @file: pointer to file structure |
| 51 | e | 51 | * |
| 52 | * returns 0 on success, <0 on error | 52 | * returns 0 on success, <0 on error |
| 53 | */ | 53 | */ |
| 54 | static int mei_open(struct inode *inode, struct file *file) | 54 | static int mei_open(struct inode *inode, struct file *file) |
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 406f68e05b4e..f7de95b4cdd9 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
| @@ -61,11 +61,16 @@ extern const uuid_le mei_wd_guid; | |||
| 61 | #define MEI_CLIENTS_MAX 256 | 61 | #define MEI_CLIENTS_MAX 256 |
| 62 | 62 | ||
| 63 | /* | 63 | /* |
| 64 | * maximum number of consecutive resets | ||
| 65 | */ | ||
| 66 | #define MEI_MAX_CONSEC_RESET 3 | ||
| 67 | |||
| 68 | /* | ||
| 64 | * Number of File descriptors/handles | 69 | * Number of File descriptors/handles |
| 65 | * that can be opened to the driver. | 70 | * that can be opened to the driver. |
| 66 | * | 71 | * |
| 67 | * Limit to 255: 256 Total Clients | 72 | * Limit to 255: 256 Total Clients |
| 68 | * minus internal client for MEI Bus Messags | 73 | * minus internal client for MEI Bus Messages |
| 69 | */ | 74 | */ |
| 70 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) | 75 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) |
| 71 | 76 | ||
| @@ -178,9 +183,10 @@ struct mei_cl_cb { | |||
| 178 | unsigned long buf_idx; | 183 | unsigned long buf_idx; |
| 179 | unsigned long read_time; | 184 | unsigned long read_time; |
| 180 | struct file *file_object; | 185 | struct file *file_object; |
| 186 | u32 internal:1; | ||
| 181 | }; | 187 | }; |
| 182 | 188 | ||
| 183 | /* MEI client instance carried as file->pirvate_data*/ | 189 | /* MEI client instance carried as file->private_data*/ |
| 184 | struct mei_cl { | 190 | struct mei_cl { |
| 185 | struct list_head link; | 191 | struct list_head link; |
| 186 | struct mei_device *dev; | 192 | struct mei_device *dev; |
| @@ -326,6 +332,7 @@ struct mei_cl_device { | |||
| 326 | /** | 332 | /** |
| 327 | * struct mei_device - MEI private device struct | 333 | * struct mei_device - MEI private device struct |
| 328 | 334 | ||
| 335 | * @reset_count - limits the number of consecutive resets | ||
| 329 | * @hbm_state - state of host bus message protocol | 336 | * @hbm_state - state of host bus message protocol |
| 330 | * @mem_addr - mem mapped base register address | 337 | * @mem_addr - mem mapped base register address |
| 331 | 338 | ||
| @@ -369,6 +376,7 @@ struct mei_device { | |||
| 369 | /* | 376 | /* |
| 370 | * mei device states | 377 | * mei device states |
| 371 | */ | 378 | */ |
| 379 | unsigned long reset_count; | ||
| 372 | enum mei_dev_state dev_state; | 380 | enum mei_dev_state dev_state; |
| 373 | enum mei_hbm_state hbm_state; | 381 | enum mei_hbm_state hbm_state; |
| 374 | u16 init_clients_timer; | 382 | u16 init_clients_timer; |
| @@ -427,6 +435,7 @@ struct mei_device { | |||
| 427 | bool iamthif_canceled; | 435 | bool iamthif_canceled; |
| 428 | 436 | ||
| 429 | struct work_struct init_work; | 437 | struct work_struct init_work; |
| 438 | struct work_struct reset_work; | ||
| 430 | 439 | ||
| 431 | /* List of bus devices */ | 440 | /* List of bus devices */ |
| 432 | struct list_head device_list; | 441 | struct list_head device_list; |
| @@ -456,13 +465,25 @@ static inline u32 mei_data2slots(size_t length) | |||
| 456 | return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); | 465 | return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); |
| 457 | } | 466 | } |
| 458 | 467 | ||
| 468 | /** | ||
| 469 | * mei_slots2data- get data in slots - bytes from slots | ||
| 470 | * @slots - number of available slots | ||
| 471 | * returns - number of bytes in slots | ||
| 472 | */ | ||
| 473 | static inline u32 mei_slots2data(int slots) | ||
| 474 | { | ||
| 475 | return slots * 4; | ||
| 476 | } | ||
| 477 | |||
| 459 | /* | 478 | /* |
| 460 | * mei init function prototypes | 479 | * mei init function prototypes |
| 461 | */ | 480 | */ |
| 462 | void mei_device_init(struct mei_device *dev); | 481 | void mei_device_init(struct mei_device *dev); |
| 463 | void mei_reset(struct mei_device *dev, int interrupts); | 482 | int mei_reset(struct mei_device *dev); |
| 464 | int mei_start(struct mei_device *dev); | 483 | int mei_start(struct mei_device *dev); |
| 484 | int mei_restart(struct mei_device *dev); | ||
| 465 | void mei_stop(struct mei_device *dev); | 485 | void mei_stop(struct mei_device *dev); |
| 486 | void mei_cancel_work(struct mei_device *dev); | ||
| 466 | 487 | ||
| 467 | /* | 488 | /* |
| 468 | * MEI interrupt functions prototype | 489 | * MEI interrupt functions prototype |
| @@ -510,7 +531,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); | |||
| 510 | * NFC functions | 531 | * NFC functions |
| 511 | */ | 532 | */ |
| 512 | int mei_nfc_host_init(struct mei_device *dev); | 533 | int mei_nfc_host_init(struct mei_device *dev); |
| 513 | void mei_nfc_host_exit(void); | 534 | void mei_nfc_host_exit(struct mei_device *dev); |
| 514 | 535 | ||
| 515 | /* | 536 | /* |
| 516 | * NFC Client UUID | 537 | * NFC Client UUID |
| @@ -626,9 +647,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {} | |||
| 626 | int mei_register(struct mei_device *dev); | 647 | int mei_register(struct mei_device *dev); |
| 627 | void mei_deregister(struct mei_device *dev); | 648 | void mei_deregister(struct mei_device *dev); |
| 628 | 649 | ||
| 629 | #define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" | 650 | #define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" |
| 630 | #define MEI_HDR_PRM(hdr) \ | 651 | #define MEI_HDR_PRM(hdr) \ |
| 631 | (hdr)->host_addr, (hdr)->me_addr, \ | 652 | (hdr)->host_addr, (hdr)->me_addr, \ |
| 632 | (hdr)->length, (hdr)->msg_complete | 653 | (hdr)->length, (hdr)->internal, (hdr)->msg_complete |
| 633 | 654 | ||
| 634 | #endif | 655 | #endif |
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index 994ca4aff1a3..a58320c0c049 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c | |||
| @@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr { | |||
| 92 | * @cl: NFC host client | 92 | * @cl: NFC host client |
| 93 | * @cl_info: NFC info host client | 93 | * @cl_info: NFC info host client |
| 94 | * @init_work: perform connection to the info client | 94 | * @init_work: perform connection to the info client |
| 95 | * @fw_ivn: NFC Intervace Version Number | 95 | * @fw_ivn: NFC Interface Version Number |
| 96 | * @vendor_id: NFC manufacturer ID | 96 | * @vendor_id: NFC manufacturer ID |
| 97 | * @radio_type: NFC radio type | 97 | * @radio_type: NFC radio type |
| 98 | */ | 98 | */ |
| @@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) | |||
| 163 | return 0; | 163 | return 0; |
| 164 | 164 | ||
| 165 | default: | 165 | default: |
| 166 | dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", | 166 | dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", |
| 167 | ndev->radio_type); | 167 | ndev->radio_type); |
| 168 | 168 | ||
| 169 | return -EINVAL; | 169 | return -EINVAL; |
| @@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) | |||
| 175 | ndev->bus_name = "pn544"; | 175 | ndev->bus_name = "pn544"; |
| 176 | return 0; | 176 | return 0; |
| 177 | default: | 177 | default: |
| 178 | dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", | 178 | dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", |
| 179 | ndev->radio_type); | 179 | ndev->radio_type); |
| 180 | 180 | ||
| 181 | return -EINVAL; | 181 | return -EINVAL; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | default: | 184 | default: |
| 185 | dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n", | 185 | dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", |
| 186 | ndev->vendor_id); | 186 | ndev->vendor_id); |
| 187 | 187 | ||
| 188 | return -EINVAL; | 188 | return -EINVAL; |
| @@ -428,7 +428,7 @@ static void mei_nfc_init(struct work_struct *work) | |||
| 428 | mutex_unlock(&dev->device_lock); | 428 | mutex_unlock(&dev->device_lock); |
| 429 | 429 | ||
| 430 | if (mei_nfc_if_version(ndev) < 0) { | 430 | if (mei_nfc_if_version(ndev) < 0) { |
| 431 | dev_err(&dev->pdev->dev, "Could not get the NFC interfave version"); | 431 | dev_err(&dev->pdev->dev, "Could not get the NFC interface version"); |
| 432 | 432 | ||
| 433 | goto err; | 433 | goto err; |
| 434 | } | 434 | } |
| @@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work) | |||
| 469 | return; | 469 | return; |
| 470 | 470 | ||
| 471 | err: | 471 | err: |
| 472 | mutex_lock(&dev->device_lock); | ||
| 472 | mei_nfc_free(ndev); | 473 | mei_nfc_free(ndev); |
| 474 | mutex_unlock(&dev->device_lock); | ||
| 473 | 475 | ||
| 474 | return; | 476 | return; |
| 475 | } | 477 | } |
| @@ -481,7 +483,7 @@ int mei_nfc_host_init(struct mei_device *dev) | |||
| 481 | struct mei_cl *cl_info, *cl = NULL; | 483 | struct mei_cl *cl_info, *cl = NULL; |
| 482 | int i, ret; | 484 | int i, ret; |
| 483 | 485 | ||
| 484 | /* already initialzed */ | 486 | /* already initialized */ |
| 485 | if (ndev->cl_info) | 487 | if (ndev->cl_info) |
| 486 | return 0; | 488 | return 0; |
| 487 | 489 | ||
| @@ -547,12 +549,16 @@ err: | |||
| 547 | return ret; | 549 | return ret; |
| 548 | } | 550 | } |
| 549 | 551 | ||
| 550 | void mei_nfc_host_exit(void) | 552 | void mei_nfc_host_exit(struct mei_device *dev) |
| 551 | { | 553 | { |
| 552 | struct mei_nfc_dev *ndev = &nfc_dev; | 554 | struct mei_nfc_dev *ndev = &nfc_dev; |
| 553 | 555 | ||
| 556 | cancel_work_sync(&ndev->init_work); | ||
| 557 | |||
| 558 | mutex_lock(&dev->device_lock); | ||
| 554 | if (ndev->cl && ndev->cl->device) | 559 | if (ndev->cl && ndev->cl->device) |
| 555 | mei_cl_remove_device(ndev->cl->device); | 560 | mei_cl_remove_device(ndev->cl->device); |
| 556 | 561 | ||
| 557 | mei_nfc_free(ndev); | 562 | mei_nfc_free(ndev); |
| 563 | mutex_unlock(&dev->device_lock); | ||
| 558 | } | 564 | } |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b96205aece0c..ddadd08956f4 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { | |||
| 76 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, | 76 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, |
| 77 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, | 77 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, |
| 78 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, | 78 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, |
| 79 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, | 79 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)}, |
| 80 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, | 80 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, |
| 81 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, | 81 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, |
| 82 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)}, | ||
| 83 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)}, | ||
| 82 | 84 | ||
| 83 | /* required last entry */ | 85 | /* required last entry */ |
| 84 | {0, } | 86 | {0, } |
| @@ -142,6 +144,21 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 142 | dev_err(&pdev->dev, "failed to get pci regions.\n"); | 144 | dev_err(&pdev->dev, "failed to get pci regions.\n"); |
| 143 | goto disable_device; | 145 | goto disable_device; |
| 144 | } | 146 | } |
| 147 | |||
| 148 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || | ||
| 149 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | ||
| 150 | |||
| 151 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
| 152 | if (err) | ||
| 153 | err = dma_set_coherent_mask(&pdev->dev, | ||
| 154 | DMA_BIT_MASK(32)); | ||
| 155 | } | ||
| 156 | if (err) { | ||
| 157 | dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); | ||
| 158 | goto release_regions; | ||
| 159 | } | ||
| 160 | |||
| 161 | |||
| 145 | /* allocates and initializes the mei dev structure */ | 162 | /* allocates and initializes the mei dev structure */ |
| 146 | dev = mei_me_dev_init(pdev); | 163 | dev = mei_me_dev_init(pdev); |
| 147 | if (!dev) { | 164 | if (!dev) { |
| @@ -195,8 +212,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 195 | return 0; | 212 | return 0; |
| 196 | 213 | ||
| 197 | release_irq: | 214 | release_irq: |
| 215 | mei_cancel_work(dev); | ||
| 198 | mei_disable_interrupts(dev); | 216 | mei_disable_interrupts(dev); |
| 199 | flush_scheduled_work(); | ||
| 200 | free_irq(pdev->irq, dev); | 217 | free_irq(pdev->irq, dev); |
| 201 | disable_msi: | 218 | disable_msi: |
| 202 | pci_disable_msi(pdev); | 219 | pci_disable_msi(pdev); |
| @@ -304,16 +321,14 @@ static int mei_me_pci_resume(struct device *device) | |||
| 304 | return err; | 321 | return err; |
| 305 | } | 322 | } |
| 306 | 323 | ||
| 307 | mutex_lock(&dev->device_lock); | 324 | err = mei_restart(dev); |
| 308 | dev->dev_state = MEI_DEV_POWER_UP; | 325 | if (err) |
| 309 | mei_clear_interrupts(dev); | 326 | return err; |
| 310 | mei_reset(dev, 1); | ||
| 311 | mutex_unlock(&dev->device_lock); | ||
| 312 | 327 | ||
| 313 | /* Start timer if stopped in suspend */ | 328 | /* Start timer if stopped in suspend */ |
| 314 | schedule_delayed_work(&dev->timer_work, HZ); | 329 | schedule_delayed_work(&dev->timer_work, HZ); |
| 315 | 330 | ||
| 316 | return err; | 331 | return 0; |
| 317 | } | 332 | } |
| 318 | static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); | 333 | static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); |
| 319 | #define MEI_ME_PM_OPS (&mei_me_pm_ops) | 334 | #define MEI_ME_PM_OPS (&mei_me_pm_ops) |
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index 9e354216c163..f70945ed96f6 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c | |||
| @@ -115,6 +115,7 @@ int mei_wd_send(struct mei_device *dev) | |||
| 115 | hdr.me_addr = dev->wd_cl.me_client_id; | 115 | hdr.me_addr = dev->wd_cl.me_client_id; |
| 116 | hdr.msg_complete = 1; | 116 | hdr.msg_complete = 1; |
| 117 | hdr.reserved = 0; | 117 | hdr.reserved = 0; |
| 118 | hdr.internal = 0; | ||
| 118 | 119 | ||
| 119 | if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) | 120 | if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) |
| 120 | hdr.length = MEI_WD_START_MSG_SIZE; | 121 | hdr.length = MEI_WD_START_MSG_SIZE; |
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c index 8aa42e738acc..653799b96bfa 100644 --- a/drivers/misc/mic/card/mic_virtio.c +++ b/drivers/misc/mic/card/mic_virtio.c | |||
| @@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev) | |||
| 154 | { | 154 | { |
| 155 | struct mic_vdev *mvdev = to_micvdev(vdev); | 155 | struct mic_vdev *mvdev = to_micvdev(vdev); |
| 156 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | 156 | struct mic_device_ctrl __iomem *dc = mvdev->dc; |
| 157 | int retry = 100, i; | 157 | int retry; |
| 158 | 158 | ||
| 159 | iowrite8(0, &dc->host_ack); | 159 | iowrite8(0, &dc->host_ack); |
| 160 | iowrite8(1, &dc->vdev_reset); | 160 | iowrite8(1, &dc->vdev_reset); |
| 161 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 161 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
| 162 | 162 | ||
| 163 | /* Wait till host completes all card accesses and acks the reset */ | 163 | /* Wait till host completes all card accesses and acks the reset */ |
| 164 | for (i = retry; i--;) { | 164 | for (retry = 100; retry--;) { |
| 165 | if (ioread8(&dc->host_ack)) | 165 | if (ioread8(&dc->host_ack)) |
| 166 | break; | 166 | break; |
| 167 | msleep(100); | 167 | msleep(100); |
| @@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev) | |||
| 187 | /* | 187 | /* |
| 188 | * The virtio_ring code calls this API when it wants to notify the Host. | 188 | * The virtio_ring code calls this API when it wants to notify the Host. |
| 189 | */ | 189 | */ |
| 190 | static void mic_notify(struct virtqueue *vq) | 190 | static bool mic_notify(struct virtqueue *vq) |
| 191 | { | 191 | { |
| 192 | struct mic_vdev *mvdev = vq->priv; | 192 | struct mic_vdev *mvdev = vq->priv; |
| 193 | 193 | ||
| 194 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 194 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
| 195 | return true; | ||
| 195 | } | 196 | } |
| 196 | 197 | ||
| 197 | static void mic_del_vq(struct virtqueue *vq, int n) | 198 | static void mic_del_vq(struct virtqueue *vq, int n) |
| @@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | |||
| 247 | /* First assign the vring's allocated in host memory */ | 248 | /* First assign the vring's allocated in host memory */ |
| 248 | vqconfig = mic_vq_config(mvdev->desc) + index; | 249 | vqconfig = mic_vq_config(mvdev->desc) + index; |
| 249 | memcpy_fromio(&config, vqconfig, sizeof(config)); | 250 | memcpy_fromio(&config, vqconfig, sizeof(config)); |
| 250 | _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN); | 251 | _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); |
| 251 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); | 252 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); |
| 252 | va = mic_card_map(mvdev->mdev, config.address, vr_size); | 253 | va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); |
| 253 | if (!va) | 254 | if (!va) |
| 254 | return ERR_PTR(-ENOMEM); | 255 | return ERR_PTR(-ENOMEM); |
| 255 | mvdev->vr[index] = va; | 256 | mvdev->vr[index] = va; |
| 256 | memset_io(va, 0x0, _vr_size); | 257 | memset_io(va, 0x0, _vr_size); |
| 257 | vq = vring_new_virtqueue(index, | 258 | vq = vring_new_virtqueue(index, le16_to_cpu(config.num), |
| 258 | config.num, MIC_VIRTIO_RING_ALIGN, vdev, | 259 | MIC_VIRTIO_RING_ALIGN, vdev, false, |
| 259 | false, | 260 | (void __force *)va, mic_notify, callback, |
| 260 | va, mic_notify, callback, name); | 261 | name); |
| 261 | if (!vq) { | 262 | if (!vq) { |
| 262 | err = -ENOMEM; | 263 | err = -ENOMEM; |
| 263 | goto unmap; | 264 | goto unmap; |
| @@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | |||
| 272 | 273 | ||
| 273 | /* Allocate and reassign used ring now */ | 274 | /* Allocate and reassign used ring now */ |
| 274 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | 275 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + |
| 275 | sizeof(struct vring_used_elem) * config.num); | 276 | sizeof(struct vring_used_elem) * |
| 277 | le16_to_cpu(config.num)); | ||
| 276 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | 278 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
| 277 | get_order(mvdev->used_size[index])); | 279 | get_order(mvdev->used_size[index])); |
| 278 | if (!used) { | 280 | if (!used) { |
| @@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 309 | { | 311 | { |
| 310 | struct mic_vdev *mvdev = to_micvdev(vdev); | 312 | struct mic_vdev *mvdev = to_micvdev(vdev); |
| 311 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | 313 | struct mic_device_ctrl __iomem *dc = mvdev->dc; |
| 312 | int i, err, retry = 100; | 314 | int i, err, retry; |
| 313 | 315 | ||
| 314 | /* We must have this many virtqueues. */ | 316 | /* We must have this many virtqueues. */ |
| 315 | if (nvqs > ioread8(&mvdev->desc->num_vq)) | 317 | if (nvqs > ioread8(&mvdev->desc->num_vq)) |
| @@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 331 | * rings have been re-assigned. | 333 | * rings have been re-assigned. |
| 332 | */ | 334 | */ |
| 333 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | 335 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); |
| 334 | for (i = retry; i--;) { | 336 | for (retry = 100; retry--;) { |
| 335 | if (!ioread8(&dc->used_address_updated)) | 337 | if (!ioread8(&dc->used_address_updated)) |
| 336 | break; | 338 | break; |
| 337 | msleep(100); | 339 | msleep(100); |
| @@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | |||
| 519 | struct device *dev; | 521 | struct device *dev; |
| 520 | int ret; | 522 | int ret; |
| 521 | 523 | ||
| 522 | for (i = mic_aligned_size(struct mic_bootparam); | 524 | for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; |
| 523 | i < MIC_DP_SIZE; i += mic_total_desc_size(d)) { | 525 | i += mic_total_desc_size(d)) { |
| 524 | d = mdrv->dp + i; | 526 | d = mdrv->dp + i; |
| 525 | dc = (void __iomem *)d + mic_aligned_desc_size(d); | 527 | dc = (void __iomem *)d + mic_aligned_desc_size(d); |
| 526 | /* | 528 | /* |
| @@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | |||
| 539 | continue; | 541 | continue; |
| 540 | 542 | ||
| 541 | /* device already exists */ | 543 | /* device already exists */ |
| 542 | dev = device_find_child(mdrv->dev, d, mic_match_desc); | 544 | dev = device_find_child(mdrv->dev, (void __force *)d, |
| 545 | mic_match_desc); | ||
| 543 | if (dev) { | 546 | if (dev) { |
| 544 | if (remove) | 547 | if (remove) |
| 545 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, | 548 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, |
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h index 2c5c22c93ba8..d0407ba53bb7 100644 --- a/drivers/misc/mic/card/mic_virtio.h +++ b/drivers/misc/mic/card/mic_virtio.h | |||
| @@ -42,8 +42,8 @@ | |||
| 42 | 42 | ||
| 43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) | 43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) |
| 44 | { | 44 | { |
| 45 | return mic_aligned_size(*desc) | 45 | return sizeof(*desc) |
| 46 | + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig) | 46 | + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) |
| 47 | + ioread8(&desc->feature_len) * 2 | 47 | + ioread8(&desc->feature_len) * 2 |
| 48 | + ioread8(&desc->config_len); | 48 | + ioread8(&desc->config_len); |
| 49 | } | 49 | } |
| @@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc) | |||
| 67 | } | 67 | } |
| 68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) | 68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) |
| 69 | { | 69 | { |
| 70 | return mic_aligned_desc_size(desc) + | 70 | return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); |
| 71 | mic_aligned_size(struct mic_device_ctrl); | ||
| 72 | } | 71 | } |
| 73 | 72 | ||
| 74 | int mic_devices_init(struct mic_driver *mdrv); | 73 | int mic_devices_init(struct mic_driver *mdrv); |
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 7558d9186438..b75c6b5cc20f 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
| @@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev) | |||
| 62 | { | 62 | { |
| 63 | struct mic_bootparam *bootparam = mdev->dp; | 63 | struct mic_bootparam *bootparam = mdev->dp; |
| 64 | 64 | ||
| 65 | bootparam->magic = MIC_MAGIC; | 65 | bootparam->magic = cpu_to_le32(MIC_MAGIC); |
| 66 | bootparam->c2h_shutdown_db = mdev->shutdown_db; | 66 | bootparam->c2h_shutdown_db = mdev->shutdown_db; |
| 67 | bootparam->h2c_shutdown_db = -1; | 67 | bootparam->h2c_shutdown_db = -1; |
| 68 | bootparam->h2c_config_db = -1; | 68 | bootparam->h2c_config_db = -1; |
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h index 3574cc375bb9..1a6edce2ecde 100644 --- a/drivers/misc/mic/host/mic_device.h +++ b/drivers/misc/mic/host/mic_device.h | |||
| @@ -112,7 +112,7 @@ struct mic_device { | |||
| 112 | struct work_struct shutdown_work; | 112 | struct work_struct shutdown_work; |
| 113 | u8 state; | 113 | u8 state; |
| 114 | u8 shutdown_status; | 114 | u8 shutdown_status; |
| 115 | struct sysfs_dirent *state_sysfs; | 115 | struct kernfs_node *state_sysfs; |
| 116 | struct completion reset_wait; | 116 | struct completion reset_wait; |
| 117 | void *log_buf_addr; | 117 | void *log_buf_addr; |
| 118 | int *log_buf_len; | 118 | int *log_buf_len; |
| @@ -134,6 +134,8 @@ struct mic_device { | |||
| 134 | * @send_intr: Send an interrupt for a particular doorbell on the card. | 134 | * @send_intr: Send an interrupt for a particular doorbell on the card. |
| 135 | * @ack_interrupt: Hardware specific operations to ack the h/w on | 135 | * @ack_interrupt: Hardware specific operations to ack the h/w on |
| 136 | * receipt of an interrupt. | 136 | * receipt of an interrupt. |
| 137 | * @intr_workarounds: Hardware specific workarounds needed after | ||
| 138 | * handling an interrupt. | ||
| 137 | * @reset: Reset the remote processor. | 139 | * @reset: Reset the remote processor. |
| 138 | * @reset_fw_ready: Reset firmware ready field. | 140 | * @reset_fw_ready: Reset firmware ready field. |
| 139 | * @is_fw_ready: Check if firmware is ready for OS download. | 141 | * @is_fw_ready: Check if firmware is ready for OS download. |
| @@ -149,6 +151,7 @@ struct mic_hw_ops { | |||
| 149 | void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); | 151 | void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); |
| 150 | void (*send_intr)(struct mic_device *mdev, int doorbell); | 152 | void (*send_intr)(struct mic_device *mdev, int doorbell); |
| 151 | u32 (*ack_interrupt)(struct mic_device *mdev); | 153 | u32 (*ack_interrupt)(struct mic_device *mdev); |
| 154 | void (*intr_workarounds)(struct mic_device *mdev); | ||
| 152 | void (*reset)(struct mic_device *mdev); | 155 | void (*reset)(struct mic_device *mdev); |
| 153 | void (*reset_fw_ready)(struct mic_device *mdev); | 156 | void (*reset_fw_ready)(struct mic_device *mdev); |
| 154 | bool (*is_fw_ready)(struct mic_device *mdev); | 157 | bool (*is_fw_ready)(struct mic_device *mdev); |
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c index ad838c7651c4..c04a021e20c7 100644 --- a/drivers/misc/mic/host/mic_main.c +++ b/drivers/misc/mic/host/mic_main.c | |||
| @@ -115,7 +115,7 @@ static irqreturn_t mic_shutdown_db(int irq, void *data) | |||
| 115 | struct mic_device *mdev = data; | 115 | struct mic_device *mdev = data; |
| 116 | struct mic_bootparam *bootparam = mdev->dp; | 116 | struct mic_bootparam *bootparam = mdev->dp; |
| 117 | 117 | ||
| 118 | mdev->ops->ack_interrupt(mdev); | 118 | mdev->ops->intr_workarounds(mdev); |
| 119 | 119 | ||
| 120 | switch (bootparam->shutdown_status) { | 120 | switch (bootparam->shutdown_status) { |
| 121 | case MIC_HALTED: | 121 | case MIC_HALTED: |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 5b8494bd1e00..752ff873f891 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c | |||
| @@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, | |||
| 41 | * We are copying from IO below an should ideally use something | 41 | * We are copying from IO below an should ideally use something |
| 42 | * like copy_to_user_fromio(..) if it existed. | 42 | * like copy_to_user_fromio(..) if it existed. |
| 43 | */ | 43 | */ |
| 44 | if (copy_to_user(ubuf, dbuf, len)) { | 44 | if (copy_to_user(ubuf, (void __force *)dbuf, len)) { |
| 45 | err = -EFAULT; | 45 | err = -EFAULT; |
| 46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
| 47 | __func__, __LINE__, err); | 47 | __func__, __LINE__, err); |
| @@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, | |||
| 66 | * We are copying to IO below and should ideally use something | 66 | * We are copying to IO below and should ideally use something |
| 67 | * like copy_from_user_toio(..) if it existed. | 67 | * like copy_from_user_toio(..) if it existed. |
| 68 | */ | 68 | */ |
| 69 | if (copy_from_user(dbuf, ubuf, len)) { | 69 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { |
| 70 | err = -EFAULT; | 70 | err = -EFAULT; |
| 71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
| 72 | __func__, __LINE__, err); | 72 | __func__, __LINE__, err); |
| @@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev) | |||
| 293 | continue; | 293 | continue; |
| 294 | } | 294 | } |
| 295 | mvdev->mvr[i].vrh.vring.used = | 295 | mvdev->mvr[i].vrh.vring.used = |
| 296 | mvdev->mdev->aper.va + | 296 | (void __force *)mvdev->mdev->aper.va + |
| 297 | le64_to_cpu(vqconfig[i].used_address); | 297 | le64_to_cpu(vqconfig[i].used_address); |
| 298 | } | 298 | } |
| 299 | 299 | ||
| @@ -369,7 +369,7 @@ static irqreturn_t mic_virtio_intr_handler(int irq, void *data) | |||
| 369 | struct mic_vdev *mvdev = data; | 369 | struct mic_vdev *mvdev = data; |
| 370 | struct mic_device *mdev = mvdev->mdev; | 370 | struct mic_device *mdev = mvdev->mdev; |
| 371 | 371 | ||
| 372 | mdev->ops->ack_interrupt(mdev); | 372 | mdev->ops->intr_workarounds(mdev); |
| 373 | schedule_work(&mvdev->virtio_bh_work); | 373 | schedule_work(&mvdev->virtio_bh_work); |
| 374 | return IRQ_HANDLED; | 374 | return IRQ_HANDLED; |
| 375 | } | 375 | } |
| @@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, | |||
| 378 | void __user *argp) | 378 | void __user *argp) |
| 379 | { | 379 | { |
| 380 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | 380 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); |
| 381 | int ret = 0, retry = 100, i; | 381 | int ret = 0, retry, i; |
| 382 | struct mic_bootparam *bootparam = mvdev->mdev->dp; | 382 | struct mic_bootparam *bootparam = mvdev->mdev->dp; |
| 383 | s8 db = bootparam->h2c_config_db; | 383 | s8 db = bootparam->h2c_config_db; |
| 384 | 384 | ||
| @@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, | |||
| 401 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | 401 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; |
| 402 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | 402 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); |
| 403 | 403 | ||
| 404 | for (i = retry; i--;) { | 404 | for (retry = 100; retry--;) { |
| 405 | ret = wait_event_timeout(wake, | 405 | ret = wait_event_timeout(wake, |
| 406 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | 406 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); |
| 407 | if (ret) | 407 | if (ret) |
| @@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev, | |||
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | /* Find the first free device page entry */ | 469 | /* Find the first free device page entry */ |
| 470 | for (i = mic_aligned_size(struct mic_bootparam); | 470 | for (i = sizeof(struct mic_bootparam); |
| 471 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); | 471 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); |
| 472 | i += mic_total_desc_size(devp)) { | 472 | i += mic_total_desc_size(devp)) { |
| 473 | devp = mdev->dp + i; | 473 | devp = mdev->dp + i; |
| @@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, | |||
| 525 | char irqname[10]; | 525 | char irqname[10]; |
| 526 | struct mic_bootparam *bootparam = mdev->dp; | 526 | struct mic_bootparam *bootparam = mdev->dp; |
| 527 | u16 num; | 527 | u16 num; |
| 528 | dma_addr_t vr_addr; | ||
| 528 | 529 | ||
| 529 | mutex_lock(&mdev->mic_mutex); | 530 | mutex_lock(&mdev->mic_mutex); |
| 530 | 531 | ||
| @@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, | |||
| 559 | } | 560 | } |
| 560 | vr->len = vr_size; | 561 | vr->len = vr_size; |
| 561 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | 562 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); |
| 562 | vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i; | 563 | vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); |
| 563 | vqconfig[i].address = mic_map_single(mdev, | 564 | vr_addr = mic_map_single(mdev, vr->va, vr_size); |
| 564 | vr->va, vr_size); | 565 | if (mic_map_error(vr_addr)) { |
| 565 | if (mic_map_error(vqconfig[i].address)) { | ||
| 566 | free_pages((unsigned long)vr->va, get_order(vr_size)); | 566 | free_pages((unsigned long)vr->va, get_order(vr_size)); |
| 567 | ret = -ENOMEM; | 567 | ret = -ENOMEM; |
| 568 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | 568 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
| 569 | __func__, __LINE__, ret); | 569 | __func__, __LINE__, ret); |
| 570 | goto err; | 570 | goto err; |
| 571 | } | 571 | } |
| 572 | vqconfig[i].address = cpu_to_le64(vqconfig[i].address); | 572 | vqconfig[i].address = cpu_to_le64(vr_addr); |
| 573 | 573 | ||
| 574 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); | 574 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); |
| 575 | ret = vringh_init_kern(&mvr->vrh, | 575 | ret = vringh_init_kern(&mvr->vrh, |
| @@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) | |||
| 639 | struct mic_vdev *tmp_mvdev; | 639 | struct mic_vdev *tmp_mvdev; |
| 640 | struct mic_device *mdev = mvdev->mdev; | 640 | struct mic_device *mdev = mvdev->mdev; |
| 641 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | 641 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); |
| 642 | int i, ret, retry = 100; | 642 | int i, ret, retry; |
| 643 | struct mic_vqconfig *vqconfig; | 643 | struct mic_vqconfig *vqconfig; |
| 644 | struct mic_bootparam *bootparam = mdev->dp; | 644 | struct mic_bootparam *bootparam = mdev->dp; |
| 645 | s8 db; | 645 | s8 db; |
| @@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) | |||
| 652 | "Requesting hot remove id %d\n", mvdev->virtio_id); | 652 | "Requesting hot remove id %d\n", mvdev->virtio_id); |
| 653 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | 653 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; |
| 654 | mdev->ops->send_intr(mdev, db); | 654 | mdev->ops->send_intr(mdev, db); |
| 655 | for (i = retry; i--;) { | 655 | for (retry = 100; retry--;) { |
| 656 | ret = wait_event_timeout(wake, | 656 | ret = wait_event_timeout(wake, |
| 657 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | 657 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); |
| 658 | if (ret) | 658 | if (ret) |
| 659 | break; | 659 | break; |
| 660 | } | 660 | } |
| 661 | dev_dbg(mdev->sdev->parent, | 661 | dev_dbg(mdev->sdev->parent, |
| 662 | "Device id %d config_change %d guest_ack %d\n", | 662 | "Device id %d config_change %d guest_ack %d retry %d\n", |
| 663 | mvdev->virtio_id, mvdev->dc->config_change, | 663 | mvdev->virtio_id, mvdev->dc->config_change, |
| 664 | mvdev->dc->guest_ack); | 664 | mvdev->dc->guest_ack, retry); |
| 665 | mvdev->dc->config_change = 0; | 665 | mvdev->dc->config_change = 0; |
| 666 | mvdev->dc->guest_ack = 0; | 666 | mvdev->dc->guest_ack = 0; |
| 667 | skip_hot_remove: | 667 | skip_hot_remove: |
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 81e9541b784c..5562fdd3ef4e 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c | |||
| @@ -174,35 +174,38 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell) | |||
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | /** | 176 | /** |
| 177 | * mic_ack_interrupt - Device specific interrupt handling. | 177 | * mic_x100_ack_interrupt - Read the interrupt sources register and |
| 178 | * @mdev: pointer to mic_device instance | 178 | * clear it. This function will be called in the MSI/INTx case. |
| 179 | * @mdev: Pointer to mic_device instance. | ||
| 179 | * | 180 | * |
| 180 | * Returns: bitmask of doorbell events triggered. | 181 | * Returns: bitmask of interrupt sources triggered. |
| 181 | */ | 182 | */ |
| 182 | static u32 mic_x100_ack_interrupt(struct mic_device *mdev) | 183 | static u32 mic_x100_ack_interrupt(struct mic_device *mdev) |
| 183 | { | 184 | { |
| 184 | u32 reg = 0; | ||
| 185 | struct mic_mw *mw = &mdev->mmio; | ||
| 186 | u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; | 185 | u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; |
| 186 | u32 reg = mic_mmio_read(&mdev->mmio, sicr0); | ||
| 187 | mic_mmio_write(&mdev->mmio, reg, sicr0); | ||
| 188 | return reg; | ||
| 189 | } | ||
| 190 | |||
| 191 | /** | ||
| 192 | * mic_x100_intr_workarounds - These hardware specific workarounds are | ||
| 193 | * to be invoked everytime an interrupt is handled. | ||
| 194 | * @mdev: Pointer to mic_device instance. | ||
| 195 | * | ||
| 196 | * Returns: none | ||
| 197 | */ | ||
| 198 | static void mic_x100_intr_workarounds(struct mic_device *mdev) | ||
| 199 | { | ||
| 200 | struct mic_mw *mw = &mdev->mmio; | ||
| 187 | 201 | ||
| 188 | /* Clear pending bit array. */ | 202 | /* Clear pending bit array. */ |
| 189 | if (MIC_A0_STEP == mdev->stepping) | 203 | if (MIC_A0_STEP == mdev->stepping) |
| 190 | mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + | 204 | mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + |
| 191 | MIC_X100_SBOX_MSIXPBACR); | 205 | MIC_X100_SBOX_MSIXPBACR); |
| 192 | 206 | ||
| 193 | if (mdev->irq_info.num_vectors <= 1) { | ||
| 194 | reg = mic_mmio_read(mw, sicr0); | ||
| 195 | |||
| 196 | if (unlikely(!reg)) | ||
| 197 | goto done; | ||
| 198 | |||
| 199 | mic_mmio_write(mw, reg, sicr0); | ||
| 200 | } | ||
| 201 | |||
| 202 | if (mdev->stepping >= MIC_B0_STEP) | 207 | if (mdev->stepping >= MIC_B0_STEP) |
| 203 | mdev->intr_ops->enable_interrupts(mdev); | 208 | mdev->intr_ops->enable_interrupts(mdev); |
| 204 | done: | ||
| 205 | return reg; | ||
| 206 | } | 209 | } |
| 207 | 210 | ||
| 208 | /** | 211 | /** |
| @@ -397,8 +400,8 @@ mic_x100_load_ramdisk(struct mic_device *mdev) | |||
| 397 | * so copy over the ramdisk @ 128M. | 400 | * so copy over the ramdisk @ 128M. |
| 398 | */ | 401 | */ |
| 399 | memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); | 402 | memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); |
| 400 | iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image); | 403 | iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image); |
| 401 | iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size); | 404 | iowrite32(fw->size, &bp->hdr.ramdisk_size); |
| 402 | release_firmware(fw); | 405 | release_firmware(fw); |
| 403 | error: | 406 | error: |
| 404 | return rc; | 407 | return rc; |
| @@ -553,6 +556,7 @@ struct mic_hw_ops mic_x100_ops = { | |||
| 553 | .write_spad = mic_x100_write_spad, | 556 | .write_spad = mic_x100_write_spad, |
| 554 | .send_intr = mic_x100_send_intr, | 557 | .send_intr = mic_x100_send_intr, |
| 555 | .ack_interrupt = mic_x100_ack_interrupt, | 558 | .ack_interrupt = mic_x100_ack_interrupt, |
| 559 | .intr_workarounds = mic_x100_intr_workarounds, | ||
| 556 | .reset = mic_x100_hw_reset, | 560 | .reset = mic_x100_hw_reset, |
| 557 | .reset_fw_ready = mic_x100_reset_fw_ready, | 561 | .reset_fw_ready = mic_x100_reset_fw_ready, |
| 558 | .is_fw_ready = mic_x100_is_fw_ready, | 562 | .is_fw_ready = mic_x100_is_fw_ready, |
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 652593fc486d..128d5615c804 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c | |||
| @@ -828,6 +828,7 @@ enum xp_retval | |||
| 828 | xpc_allocate_msg_wait(struct xpc_channel *ch) | 828 | xpc_allocate_msg_wait(struct xpc_channel *ch) |
| 829 | { | 829 | { |
| 830 | enum xp_retval ret; | 830 | enum xp_retval ret; |
| 831 | DEFINE_WAIT(wait); | ||
| 831 | 832 | ||
| 832 | if (ch->flags & XPC_C_DISCONNECTING) { | 833 | if (ch->flags & XPC_C_DISCONNECTING) { |
| 833 | DBUG_ON(ch->reason == xpInterrupted); | 834 | DBUG_ON(ch->reason == xpInterrupted); |
| @@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) | |||
| 835 | } | 836 | } |
| 836 | 837 | ||
| 837 | atomic_inc(&ch->n_on_msg_allocate_wq); | 838 | atomic_inc(&ch->n_on_msg_allocate_wq); |
| 838 | ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); | 839 | prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); |
| 840 | ret = schedule_timeout(1); | ||
| 841 | finish_wait(&ch->msg_allocate_wq, &wait); | ||
| 839 | atomic_dec(&ch->n_on_msg_allocate_wq); | 842 | atomic_dec(&ch->n_on_msg_allocate_wq); |
| 840 | 843 | ||
| 841 | if (ch->flags & XPC_C_DISCONNECTING) { | 844 | if (ch->flags & XPC_C_DISCONNECTING) { |
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 8d64b681dd93..3aed525e55b4 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
| @@ -812,7 +812,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty) | |||
| 812 | kfree_skb(st_gdata->tx_skb); | 812 | kfree_skb(st_gdata->tx_skb); |
| 813 | st_gdata->tx_skb = NULL; | 813 | st_gdata->tx_skb = NULL; |
| 814 | 814 | ||
| 815 | tty->ops->flush_buffer(tty); | 815 | tty_driver_flush_buffer(tty); |
| 816 | return; | 816 | return; |
| 817 | } | 817 | } |
| 818 | 818 | ||
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 96853a09788a..9d3dbb28734b 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c | |||
| @@ -531,7 +531,6 @@ long st_kim_stop(void *kim_data) | |||
| 531 | /* Flush any pending characters in the driver and discipline. */ | 531 | /* Flush any pending characters in the driver and discipline. */ |
| 532 | tty_ldisc_flush(tty); | 532 | tty_ldisc_flush(tty); |
| 533 | tty_driver_flush_buffer(tty); | 533 | tty_driver_flush_buffer(tty); |
| 534 | tty->ops->flush_buffer(tty); | ||
| 535 | } | 534 | } |
| 536 | 535 | ||
| 537 | /* send uninstall notification to UIM */ | 536 | /* send uninstall notification to UIM */ |
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index c98b03b99353..d35cda06b5e8 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c | |||
| @@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id, | |||
| 165 | * true if required hypercalls (or fallback hypercalls) are | 165 | * true if required hypercalls (or fallback hypercalls) are |
| 166 | * supported by the host, false otherwise. | 166 | * supported by the host, false otherwise. |
| 167 | */ | 167 | */ |
| 168 | static bool vmci_check_host_caps(struct pci_dev *pdev) | 168 | static int vmci_check_host_caps(struct pci_dev *pdev) |
| 169 | { | 169 | { |
| 170 | bool result; | 170 | bool result; |
| 171 | struct vmci_resource_query_msg *msg; | 171 | struct vmci_resource_query_msg *msg; |
| @@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev) | |||
| 176 | check_msg = kmalloc(msg_size, GFP_KERNEL); | 176 | check_msg = kmalloc(msg_size, GFP_KERNEL); |
| 177 | if (!check_msg) { | 177 | if (!check_msg) { |
| 178 | dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); | 178 | dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); |
| 179 | return false; | 179 | return -ENOMEM; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, | 182 | check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, |
| @@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev) | |||
| 196 | __func__, result ? "PASSED" : "FAILED"); | 196 | __func__, result ? "PASSED" : "FAILED"); |
| 197 | 197 | ||
| 198 | /* We need the vector. There are no fallbacks. */ | 198 | /* We need the vector. There are no fallbacks. */ |
| 199 | return result; | 199 | return result ? 0 : -ENXIO; |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | /* | 202 | /* |
| @@ -564,12 +564,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, | |||
| 564 | dev_warn(&pdev->dev, | 564 | dev_warn(&pdev->dev, |
| 565 | "VMCI device unable to register notification bitmap with PPN 0x%x\n", | 565 | "VMCI device unable to register notification bitmap with PPN 0x%x\n", |
| 566 | (u32) bitmap_ppn); | 566 | (u32) bitmap_ppn); |
| 567 | error = -ENXIO; | ||
| 567 | goto err_remove_vmci_dev_g; | 568 | goto err_remove_vmci_dev_g; |
| 568 | } | 569 | } |
| 569 | } | 570 | } |
| 570 | 571 | ||
| 571 | /* Check host capabilities. */ | 572 | /* Check host capabilities. */ |
| 572 | if (!vmci_check_host_caps(pdev)) | 573 | error = vmci_check_host_caps(pdev); |
| 574 | if (error) | ||
| 573 | goto err_remove_bitmap; | 575 | goto err_remove_bitmap; |
| 574 | 576 | ||
| 575 | /* Enable device. */ | 577 | /* Enable device. */ |
