author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 06:55:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-08 06:55:41 -0400
commit		ef0625b70dac9405ac9d9928cf767108041a9e51 (patch)
tree		9b2a0ea588e23e477868f593f8f7397eedb71c2c /drivers
parent		3fc1479c5e78afa3013ad80b9b7367f0278c629b (diff)
parent		480bd3c4ad30558dd26c16e3f8358e36522d9af0 (diff)
Merge tag 'char-misc-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
"Here's the big set of driver patches for char/misc drivers. Nothing
major in here, the shortlog goes into the details. All have been in
the linux-next tree for a while with no issues"
* tag 'char-misc-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (80 commits)
mei: mei_txe_fw_sts can be static
mei: fix kernel-doc warnings
mei: fix KDoc documentation formatting
mei: drop me_client_presentation_num
mei: trivial: fix errors in prints in comments
mei: remove include to pci header from mei module files
mei: push pci cfg structure me hw
mei: remove the reference to pdev from mei_device
mei: move fw_status back to hw ops handlers
mei: get rid of most of the pci dependencies in mei
mei: push all standard settings into mei_device_init
mei: move mei_hbm_hdr function from hbm.h the hbm.c
mei: kill error message for allocation failure
mei: nfc: fix style warning
mei: fix style warning: Missing a blank line after declarations
mei: pg: fix cat and paste error in comments
mei: debugfs: add single buffer indicator
mei: debugfs: adjust print buffer
mei: add hbm and pg state in devstate debugfs print
Drivers: hv: vmbus: Enable interrupt driven flow control
...
Diffstat (limited to 'drivers')
55 files changed, 3020 insertions, 1335 deletions
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 65525c7e903c..34174d01462e 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -651,6 +651,7 @@ struct i8k_config_data {
 
 enum i8k_configs {
         DELL_LATITUDE_D520,
+        DELL_LATITUDE_E6540,
         DELL_PRECISION_490,
         DELL_STUDIO,
         DELL_XPS_M140,
@@ -661,6 +662,10 @@ static const struct i8k_config_data i8k_config_data[] = {
                 .fan_mult = 1,
                 .fan_max = I8K_FAN_TURBO,
         },
+        [DELL_LATITUDE_E6540] = {
+                .fan_mult = 1,
+                .fan_max = I8K_FAN_HIGH,
+        },
         [DELL_PRECISION_490] = {
                 .fan_mult = 1,
                 .fan_max = I8K_FAN_TURBO,
@@ -706,6 +711,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
                 .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
         },
         {
+                .ident = "Dell Latitude E6540",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6540"),
+                },
+                .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540],
+        },
+        {
                 .ident = "Dell Latitude 2",
                 .matches = {
                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 9b1a5ac4881d..c07dfe5c4da3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -843,7 +843,6 @@ static struct platform_driver hwicap_platform_driver = {
         .probe = hwicap_drv_probe,
         .remove = hwicap_drv_remove,
         .driver = {
-                .owner = THIS_MODULE,
                 .name = DRIVER_NAME,
                 .of_match_table = hwicap_of_match,
         },
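
The .owner removal above reflects that platform_driver_register() had by this point become a macro that passes THIS_MODULE to __platform_driver_register(), so the core fills in the module owner and the explicit field is redundant. A minimal sketch of the registration side without the field (illustrative only; the module_platform_driver() helper is assumed here and is not taken from this commit):

    static struct platform_driver hwicap_platform_driver = {
            .probe = hwicap_drv_probe,
            .remove = hwicap_drv_remove,
            .driver = {
                    .name = DRIVER_NAME,            /* .owner is now filled in by the core */
                    .of_match_table = hwicap_of_match,
            },
    };
    module_platform_driver(hwicap_platform_driver);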
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6f2f4727de2c..6a1f7de6fa54 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -70,8 +70,21 @@ config EXTCON_PALMAS
           Say Y here to enable support for USB peripheral and USB host
           detection by palmas usb.
 
+config EXTCON_RT8973A
+        tristate "RT8973A EXTCON support"
+        depends on I2C
+        select IRQ_DOMAIN
+        select REGMAP_I2C
+        select REGMAP_IRQ
+        help
+          If you say yes here you get support for the MUIC device of
+          Richtek RT8973A. The RT8973A is a USB port accessory detector
+          and switch that is optimized to protect low voltage system
+          from abnormal high input voltage (up to 28V).
+
 config EXTCON_SM5502
         tristate "SM5502 EXTCON support"
+        depends on I2C
         select IRQ_DOMAIN
         select REGMAP_I2C
         select REGMAP_IRQ
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index b38546eb522a..0370b42e5a27 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX77693)	+= extcon-max77693.o
 obj-$(CONFIG_EXTCON_MAX8997)	+= extcon-max8997.o
 obj-$(CONFIG_EXTCON_PALMAS)	+= extcon-palmas.o
+obj-$(CONFIG_EXTCON_RT8973A)	+= extcon-rt8973a.o
 obj-$(CONFIG_EXTCON_SM5502)	+= extcon-sm5502.o
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 5b7ec274cb63..72f19a37fd01 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -20,16 +20,16 @@
  *
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/extcon.h>
+#include <linux/extcon/extcon-gpio.h>
+#include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/gpio.h>
-#include <linux/extcon.h>
-#include <linux/extcon/extcon-gpio.h>
 
 struct gpio_extcon_data {
         struct extcon_dev *edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 77460f2c1ca1..490e27475bac 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -255,10 +255,14 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
         case ADC_DEBOUNCE_TIME_10MS:
         case ADC_DEBOUNCE_TIME_25MS:
         case ADC_DEBOUNCE_TIME_38_62MS:
-                ret = regmap_update_bits(info->max77693->regmap_muic,
-                                         MAX77693_MUIC_REG_CTRL3,
-                                         CONTROL3_ADCDBSET_MASK,
-                                         time << CONTROL3_ADCDBSET_SHIFT);
+                /*
+                 * Don't touch BTLDset, JIGset when you want to change adc
+                 * debounce time. If it writes other than 0 to BTLDset, JIGset
+                 * muic device will be reset and loose current state.
+                 */
+                ret = regmap_write(info->max77693->regmap_muic,
+                                   MAX77693_MUIC_REG_CTRL3,
+                                   time << CONTROL3_ADCDBSET_SHIFT);
                 if (ret) {
                         dev_err(info->dev, "failed to set ADC debounce time\n");
                         return ret;
@@ -1155,13 +1159,11 @@ static int max77693_muic_probe(struct platform_device *pdev)
 
                 virq = regmap_irq_get_virq(max77693->irq_data_muic,
                                 muic_irq->irq);
-                if (!virq) {
-                        ret = -EINVAL;
-                        goto err_irq;
-                }
+                if (!virq)
+                        return -EINVAL;
                 muic_irq->virq = virq;
 
-                ret = request_threaded_irq(virq, NULL,
+                ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
                                 max77693_muic_irq_handler,
                                 IRQF_NO_SUSPEND,
                                 muic_irq->name, info);
@@ -1170,7 +1172,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
                                 "failed: irq request (IRQ: %d,"
                                 " error :%d)\n",
                                 muic_irq->irq, ret);
-                        goto err_irq;
+                        return ret;
                 }
         }
 
@@ -1179,15 +1181,14 @@ static int max77693_muic_probe(struct platform_device *pdev)
                                             max77693_extcon_cable);
         if (IS_ERR(info->edev)) {
                 dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
-                ret = -ENOMEM;
-                goto err_irq;
+                return -ENOMEM;
         }
         info->edev->name = DEV_NAME;
 
         ret = devm_extcon_dev_register(&pdev->dev, info->edev);
         if (ret) {
                 dev_err(&pdev->dev, "failed to register extcon device\n");
-                goto err_irq;
+                return ret;
         }
 
         /* Initialize MUIC register by using platform data or default data */
@@ -1265,7 +1266,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
                                 MAX77693_MUIC_REG_ID, &id);
         if (ret < 0) {
                 dev_err(&pdev->dev, "failed to read revision number\n");
-                goto err_irq;
+                return ret;
         }
         dev_info(info->dev, "device ID : 0x%x\n", id);
 
@@ -1285,20 +1286,12 @@ static int max77693_muic_probe(struct platform_device *pdev)
                         delay_jiffies);
 
         return ret;
-
-err_irq:
-        while (--i >= 0)
-                free_irq(muic_irqs[i].virq, info);
-        return ret;
 }
 
 static int max77693_muic_remove(struct platform_device *pdev)
 {
         struct max77693_muic_info *info = platform_get_drvdata(pdev);
-        int i;
 
-        for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
-                free_irq(muic_irqs[i].virq, info);
         cancel_work_sync(&info->irq_work);
         input_unregister_device(info->dock);
 
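
The error-path simplification above falls out of switching to devm_request_threaded_irq(): a device-managed IRQ is released automatically when probe fails later or when the device is unbound, so the hand-rolled err_irq unwind loop and the free_irq() calls in max77693_muic_remove() are no longer needed. A minimal sketch of the pattern (the handler and name below are hypothetical, not copied from the driver):

    /* Managed request: no matching free_irq() on error paths or in remove() */
    ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
                                    my_muic_irq_handler,
                                    IRQF_NO_SUSPEND,
                                    "my-muic-irq", info);
    if (ret)
            return ret;     /* previously: goto err_irq and unwind by hand */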
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
new file mode 100644
index 000000000000..a784b2d5ee72
--- /dev/null
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -0,0 +1,740 @@
1 | /* | ||
2 | * extcon-rt8973a.c - Richtek RT8973A extcon driver to support USB switches | ||
3 | * | ||
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd | ||
5 | * Author: Chanwoo Choi <cw00.choi@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/err.h> | ||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/input.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/irqdomain.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/regmap.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/extcon.h> | ||
24 | |||
25 | #include "extcon-rt8973a.h" | ||
26 | |||
27 | #define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ | ||
28 | |||
29 | struct muic_irq { | ||
30 | unsigned int irq; | ||
31 | const char *name; | ||
32 | unsigned int virq; | ||
33 | }; | ||
34 | |||
35 | struct reg_data { | ||
36 | u8 reg; | ||
37 | u8 mask; | ||
38 | u8 val; | ||
39 | bool invert; | ||
40 | }; | ||
41 | |||
42 | struct rt8973a_muic_info { | ||
43 | struct device *dev; | ||
44 | struct extcon_dev *edev; | ||
45 | |||
46 | struct i2c_client *i2c; | ||
47 | struct regmap *regmap; | ||
48 | |||
49 | struct regmap_irq_chip_data *irq_data; | ||
50 | struct muic_irq *muic_irqs; | ||
51 | unsigned int num_muic_irqs; | ||
52 | int irq; | ||
53 | bool irq_attach; | ||
54 | bool irq_detach; | ||
55 | bool irq_ovp; | ||
56 | bool irq_otp; | ||
57 | struct work_struct irq_work; | ||
58 | |||
59 | struct reg_data *reg_data; | ||
60 | unsigned int num_reg_data; | ||
61 | bool auto_config; | ||
62 | |||
63 | struct mutex mutex; | ||
64 | |||
65 | /* | ||
66 | * Use delayed workqueue to detect cable state and then | ||
67 | * notify cable state to notifiee/platform through uevent. | ||
68 | * After completing the booting of platform, the extcon provider | ||
69 | * driver should notify cable state to upper layer. | ||
70 | */ | ||
71 | struct delayed_work wq_detcable; | ||
72 | }; | ||
73 | |||
74 | /* Default value of RT8973A register to bring up MUIC device. */ | ||
75 | static struct reg_data rt8973a_reg_data[] = { | ||
76 | { | ||
77 | .reg = RT8973A_REG_CONTROL1, | ||
78 | .mask = RT8973A_REG_CONTROL1_ADC_EN_MASK | ||
79 | | RT8973A_REG_CONTROL1_USB_CHD_EN_MASK | ||
80 | | RT8973A_REG_CONTROL1_CHGTYP_MASK | ||
81 | | RT8973A_REG_CONTROL1_SWITCH_OPEN_MASK | ||
82 | | RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK | ||
83 | | RT8973A_REG_CONTROL1_INTM_MASK, | ||
84 | .val = RT8973A_REG_CONTROL1_ADC_EN_MASK | ||
85 | | RT8973A_REG_CONTROL1_USB_CHD_EN_MASK | ||
86 | | RT8973A_REG_CONTROL1_CHGTYP_MASK, | ||
87 | .invert = false, | ||
88 | }, | ||
89 | { /* sentinel */ } | ||
90 | }; | ||
91 | |||
92 | /* List of detectable cables */ | ||
93 | enum { | ||
94 | EXTCON_CABLE_USB = 0, | ||
95 | EXTCON_CABLE_USB_HOST, | ||
96 | EXTCON_CABLE_TA, | ||
97 | EXTCON_CABLE_JIG_OFF_USB, | ||
98 | EXTCON_CABLE_JIG_ON_USB, | ||
99 | EXTCON_CABLE_JIG_OFF_UART, | ||
100 | EXTCON_CABLE_JIG_ON_UART, | ||
101 | |||
102 | EXTCON_CABLE_END, | ||
103 | }; | ||
104 | |||
105 | static const char *rt8973a_extcon_cable[] = { | ||
106 | [EXTCON_CABLE_USB] = "USB", | ||
107 | [EXTCON_CABLE_USB_HOST] = "USB-Host", | ||
108 | [EXTCON_CABLE_TA] = "TA", | ||
109 | [EXTCON_CABLE_JIG_OFF_USB] = "JIG-USB-OFF", | ||
110 | [EXTCON_CABLE_JIG_ON_USB] = "JIG-USB-ON", | ||
111 | [EXTCON_CABLE_JIG_OFF_UART] = "JIG-UART-OFF", | ||
112 | [EXTCON_CABLE_JIG_ON_UART] = "JIG-UART-ON", | ||
113 | NULL, | ||
114 | }; | ||
115 | |||
116 | /* Define OVP (Over Voltage Protection), OTP (Over Temperature Protection) */ | ||
117 | enum rt8973a_event_type { | ||
118 | RT8973A_EVENT_ATTACH = 1, | ||
119 | RT8973A_EVENT_DETACH, | ||
120 | RT8973A_EVENT_OVP, | ||
121 | RT8973A_EVENT_OTP, | ||
122 | }; | ||
123 | |||
124 | /* Define supported accessory type */ | ||
125 | enum rt8973a_muic_acc_type { | ||
126 | RT8973A_MUIC_ADC_OTG = 0x0, | ||
127 | RT8973A_MUIC_ADC_AUDIO_SEND_END_BUTTON, | ||
128 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S1_BUTTON, | ||
129 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S2_BUTTON, | ||
130 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S3_BUTTON, | ||
131 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S4_BUTTON, | ||
132 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S5_BUTTON, | ||
133 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S6_BUTTON, | ||
134 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S7_BUTTON, | ||
135 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S8_BUTTON, | ||
136 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S9_BUTTON, | ||
137 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S10_BUTTON, | ||
138 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S11_BUTTON, | ||
139 | RT8973A_MUIC_ADC_AUDIO_REMOTE_S12_BUTTON, | ||
140 | RT8973A_MUIC_ADC_RESERVED_ACC_1, | ||
141 | RT8973A_MUIC_ADC_RESERVED_ACC_2, | ||
142 | RT8973A_MUIC_ADC_RESERVED_ACC_3, | ||
143 | RT8973A_MUIC_ADC_RESERVED_ACC_4, | ||
144 | RT8973A_MUIC_ADC_RESERVED_ACC_5, | ||
145 | RT8973A_MUIC_ADC_AUDIO_TYPE2, | ||
146 | RT8973A_MUIC_ADC_PHONE_POWERED_DEV, | ||
147 | RT8973A_MUIC_ADC_UNKNOWN_ACC_1, | ||
148 | RT8973A_MUIC_ADC_UNKNOWN_ACC_2, | ||
149 | RT8973A_MUIC_ADC_TA, | ||
150 | RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB, | ||
151 | RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB, | ||
152 | RT8973A_MUIC_ADC_UNKNOWN_ACC_3, | ||
153 | RT8973A_MUIC_ADC_UNKNOWN_ACC_4, | ||
154 | RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART, | ||
155 | RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART, | ||
156 | RT8973A_MUIC_ADC_UNKNOWN_ACC_5, | ||
157 | RT8973A_MUIC_ADC_OPEN = 0x1f, | ||
158 | |||
159 | /* The below accessories has same ADC value (0x1f). | ||
160 | So, Device type1 is used to separate specific accessory. */ | ||
161 | /* |---------|--ADC| */ | ||
162 | /* | [7:5]|[4:0]| */ | ||
163 | RT8973A_MUIC_ADC_USB = 0x3f, /* | 001|11111| */ | ||
164 | }; | ||
165 | |||
166 | /* List of supported interrupt for RT8973A */ | ||
167 | static struct muic_irq rt8973a_muic_irqs[] = { | ||
168 | { RT8973A_INT1_ATTACH, "muic-attach" }, | ||
169 | { RT8973A_INT1_DETACH, "muic-detach" }, | ||
170 | { RT8973A_INT1_CHGDET, "muic-chgdet" }, | ||
171 | { RT8973A_INT1_DCD_T, "muic-dcd-t" }, | ||
172 | { RT8973A_INT1_OVP, "muic-ovp" }, | ||
173 | { RT8973A_INT1_CONNECT, "muic-connect" }, | ||
174 | { RT8973A_INT1_ADC_CHG, "muic-adc-chg" }, | ||
175 | { RT8973A_INT1_OTP, "muic-otp" }, | ||
176 | { RT8973A_INT2_UVLO, "muic-uvlo" }, | ||
177 | { RT8973A_INT2_POR, "muic-por" }, | ||
178 | { RT8973A_INT2_OTP_FET, "muic-otp-fet" }, | ||
179 | { RT8973A_INT2_OVP_FET, "muic-ovp-fet" }, | ||
180 | { RT8973A_INT2_OCP_LATCH, "muic-ocp-latch" }, | ||
181 | { RT8973A_INT2_OCP, "muic-ocp" }, | ||
182 | { RT8973A_INT2_OVP_OCP, "muic-ovp-ocp" }, | ||
183 | }; | ||
184 | |||
185 | /* Define interrupt list of RT8973A to register regmap_irq */ | ||
186 | static const struct regmap_irq rt8973a_irqs[] = { | ||
187 | /* INT1 interrupts */ | ||
188 | { .reg_offset = 0, .mask = RT8973A_INT1_ATTACH_MASK, }, | ||
189 | { .reg_offset = 0, .mask = RT8973A_INT1_DETACH_MASK, }, | ||
190 | { .reg_offset = 0, .mask = RT8973A_INT1_CHGDET_MASK, }, | ||
191 | { .reg_offset = 0, .mask = RT8973A_INT1_DCD_T_MASK, }, | ||
192 | { .reg_offset = 0, .mask = RT8973A_INT1_OVP_MASK, }, | ||
193 | { .reg_offset = 0, .mask = RT8973A_INT1_CONNECT_MASK, }, | ||
194 | { .reg_offset = 0, .mask = RT8973A_INT1_ADC_CHG_MASK, }, | ||
195 | { .reg_offset = 0, .mask = RT8973A_INT1_OTP_MASK, }, | ||
196 | |||
197 | /* INT2 interrupts */ | ||
198 | { .reg_offset = 1, .mask = RT8973A_INT2_UVLOT_MASK,}, | ||
199 | { .reg_offset = 1, .mask = RT8973A_INT2_POR_MASK, }, | ||
200 | { .reg_offset = 1, .mask = RT8973A_INT2_OTP_FET_MASK, }, | ||
201 | { .reg_offset = 1, .mask = RT8973A_INT2_OVP_FET_MASK, }, | ||
202 | { .reg_offset = 1, .mask = RT8973A_INT2_OCP_LATCH_MASK, }, | ||
203 | { .reg_offset = 1, .mask = RT8973A_INT2_OCP_MASK, }, | ||
204 | { .reg_offset = 1, .mask = RT8973A_INT2_OVP_OCP_MASK, }, | ||
205 | }; | ||
206 | |||
207 | static const struct regmap_irq_chip rt8973a_muic_irq_chip = { | ||
208 | .name = "rt8973a", | ||
209 | .status_base = RT8973A_REG_INT1, | ||
210 | .mask_base = RT8973A_REG_INTM1, | ||
211 | .mask_invert = false, | ||
212 | .num_regs = 2, | ||
213 | .irqs = rt8973a_irqs, | ||
214 | .num_irqs = ARRAY_SIZE(rt8973a_irqs), | ||
215 | }; | ||
216 | |||
217 | /* Define regmap configuration of RT8973A for I2C communication */ | ||
218 | static bool rt8973a_muic_volatile_reg(struct device *dev, unsigned int reg) | ||
219 | { | ||
220 | switch (reg) { | ||
221 | case RT8973A_REG_INTM1: | ||
222 | case RT8973A_REG_INTM2: | ||
223 | return true; | ||
224 | default: | ||
225 | break; | ||
226 | } | ||
227 | return false; | ||
228 | } | ||
229 | |||
230 | static const struct regmap_config rt8973a_muic_regmap_config = { | ||
231 | .reg_bits = 8, | ||
232 | .val_bits = 8, | ||
233 | .volatile_reg = rt8973a_muic_volatile_reg, | ||
234 | .max_register = RT8973A_REG_END, | ||
235 | }; | ||
236 | |||
237 | /* Change DM_CON/DP_CON/VBUSIN switch according to cable type */ | ||
238 | static int rt8973a_muic_set_path(struct rt8973a_muic_info *info, | ||
239 | unsigned int con_sw, bool attached) | ||
240 | { | ||
241 | int ret; | ||
242 | |||
243 | /* | ||
244 | * Don't need to set h/w path according to cable type | ||
245 | * if Auto-configuration mode of CONTROL1 register is true. | ||
246 | */ | ||
247 | if (info->auto_config) | ||
248 | return 0; | ||
249 | |||
250 | if (!attached) | ||
251 | con_sw = DM_DP_SWITCH_UART; | ||
252 | |||
253 | switch (con_sw) { | ||
254 | case DM_DP_SWITCH_OPEN: | ||
255 | case DM_DP_SWITCH_USB: | ||
256 | case DM_DP_SWITCH_UART: | ||
257 | ret = regmap_update_bits(info->regmap, RT8973A_REG_MANUAL_SW1, | ||
258 | RT8973A_REG_MANUAL_SW1_DP_MASK | | ||
259 | RT8973A_REG_MANUAL_SW1_DM_MASK, | ||
260 | con_sw); | ||
261 | if (ret < 0) { | ||
262 | dev_err(info->dev, | ||
263 | "cannot update DM_CON/DP_CON switch\n"); | ||
264 | return ret; | ||
265 | } | ||
266 | break; | ||
267 | default: | ||
268 | dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n", | ||
269 | con_sw); | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int rt8973a_muic_get_cable_type(struct rt8973a_muic_info *info) | ||
277 | { | ||
278 | unsigned int adc, dev1; | ||
279 | int ret, cable_type; | ||
280 | |||
281 | /* Read ADC value according to external cable or button */ | ||
282 | ret = regmap_read(info->regmap, RT8973A_REG_ADC, &adc); | ||
283 | if (ret) { | ||
284 | dev_err(info->dev, "failed to read ADC register\n"); | ||
285 | return ret; | ||
286 | } | ||
287 | cable_type = adc & RT8973A_REG_ADC_MASK; | ||
288 | |||
289 | /* Read Device 1 reigster to identify correct cable type */ | ||
290 | ret = regmap_read(info->regmap, RT8973A_REG_DEV1, &dev1); | ||
291 | if (ret) { | ||
292 | dev_err(info->dev, "failed to read DEV1 register\n"); | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | switch (adc) { | ||
297 | case RT8973A_MUIC_ADC_OPEN: | ||
298 | if (dev1 & RT8973A_REG_DEV1_USB_MASK) | ||
299 | cable_type = RT8973A_MUIC_ADC_USB; | ||
300 | else if (dev1 & RT8973A_REG_DEV1_DCPORT_MASK) | ||
301 | cable_type = RT8973A_MUIC_ADC_TA; | ||
302 | else | ||
303 | cable_type = RT8973A_MUIC_ADC_OPEN; | ||
304 | break; | ||
305 | default: | ||
306 | break; | ||
307 | } | ||
308 | |||
309 | return cable_type; | ||
310 | } | ||
311 | |||
312 | static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info, | ||
313 | enum rt8973a_event_type event) | ||
314 | { | ||
315 | static unsigned int prev_cable_type; | ||
316 | const char **cable_names = info->edev->supported_cable; | ||
317 | unsigned int con_sw = DM_DP_SWITCH_UART; | ||
318 | int ret, idx = 0, cable_type; | ||
319 | bool attached = false; | ||
320 | |||
321 | if (!cable_names) | ||
322 | return 0; | ||
323 | |||
324 | switch (event) { | ||
325 | case RT8973A_EVENT_ATTACH: | ||
326 | cable_type = rt8973a_muic_get_cable_type(info); | ||
327 | attached = true; | ||
328 | break; | ||
329 | case RT8973A_EVENT_DETACH: | ||
330 | cable_type = prev_cable_type; | ||
331 | attached = false; | ||
332 | break; | ||
333 | case RT8973A_EVENT_OVP: | ||
334 | case RT8973A_EVENT_OTP: | ||
335 | dev_warn(info->dev, | ||
336 | "happen Over %s issue. Need to disconnect all cables\n", | ||
337 | event == RT8973A_EVENT_OVP ? "Voltage" : "Temperature"); | ||
338 | cable_type = prev_cable_type; | ||
339 | attached = false; | ||
340 | break; | ||
341 | default: | ||
342 | dev_err(info->dev, | ||
343 | "Cannot handle this event (event:%d)\n", event); | ||
344 | return -EINVAL; | ||
345 | } | ||
346 | prev_cable_type = cable_type; | ||
347 | |||
348 | switch (cable_type) { | ||
349 | case RT8973A_MUIC_ADC_OTG: | ||
350 | idx = EXTCON_CABLE_USB_HOST; | ||
351 | con_sw = DM_DP_SWITCH_USB; | ||
352 | break; | ||
353 | case RT8973A_MUIC_ADC_TA: | ||
354 | idx = EXTCON_CABLE_TA; | ||
355 | con_sw = DM_DP_SWITCH_OPEN; | ||
356 | break; | ||
357 | case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB: | ||
358 | idx = EXTCON_CABLE_JIG_OFF_USB; | ||
359 | con_sw = DM_DP_SWITCH_UART; | ||
360 | break; | ||
361 | case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB: | ||
362 | idx = EXTCON_CABLE_JIG_ON_USB; | ||
363 | con_sw = DM_DP_SWITCH_UART; | ||
364 | break; | ||
365 | case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART: | ||
366 | idx = EXTCON_CABLE_JIG_OFF_UART; | ||
367 | con_sw = DM_DP_SWITCH_UART; | ||
368 | break; | ||
369 | case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART: | ||
370 | idx = EXTCON_CABLE_JIG_ON_UART; | ||
371 | con_sw = DM_DP_SWITCH_UART; | ||
372 | break; | ||
373 | case RT8973A_MUIC_ADC_USB: | ||
374 | idx = EXTCON_CABLE_USB; | ||
375 | con_sw = DM_DP_SWITCH_USB; | ||
376 | break; | ||
377 | case RT8973A_MUIC_ADC_OPEN: | ||
378 | return 0; | ||
379 | case RT8973A_MUIC_ADC_UNKNOWN_ACC_1: | ||
380 | case RT8973A_MUIC_ADC_UNKNOWN_ACC_2: | ||
381 | case RT8973A_MUIC_ADC_UNKNOWN_ACC_3: | ||
382 | case RT8973A_MUIC_ADC_UNKNOWN_ACC_4: | ||
383 | case RT8973A_MUIC_ADC_UNKNOWN_ACC_5: | ||
384 | dev_warn(info->dev, | ||
385 | "Unknown accessory type (adc:0x%x)\n", cable_type); | ||
386 | return 0; | ||
387 | case RT8973A_MUIC_ADC_AUDIO_SEND_END_BUTTON: | ||
388 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S1_BUTTON: | ||
389 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S2_BUTTON: | ||
390 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S3_BUTTON: | ||
391 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S4_BUTTON: | ||
392 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S5_BUTTON: | ||
393 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S6_BUTTON: | ||
394 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S7_BUTTON: | ||
395 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S8_BUTTON: | ||
396 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S9_BUTTON: | ||
397 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S10_BUTTON: | ||
398 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S11_BUTTON: | ||
399 | case RT8973A_MUIC_ADC_AUDIO_REMOTE_S12_BUTTON: | ||
400 | case RT8973A_MUIC_ADC_AUDIO_TYPE2: | ||
401 | dev_warn(info->dev, | ||
402 | "Audio device/button type (adc:0x%x)\n", cable_type); | ||
403 | return 0; | ||
404 | case RT8973A_MUIC_ADC_RESERVED_ACC_1: | ||
405 | case RT8973A_MUIC_ADC_RESERVED_ACC_2: | ||
406 | case RT8973A_MUIC_ADC_RESERVED_ACC_3: | ||
407 | case RT8973A_MUIC_ADC_RESERVED_ACC_4: | ||
408 | case RT8973A_MUIC_ADC_RESERVED_ACC_5: | ||
409 | case RT8973A_MUIC_ADC_PHONE_POWERED_DEV: | ||
410 | return 0; | ||
411 | default: | ||
412 | dev_err(info->dev, | ||
413 | "Cannot handle this cable_type (adc:0x%x)\n", | ||
414 | cable_type); | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | |||
418 | /* Change internal hardware path(DM_CON/DP_CON) */ | ||
419 | ret = rt8973a_muic_set_path(info, con_sw, attached); | ||
420 | if (ret < 0) | ||
421 | return ret; | ||
422 | |||
423 | /* Change the state of external accessory */ | ||
424 | extcon_set_cable_state(info->edev, cable_names[idx], attached); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static void rt8973a_muic_irq_work(struct work_struct *work) | ||
430 | { | ||
431 | struct rt8973a_muic_info *info = container_of(work, | ||
432 | struct rt8973a_muic_info, irq_work); | ||
433 | int ret = 0; | ||
434 | |||
435 | if (!info->edev) | ||
436 | return; | ||
437 | |||
438 | mutex_lock(&info->mutex); | ||
439 | |||
440 | /* Detect attached or detached cables */ | ||
441 | if (info->irq_attach) { | ||
442 | ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_ATTACH); | ||
443 | info->irq_attach = false; | ||
444 | } | ||
445 | |||
446 | if (info->irq_detach) { | ||
447 | ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_DETACH); | ||
448 | info->irq_detach = false; | ||
449 | } | ||
450 | |||
451 | if (info->irq_ovp) { | ||
452 | ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_OVP); | ||
453 | info->irq_ovp = false; | ||
454 | } | ||
455 | |||
456 | if (info->irq_otp) { | ||
457 | ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_OTP); | ||
458 | info->irq_otp = false; | ||
459 | } | ||
460 | |||
461 | if (ret < 0) | ||
462 | dev_err(info->dev, "failed to handle MUIC interrupt\n"); | ||
463 | |||
464 | mutex_unlock(&info->mutex); | ||
465 | } | ||
466 | |||
467 | static irqreturn_t rt8973a_muic_irq_handler(int irq, void *data) | ||
468 | { | ||
469 | struct rt8973a_muic_info *info = data; | ||
470 | int i, irq_type = -1; | ||
471 | |||
472 | for (i = 0; i < info->num_muic_irqs; i++) | ||
473 | if (irq == info->muic_irqs[i].virq) | ||
474 | irq_type = info->muic_irqs[i].irq; | ||
475 | |||
476 | switch (irq_type) { | ||
477 | case RT8973A_INT1_ATTACH: | ||
478 | info->irq_attach = true; | ||
479 | break; | ||
480 | case RT8973A_INT1_DETACH: | ||
481 | info->irq_detach = true; | ||
482 | break; | ||
483 | case RT8973A_INT1_OVP: | ||
484 | info->irq_ovp = true; | ||
485 | break; | ||
486 | case RT8973A_INT1_OTP: | ||
487 | info->irq_otp = true; | ||
488 | break; | ||
489 | case RT8973A_INT1_CHGDET: | ||
490 | case RT8973A_INT1_DCD_T: | ||
491 | case RT8973A_INT1_CONNECT: | ||
492 | case RT8973A_INT1_ADC_CHG: | ||
493 | case RT8973A_INT2_UVLO: | ||
494 | case RT8973A_INT2_POR: | ||
495 | case RT8973A_INT2_OTP_FET: | ||
496 | case RT8973A_INT2_OVP_FET: | ||
497 | case RT8973A_INT2_OCP_LATCH: | ||
498 | case RT8973A_INT2_OCP: | ||
499 | case RT8973A_INT2_OVP_OCP: | ||
500 | default: | ||
501 | dev_dbg(info->dev, | ||
502 | "Cannot handle this interrupt (%d)\n", irq_type); | ||
503 | break; | ||
504 | } | ||
505 | |||
506 | schedule_work(&info->irq_work); | ||
507 | |||
508 | return IRQ_HANDLED; | ||
509 | } | ||
510 | |||
511 | static void rt8973a_muic_detect_cable_wq(struct work_struct *work) | ||
512 | { | ||
513 | struct rt8973a_muic_info *info = container_of(to_delayed_work(work), | ||
514 | struct rt8973a_muic_info, wq_detcable); | ||
515 | int ret; | ||
516 | |||
517 | /* Notify the state of connector cable or not */ | ||
518 | ret = rt8973a_muic_cable_handler(info, RT8973A_EVENT_ATTACH); | ||
519 | if (ret < 0) | ||
520 | dev_warn(info->dev, "failed to detect cable state\n"); | ||
521 | } | ||
522 | |||
523 | static void rt8973a_init_dev_type(struct rt8973a_muic_info *info) | ||
524 | { | ||
525 | unsigned int data, vendor_id, version_id; | ||
526 | int i, ret; | ||
527 | |||
528 | /* To test I2C, Print version_id and vendor_id of RT8973A */ | ||
529 | ret = regmap_read(info->regmap, RT8973A_REG_DEVICE_ID, &data); | ||
530 | if (ret) { | ||
531 | dev_err(info->dev, | ||
532 | "failed to read DEVICE_ID register: %d\n", ret); | ||
533 | return; | ||
534 | } | ||
535 | |||
536 | vendor_id = ((data & RT8973A_REG_DEVICE_ID_VENDOR_MASK) >> | ||
537 | RT8973A_REG_DEVICE_ID_VENDOR_SHIFT); | ||
538 | version_id = ((data & RT8973A_REG_DEVICE_ID_VERSION_MASK) >> | ||
539 | RT8973A_REG_DEVICE_ID_VERSION_SHIFT); | ||
540 | |||
541 | dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n", | ||
542 | version_id, vendor_id); | ||
543 | |||
544 | /* Initiazle the register of RT8973A device to bring-up */ | ||
545 | for (i = 0; i < info->num_reg_data; i++) { | ||
546 | u8 reg = info->reg_data[i].reg; | ||
547 | u8 mask = info->reg_data[i].mask; | ||
548 | u8 val = 0; | ||
549 | |||
550 | if (info->reg_data[i].invert) | ||
551 | val = ~info->reg_data[i].val; | ||
552 | else | ||
553 | val = info->reg_data[i].val; | ||
554 | |||
555 | regmap_update_bits(info->regmap, reg, mask, val); | ||
556 | } | ||
557 | |||
558 | /* Check whether RT8973A is auto swithcing mode or not */ | ||
559 | ret = regmap_read(info->regmap, RT8973A_REG_CONTROL1, &data); | ||
560 | if (ret) { | ||
561 | dev_err(info->dev, | ||
562 | "failed to read CONTROL1 register: %d\n", ret); | ||
563 | return; | ||
564 | } | ||
565 | |||
566 | data &= RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK; | ||
567 | if (data) { | ||
568 | info->auto_config = true; | ||
569 | dev_info(info->dev, | ||
570 | "Enable Auto-configuration for internal path\n"); | ||
571 | } | ||
572 | } | ||
573 | |||
574 | static int rt8973a_muic_i2c_probe(struct i2c_client *i2c, | ||
575 | const struct i2c_device_id *id) | ||
576 | { | ||
577 | struct device_node *np = i2c->dev.of_node; | ||
578 | struct rt8973a_muic_info *info; | ||
579 | int i, ret, irq_flags; | ||
580 | |||
581 | if (!np) | ||
582 | return -EINVAL; | ||
583 | |||
584 | info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL); | ||
585 | if (!info) { | ||
586 | dev_err(&i2c->dev, "failed to allocate memory\n"); | ||
587 | return -ENOMEM; | ||
588 | } | ||
589 | i2c_set_clientdata(i2c, info); | ||
590 | |||
591 | info->dev = &i2c->dev; | ||
592 | info->i2c = i2c; | ||
593 | info->irq = i2c->irq; | ||
594 | info->muic_irqs = rt8973a_muic_irqs; | ||
595 | info->num_muic_irqs = ARRAY_SIZE(rt8973a_muic_irqs); | ||
596 | info->reg_data = rt8973a_reg_data; | ||
597 | info->num_reg_data = ARRAY_SIZE(rt8973a_reg_data); | ||
598 | |||
599 | mutex_init(&info->mutex); | ||
600 | |||
601 | INIT_WORK(&info->irq_work, rt8973a_muic_irq_work); | ||
602 | |||
603 | info->regmap = devm_regmap_init_i2c(i2c, &rt8973a_muic_regmap_config); | ||
604 | if (IS_ERR(info->regmap)) { | ||
605 | ret = PTR_ERR(info->regmap); | ||
606 | dev_err(info->dev, "failed to allocate register map: %d\n", | ||
607 | ret); | ||
608 | return ret; | ||
609 | } | ||
610 | |||
611 | /* Support irq domain for RT8973A MUIC device */ | ||
612 | irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED; | ||
613 | ret = regmap_add_irq_chip(info->regmap, info->irq, irq_flags, 0, | ||
614 | &rt8973a_muic_irq_chip, &info->irq_data); | ||
615 | if (ret != 0) { | ||
616 | dev_err(info->dev, "failed to add irq_chip (irq:%d, err:%d)\n", | ||
617 | info->irq, ret); | ||
618 | return ret; | ||
619 | } | ||
620 | |||
621 | for (i = 0; i < info->num_muic_irqs; i++) { | ||
622 | struct muic_irq *muic_irq = &info->muic_irqs[i]; | ||
623 | unsigned int virq = 0; | ||
624 | |||
625 | virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq); | ||
626 | if (virq <= 0) | ||
627 | return -EINVAL; | ||
628 | muic_irq->virq = virq; | ||
629 | |||
630 | ret = devm_request_threaded_irq(info->dev, virq, NULL, | ||
631 | rt8973a_muic_irq_handler, | ||
632 | IRQF_NO_SUSPEND, | ||
633 | muic_irq->name, info); | ||
634 | if (ret) { | ||
635 | dev_err(info->dev, | ||
636 | "failed: irq request (IRQ: %d, error :%d)\n", | ||
637 | muic_irq->irq, ret); | ||
638 | return ret; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | /* Allocate extcon device */ | ||
643 | info->edev = devm_extcon_dev_allocate(info->dev, rt8973a_extcon_cable); | ||
644 | if (IS_ERR(info->edev)) { | ||
645 | dev_err(info->dev, "failed to allocate memory for extcon\n"); | ||
646 | return -ENOMEM; | ||
647 | } | ||
648 | info->edev->name = np->name; | ||
649 | |||
650 | /* Register extcon device */ | ||
651 | ret = devm_extcon_dev_register(info->dev, info->edev); | ||
652 | if (ret) { | ||
653 | dev_err(info->dev, "failed to register extcon device\n"); | ||
654 | return ret; | ||
655 | } | ||
656 | |||
657 | /* | ||
658 | * Detect accessory after completing the initialization of platform | ||
659 | * | ||
660 | * - Use delayed workqueue to detect cable state and then | ||
661 | * notify cable state to notifiee/platform through uevent. | ||
662 | * After completing the booting of platform, the extcon provider | ||
663 | * driver should notify cable state to upper layer. | ||
664 | */ | ||
665 | INIT_DELAYED_WORK(&info->wq_detcable, rt8973a_muic_detect_cable_wq); | ||
666 | queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, | ||
667 | msecs_to_jiffies(DELAY_MS_DEFAULT)); | ||
668 | |||
669 | /* Initialize RT8973A device and print vendor id and version id */ | ||
670 | rt8973a_init_dev_type(info); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | static int rt8973a_muic_i2c_remove(struct i2c_client *i2c) | ||
676 | { | ||
677 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); | ||
678 | |||
679 | regmap_del_irq_chip(info->irq, info->irq_data); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | static struct of_device_id rt8973a_dt_match[] = { | ||
685 | { .compatible = "richtek,rt8973a-muic" }, | ||
686 | { }, | ||
687 | }; | ||
688 | |||
689 | #ifdef CONFIG_PM_SLEEP | ||
690 | static int rt8973a_muic_suspend(struct device *dev) | ||
691 | { | ||
692 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | ||
693 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); | ||
694 | |||
695 | enable_irq_wake(info->irq); | ||
696 | |||
697 | return 0; | ||
698 | } | ||
699 | |||
700 | static int rt8973a_muic_resume(struct device *dev) | ||
701 | { | ||
702 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | ||
703 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); | ||
704 | |||
705 | disable_irq_wake(info->irq); | ||
706 | |||
707 | return 0; | ||
708 | } | ||
709 | #endif | ||
710 | |||
711 | static SIMPLE_DEV_PM_OPS(rt8973a_muic_pm_ops, | ||
712 | rt8973a_muic_suspend, rt8973a_muic_resume); | ||
713 | |||
714 | static const struct i2c_device_id rt8973a_i2c_id[] = { | ||
715 | { "rt8973a", TYPE_RT8973A }, | ||
716 | { } | ||
717 | }; | ||
718 | MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id); | ||
719 | |||
720 | static struct i2c_driver rt8973a_muic_i2c_driver = { | ||
721 | .driver = { | ||
722 | .name = "rt8973a", | ||
723 | .owner = THIS_MODULE, | ||
724 | .pm = &rt8973a_muic_pm_ops, | ||
725 | .of_match_table = rt8973a_dt_match, | ||
726 | }, | ||
727 | .probe = rt8973a_muic_i2c_probe, | ||
728 | .remove = rt8973a_muic_i2c_remove, | ||
729 | .id_table = rt8973a_i2c_id, | ||
730 | }; | ||
731 | |||
732 | static int __init rt8973a_muic_i2c_init(void) | ||
733 | { | ||
734 | return i2c_add_driver(&rt8973a_muic_i2c_driver); | ||
735 | } | ||
736 | subsys_initcall(rt8973a_muic_i2c_init); | ||
737 | |||
738 | MODULE_DESCRIPTION("Richtek RT8973A Extcon driver"); | ||
739 | MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); | ||
740 | MODULE_LICENSE("GPL"); | ||
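
For context on how a cable reported by this new provider is consumed elsewhere in the kernel: with the string-based extcon API of this era, a consumer can look up the extcon device and query a cable state roughly as below. This is a hedged sketch; the device name ("rt8973a-muic") and the surrounding consumer code are hypothetical and not part of this commit.

    struct extcon_dev *edev;

    edev = extcon_get_extcon_dev("rt8973a-muic");   /* provider name comes from the DT node */
    if (edev && extcon_get_cable_state(edev, "USB") > 0)
            pr_info("USB cable attached\n");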
diff --git a/drivers/extcon/extcon-rt8973a.h b/drivers/extcon/extcon-rt8973a.h
new file mode 100644
index 000000000000..9dc3e0227eb7
--- /dev/null
+++ b/drivers/extcon/extcon-rt8973a.h
@@ -0,0 +1,203 @@
1 | /* | ||
2 | * rt8973a.h | ||
3 | * | ||
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_EXTCON_RT8973A_H | ||
13 | #define __LINUX_EXTCON_RT8973A_H | ||
14 | |||
15 | enum rt8973a_types { | ||
16 | TYPE_RT8973A, | ||
17 | }; | ||
18 | |||
19 | /* RT8973A registers */ | ||
20 | enum rt8973A_reg { | ||
21 | RT8973A_REG_DEVICE_ID = 0x1, | ||
22 | RT8973A_REG_CONTROL1, | ||
23 | RT8973A_REG_INT1, | ||
24 | RT8973A_REG_INT2, | ||
25 | RT8973A_REG_INTM1, | ||
26 | RT8973A_REG_INTM2, | ||
27 | RT8973A_REG_ADC, | ||
28 | RT8973A_REG_RSVD_1, | ||
29 | RT8973A_REG_RSVD_2, | ||
30 | RT8973A_REG_DEV1, | ||
31 | RT8973A_REG_DEV2, | ||
32 | RT8973A_REG_RSVD_3, | ||
33 | RT8973A_REG_RSVD_4, | ||
34 | RT8973A_REG_RSVD_5, | ||
35 | RT8973A_REG_RSVD_6, | ||
36 | RT8973A_REG_RSVD_7, | ||
37 | RT8973A_REG_RSVD_8, | ||
38 | RT8973A_REG_RSVD_9, | ||
39 | RT8973A_REG_MANUAL_SW1, | ||
40 | RT8973A_REG_MANUAL_SW2, | ||
41 | RT8973A_REG_RSVD_10, | ||
42 | RT8973A_REG_RSVD_11, | ||
43 | RT8973A_REG_RSVD_12, | ||
44 | RT8973A_REG_RSVD_13, | ||
45 | RT8973A_REG_RSVD_14, | ||
46 | RT8973A_REG_RSVD_15, | ||
47 | RT8973A_REG_RESET, | ||
48 | |||
49 | RT8973A_REG_END, | ||
50 | }; | ||
51 | |||
52 | /* Define RT8973A MASK/SHIFT constant */ | ||
53 | #define RT8973A_REG_DEVICE_ID_VENDOR_SHIFT 0 | ||
54 | #define RT8973A_REG_DEVICE_ID_VERSION_SHIFT 3 | ||
55 | #define RT8973A_REG_DEVICE_ID_VENDOR_MASK (0x7 << RT8973A_REG_DEVICE_ID_VENDOR_SHIFT) | ||
56 | #define RT8973A_REG_DEVICE_ID_VERSION_MASK (0x1f << RT8973A_REG_DEVICE_ID_VERSION_SHIFT) | ||
57 | |||
58 | #define RT8973A_REG_CONTROL1_INTM_SHIFT 0 | ||
59 | #define RT8973A_REG_CONTROL1_AUTO_CONFIG_SHIFT 2 | ||
60 | #define RT8973A_REG_CONTROL1_I2C_RST_EN_SHIFT 3 | ||
61 | #define RT8973A_REG_CONTROL1_SWITCH_OPEN_SHIFT 4 | ||
62 | #define RT8973A_REG_CONTROL1_CHGTYP_SHIFT 5 | ||
63 | #define RT8973A_REG_CONTROL1_USB_CHD_EN_SHIFT 6 | ||
64 | #define RT8973A_REG_CONTROL1_ADC_EN_SHIFT 7 | ||
65 | #define RT8973A_REG_CONTROL1_INTM_MASK (0x1 << RT8973A_REG_CONTROL1_INTM_SHIFT) | ||
66 | #define RT8973A_REG_CONTROL1_AUTO_CONFIG_MASK (0x1 << RT8973A_REG_CONTROL1_AUTO_CONFIG_SHIFT) | ||
67 | #define RT8973A_REG_CONTROL1_I2C_RST_EN_MASK (0x1 << RT8973A_REG_CONTROL1_I2C_RST_EN_SHIFT) | ||
68 | #define RT8973A_REG_CONTROL1_SWITCH_OPEN_MASK (0x1 << RT8973A_REG_CONTROL1_SWITCH_OPEN_SHIFT) | ||
69 | #define RT8973A_REG_CONTROL1_CHGTYP_MASK (0x1 << RT8973A_REG_CONTROL1_CHGTYP_SHIFT) | ||
70 | #define RT8973A_REG_CONTROL1_USB_CHD_EN_MASK (0x1 << RT8973A_REG_CONTROL1_USB_CHD_EN_SHIFT) | ||
71 | #define RT8973A_REG_CONTROL1_ADC_EN_MASK (0x1 << RT8973A_REG_CONTROL1_ADC_EN_SHIFT) | ||
72 | |||
73 | #define RT9873A_REG_INTM1_ATTACH_SHIFT 0 | ||
74 | #define RT9873A_REG_INTM1_DETACH_SHIFT 1 | ||
75 | #define RT9873A_REG_INTM1_CHGDET_SHIFT 2 | ||
76 | #define RT9873A_REG_INTM1_DCD_T_SHIFT 3 | ||
77 | #define RT9873A_REG_INTM1_OVP_SHIFT 4 | ||
78 | #define RT9873A_REG_INTM1_CONNECT_SHIFT 5 | ||
79 | #define RT9873A_REG_INTM1_ADC_CHG_SHIFT 6 | ||
80 | #define RT9873A_REG_INTM1_OTP_SHIFT 7 | ||
81 | #define RT9873A_REG_INTM1_ATTACH_MASK (0x1 << RT9873A_REG_INTM1_ATTACH_SHIFT) | ||
82 | #define RT9873A_REG_INTM1_DETACH_MASK (0x1 << RT9873A_REG_INTM1_DETACH_SHIFT) | ||
83 | #define RT9873A_REG_INTM1_CHGDET_MASK (0x1 << RT9873A_REG_INTM1_CHGDET_SHIFT) | ||
84 | #define RT9873A_REG_INTM1_DCD_T_MASK (0x1 << RT9873A_REG_INTM1_DCD_T_SHIFT) | ||
85 | #define RT9873A_REG_INTM1_OVP_MASK (0x1 << RT9873A_REG_INTM1_OVP_SHIFT) | ||
86 | #define RT9873A_REG_INTM1_CONNECT_MASK (0x1 << RT9873A_REG_INTM1_CONNECT_SHIFT) | ||
87 | #define RT9873A_REG_INTM1_ADC_CHG_MASK (0x1 << RT9873A_REG_INTM1_ADC_CHG_SHIFT) | ||
88 | #define RT9873A_REG_INTM1_OTP_MASK (0x1 << RT9873A_REG_INTM1_OTP_SHIFT) | ||
89 | |||
90 | #define RT9873A_REG_INTM2_UVLO_SHIFT 1 | ||
91 | #define RT9873A_REG_INTM2_POR_SHIFT 2 | ||
92 | #define RT9873A_REG_INTM2_OTP_FET_SHIFT 3 | ||
93 | #define RT9873A_REG_INTM2_OVP_FET_SHIFT 4 | ||
94 | #define RT9873A_REG_INTM2_OCP_LATCH_SHIFT 5 | ||
95 | #define RT9873A_REG_INTM2_OCP_SHIFT 6 | ||
96 | #define RT9873A_REG_INTM2_OVP_OCP_SHIFT 7 | ||
97 | #define RT9873A_REG_INTM2_UVLO_MASK (0x1 << RT9873A_REG_INTM2_UVLO_SHIFT) | ||
98 | #define RT9873A_REG_INTM2_POR_MASK (0x1 << RT9873A_REG_INTM2_POR_SHIFT) | ||
99 | #define RT9873A_REG_INTM2_OTP_FET_MASK (0x1 << RT9873A_REG_INTM2_OTP_FET_SHIFT) | ||
100 | #define RT9873A_REG_INTM2_OVP_FET_MASK (0x1 << RT9873A_REG_INTM2_OVP_FET_SHIFT) | ||
101 | #define RT9873A_REG_INTM2_OCP_LATCH_MASK (0x1 << RT9873A_REG_INTM2_OCP_LATCH_SHIFT) | ||
102 | #define RT9873A_REG_INTM2_OCP_MASK (0x1 << RT9873A_REG_INTM2_OCP_SHIFT) | ||
103 | #define RT9873A_REG_INTM2_OVP_OCP_MASK (0x1 << RT9873A_REG_INTM2_OVP_OCP_SHIFT) | ||
104 | |||
105 | #define RT8973A_REG_ADC_SHIFT 0 | ||
106 | #define RT8973A_REG_ADC_MASK (0x1f << RT8973A_REG_ADC_SHIFT) | ||
107 | |||
108 | #define RT8973A_REG_DEV1_OTG_SHIFT 0 | ||
109 | #define RT8973A_REG_DEV1_SDP_SHIFT 2 | ||
110 | #define RT8973A_REG_DEV1_UART_SHIFT 3 | ||
111 | #define RT8973A_REG_DEV1_CAR_KIT_TYPE1_SHIFT 4 | ||
112 | #define RT8973A_REG_DEV1_CDPORT_SHIFT 5 | ||
113 | #define RT8973A_REG_DEV1_DCPORT_SHIFT 6 | ||
114 | #define RT8973A_REG_DEV1_OTG_MASK (0x1 << RT8973A_REG_DEV1_OTG_SHIFT) | ||
115 | #define RT8973A_REG_DEV1_SDP_MASK (0x1 << RT8973A_REG_DEV1_SDP_SHIFT) | ||
116 | #define RT8973A_REG_DEV1_UART_MASK (0x1 << RT8973A_REG_DEV1_UART_SHIFT) | ||
117 | #define RT8973A_REG_DEV1_CAR_KIT_TYPE1_MASK (0x1 << RT8973A_REG_DEV1_CAR_KIT_TYPE1_SHIFT) | ||
118 | #define RT8973A_REG_DEV1_CDPORT_MASK (0x1 << RT8973A_REG_DEV1_CDPORT_SHIFT) | ||
119 | #define RT8973A_REG_DEV1_DCPORT_MASK (0x1 << RT8973A_REG_DEV1_DCPORT_SHIFT) | ||
120 | #define RT8973A_REG_DEV1_USB_MASK (RT8973A_REG_DEV1_SDP_MASK \ | ||
121 | | RT8973A_REG_DEV1_CDPORT_MASK) | ||
122 | |||
123 | #define RT8973A_REG_DEV2_JIG_USB_ON_SHIFT 0 | ||
124 | #define RT8973A_REG_DEV2_JIG_USB_OFF_SHIFT 1 | ||
125 | #define RT8973A_REG_DEV2_JIG_UART_ON_SHIFT 2 | ||
126 | #define RT8973A_REG_DEV2_JIG_UART_OFF_SHIFT 3 | ||
127 | #define RT8973A_REG_DEV2_JIG_USB_ON_MASK (0x1 << RT8973A_REG_DEV2_JIG_USB_ON_SHIFT) | ||
128 | #define RT8973A_REG_DEV2_JIG_USB_OFF_MASK (0x1 << RT8973A_REG_DEV2_JIG_USB_OFF_SHIFT) | ||
129 | #define RT8973A_REG_DEV2_JIG_UART_ON_MASK (0x1 << RT8973A_REG_DEV2_JIG_UART_ON_SHIFT) | ||
130 | #define RT8973A_REG_DEV2_JIG_UART_OFF_MASK (0x1 << RT8973A_REG_DEV2_JIG_UART_OFF_SHIFT) | ||
131 | |||
132 | #define RT8973A_REG_MANUAL_SW1_DP_SHIFT 2 | ||
133 | #define RT8973A_REG_MANUAL_SW1_DM_SHIFT 5 | ||
134 | #define RT8973A_REG_MANUAL_SW1_DP_MASK (0x7 << RT8973A_REG_MANUAL_SW1_DP_SHIFT) | ||
135 | #define RT8973A_REG_MANUAL_SW1_DM_MASK (0x7 << RT8973A_REG_MANUAL_SW1_DM_SHIFT) | ||
136 | #define DM_DP_CON_SWITCH_OPEN 0x0 | ||
137 | #define DM_DP_CON_SWITCH_USB 0x1 | ||
138 | #define DM_DP_CON_SWITCH_UART 0x3 | ||
139 | #define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | ||
140 | | (DM_DP_CON_SWITCH_OPEN << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) | ||
141 | #define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | ||
142 | | (DM_DP_CON_SWITCH_USB << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) | ||
143 | #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART << RT8973A_REG_MANUAL_SW1_DP_SHIFT) \ | ||
144 | | (DM_DP_CON_SWITCH_UART << RT8973A_REG_MANUAL_SW1_DM_SHIFT)) | ||
145 | |||
146 | #define RT8973A_REG_MANUAL_SW2_FET_ON_SHIFT 0 | ||
147 | #define RT8973A_REG_MANUAL_SW2_JIG_ON_SHIFT 2 | ||
148 | #define RT8973A_REG_MANUAL_SW2_BOOT_SW_SHIFT 3 | ||
149 | #define RT8973A_REG_MANUAL_SW2_FET_ON_MASK (0x1 << RT8973A_REG_MANUAL_SW2_FET_ON_SHIFT) | ||
150 | #define RT8973A_REG_MANUAL_SW2_JIG_ON_MASK (0x1 << RT8973A_REG_MANUAL_SW2_JIG_ON_SHIFT) | ||
151 | #define RT8973A_REG_MANUAL_SW2_BOOT_SW_MASK (0x1 << RT8973A_REG_MANUAL_SW2_BOOT_SW_SHIFT) | ||
152 | #define RT8973A_REG_MANUAL_SW2_FET_ON 0 | ||
153 | #define RT8973A_REG_MANUAL_SW2_FET_OFF 0x1 | ||
154 | #define RT8973A_REG_MANUAL_SW2_JIG_OFF 0 | ||
155 | #define RT8973A_REG_MANUAL_SW2_JIG_ON 0x1 | ||
156 | #define RT8973A_REG_MANUAL_SW2_BOOT_SW_ON 0 | ||
157 | #define RT8973A_REG_MANUAL_SW2_BOOT_SW_OFF 0x1 | ||
158 | |||
159 | #define RT8973A_REG_RESET_SHIFT 0 | ||
160 | #define RT8973A_REG_RESET_MASK (0x1 << RT8973A_REG_RESET_SHIFT) | ||
161 | #define RT8973A_REG_RESET 0x1 | ||
162 | |||
163 | /* RT8973A Interrupts */ | ||
164 | enum rt8973a_irq { | ||
165 | /* Interrupt1*/ | ||
166 | RT8973A_INT1_ATTACH, | ||
167 | RT8973A_INT1_DETACH, | ||
168 | RT8973A_INT1_CHGDET, | ||
169 | RT8973A_INT1_DCD_T, | ||
170 | RT8973A_INT1_OVP, | ||
171 | RT8973A_INT1_CONNECT, | ||
172 | RT8973A_INT1_ADC_CHG, | ||
173 | RT8973A_INT1_OTP, | ||
174 | |||
175 | /* Interrupt2*/ | ||
176 | RT8973A_INT2_UVLO, | ||
177 | RT8973A_INT2_POR, | ||
178 | RT8973A_INT2_OTP_FET, | ||
179 | RT8973A_INT2_OVP_FET, | ||
180 | RT8973A_INT2_OCP_LATCH, | ||
181 | RT8973A_INT2_OCP, | ||
182 | RT8973A_INT2_OVP_OCP, | ||
183 | |||
184 | RT8973A_NUM, | ||
185 | }; | ||
186 | |||
187 | #define RT8973A_INT1_ATTACH_MASK BIT(0) | ||
188 | #define RT8973A_INT1_DETACH_MASK BIT(1) | ||
189 | #define RT8973A_INT1_CHGDET_MASK BIT(2) | ||
190 | #define RT8973A_INT1_DCD_T_MASK BIT(3) | ||
191 | #define RT8973A_INT1_OVP_MASK BIT(4) | ||
192 | #define RT8973A_INT1_CONNECT_MASK BIT(5) | ||
193 | #define RT8973A_INT1_ADC_CHG_MASK BIT(6) | ||
194 | #define RT8973A_INT1_OTP_MASK BIT(7) | ||
195 | #define RT8973A_INT2_UVLOT_MASK BIT(0) | ||
196 | #define RT8973A_INT2_POR_MASK BIT(1) | ||
197 | #define RT8973A_INT2_OTP_FET_MASK BIT(2) | ||
198 | #define RT8973A_INT2_OVP_FET_MASK BIT(3) | ||
199 | #define RT8973A_INT2_OCP_LATCH_MASK BIT(4) | ||
200 | #define RT8973A_INT2_OCP_MASK BIT(5) | ||
201 | #define RT8973A_INT2_OVP_OCP_MASK BIT(6) | ||
202 | |||
203 | #endif /* __LINUX_EXTCON_RT8973A_H */ | ||
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 560d7dccec7b..b0f7bd82af90 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -8,16 +8,10 @@
  * under the terms of the GNU General Public License as published by the
  * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/err.h>
 #include <linux/i2c.h>
-#include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
@@ -26,7 +20,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/extcon.h>
-#include <linux/extcon/sm5502.h>
+
+#include "extcon-sm5502.h"
 
 #define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
 
@@ -300,7 +295,7 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
          * If ADC is SM5502_MUIC_ADC_GROUND(0x0), external cable hasn't
          * connected with to MUIC device.
          */
-        cable_type &= SM5502_REG_ADC_MASK;
+        cable_type = adc & SM5502_REG_ADC_MASK;
         if (cable_type == SM5502_MUIC_ADC_GROUND)
                 return SM5502_MUIC_ADC_GROUND;
 
@@ -395,7 +390,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
         /* Get the type of attached or detached cable */
         if (attached)
                 cable_type = sm5502_muic_get_cable_type(info);
-        else if (!attached)
+        else
                 cable_type = prev_cable_type;
         prev_cable_type = cable_type;
 
@@ -457,8 +452,6 @@ static void sm5502_muic_irq_work(struct work_struct *work)
                 dev_err(info->dev, "failed to handle MUIC interrupt\n");
 
         mutex_unlock(&info->mutex);
-
-        return;
 }
 
 /*
@@ -617,8 +610,9 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
                                         IRQF_NO_SUSPEND,
                                         muic_irq->name, info);
                 if (ret) {
-                        dev_err(info->dev, "failed: irq request (IRQ: %d,"
-                                " error :%d)\n", muic_irq->irq, ret);
+                        dev_err(info->dev,
+                                "failed: irq request (IRQ: %d, error :%d)\n",
+                                muic_irq->irq, ret);
                         return ret;
                 }
         }
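
The one-line change in sm5502_muic_get_cable_type() above is a real fix rather than a cleanup: the old code masked cable_type with SM5502_REG_ADC_MASK without first deriving it from the ADC register value that was just read, while the new code takes the type from adc directly. Paraphrased as a sketch of the corrected flow (not the full function):

    ret = regmap_read(info->regmap, SM5502_REG_ADC, &adc);
    if (ret)
            return ret;
    cable_type = adc & SM5502_REG_ADC_MASK;         /* type comes from the value just read */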
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
new file mode 100644
index 000000000000..974b53222f56
--- /dev/null
+++ b/drivers/extcon/extcon-sm5502.h
@@ -0,0 +1,282 @@
1 | /* | ||
2 | * sm5502.h | ||
3 | * | ||
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_EXTCON_SM5502_H | ||
13 | #define __LINUX_EXTCON_SM5502_H | ||
14 | |||
15 | enum sm5502_types { | ||
16 | TYPE_SM5502, | ||
17 | }; | ||
18 | |||
19 | /* SM5502 registers */ | ||
20 | enum sm5502_reg { | ||
21 | SM5502_REG_DEVICE_ID = 0x01, | ||
22 | SM5502_REG_CONTROL, | ||
23 | SM5502_REG_INT1, | ||
24 | SM5502_REG_INT2, | ||
25 | SM5502_REG_INTMASK1, | ||
26 | SM5502_REG_INTMASK2, | ||
27 | SM5502_REG_ADC, | ||
28 | SM5502_REG_TIMING_SET1, | ||
29 | SM5502_REG_TIMING_SET2, | ||
30 | SM5502_REG_DEV_TYPE1, | ||
31 | SM5502_REG_DEV_TYPE2, | ||
32 | SM5502_REG_BUTTON1, | ||
33 | SM5502_REG_BUTTON2, | ||
34 | SM5502_REG_CAR_KIT_STATUS, | ||
35 | SM5502_REG_RSVD1, | ||
36 | SM5502_REG_RSVD2, | ||
37 | SM5502_REG_RSVD3, | ||
38 | SM5502_REG_RSVD4, | ||
39 | SM5502_REG_MANUAL_SW1, | ||
40 | SM5502_REG_MANUAL_SW2, | ||
41 | SM5502_REG_DEV_TYPE3, | ||
42 | SM5502_REG_RSVD5, | ||
43 | SM5502_REG_RSVD6, | ||
44 | SM5502_REG_RSVD7, | ||
45 | SM5502_REG_RSVD8, | ||
46 | SM5502_REG_RSVD9, | ||
47 | SM5502_REG_RESET, | ||
48 | SM5502_REG_RSVD10, | ||
49 | SM5502_REG_RESERVED_ID1, | ||
50 | SM5502_REG_RSVD11, | ||
51 | SM5502_REG_RSVD12, | ||
52 | SM5502_REG_RESERVED_ID2, | ||
53 | SM5502_REG_RSVD13, | ||
54 | SM5502_REG_OCP, | ||
55 | SM5502_REG_RSVD14, | ||
56 | SM5502_REG_RSVD15, | ||
57 | SM5502_REG_RSVD16, | ||
58 | SM5502_REG_RSVD17, | ||
59 | SM5502_REG_RSVD18, | ||
60 | SM5502_REG_RSVD19, | ||
61 | SM5502_REG_RSVD20, | ||
62 | SM5502_REG_RSVD21, | ||
63 | SM5502_REG_RSVD22, | ||
64 | SM5502_REG_RSVD23, | ||
65 | SM5502_REG_RSVD24, | ||
66 | SM5502_REG_RSVD25, | ||
67 | SM5502_REG_RSVD26, | ||
68 | SM5502_REG_RSVD27, | ||
69 | SM5502_REG_RSVD28, | ||
70 | SM5502_REG_RSVD29, | ||
71 | SM5502_REG_RSVD30, | ||
72 | SM5502_REG_RSVD31, | ||
73 | SM5502_REG_RSVD32, | ||
74 | SM5502_REG_RSVD33, | ||
75 | SM5502_REG_RSVD34, | ||
76 | SM5502_REG_RSVD35, | ||
77 | SM5502_REG_RSVD36, | ||
78 | SM5502_REG_RESERVED_ID3, | ||
79 | |||
80 | SM5502_REG_END, | ||
81 | }; | ||
82 | |||
83 | /* Define SM5502 MASK/SHIFT constant */ | ||
84 | #define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0 | ||
85 | #define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3 | ||
86 | #define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT) | ||
87 | #define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT) | ||
88 | |||
89 | #define SM5502_REG_CONTROL_MASK_INT_SHIFT 0 | ||
90 | #define SM5502_REG_CONTROL_WAIT_SHIFT 1 | ||
91 | #define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2 | ||
92 | #define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3 | ||
93 | #define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4 | ||
94 | #define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT) | ||
95 | #define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT) | ||
96 | #define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT) | ||
97 | #define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT) | ||
98 | #define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT) | ||
99 | |||
100 | #define SM5502_REG_INTM1_ATTACH_SHIFT 0 | ||
101 | #define SM5502_REG_INTM1_DETACH_SHIFT 1 | ||
102 | #define SM5502_REG_INTM1_KP_SHIFT 2 | ||
103 | #define SM5502_REG_INTM1_LKP_SHIFT 3 | ||
104 | #define SM5502_REG_INTM1_LKR_SHIFT 4 | ||
105 | #define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5 | ||
106 | #define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6 | ||
107 | #define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7 | ||
108 | #define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT) | ||
109 | #define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT) | ||
110 | #define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT) | ||
111 | #define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT) | ||
112 | #define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT) | ||
113 | #define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT) | ||
114 | #define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT) | ||
115 | #define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT) | ||
116 | |||
117 | #define SM5502_REG_INTM2_VBUS_DET_SHIFT 0 | ||
118 | #define SM5502_REG_INTM2_REV_ACCE_SHIFT 1 | ||
119 | #define SM5502_REG_INTM2_ADC_CHG_SHIFT 2 | ||
120 | #define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3 | ||
121 | #define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4 | ||
122 | #define SM5502_REG_INTM2_MHL_SHIFT 5 | ||
123 | #define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT) | ||
124 | #define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT) | ||
125 | #define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT) | ||
126 | #define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT) | ||
127 | #define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT) | ||
128 | #define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT) | ||
129 | |||
130 | #define SM5502_REG_ADC_SHIFT 0 | ||
131 | #define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT) | ||
132 | |||
133 | #define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4 | ||
134 | #define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT) | ||
135 | #define TIMING_KEY_PRESS_100MS 0x0 | ||
136 | #define TIMING_KEY_PRESS_200MS 0x1 | ||
137 | #define TIMING_KEY_PRESS_300MS 0x2 | ||
138 | #define TIMING_KEY_PRESS_400MS 0x3 | ||
139 | #define TIMING_KEY_PRESS_500MS 0x4 | ||
140 | #define TIMING_KEY_PRESS_600MS 0x5 | ||
141 | #define TIMING_KEY_PRESS_700MS 0x6 | ||
142 | #define TIMING_KEY_PRESS_800MS 0x7 | ||
143 | #define TIMING_KEY_PRESS_900MS 0x8 | ||
144 | #define TIMING_KEY_PRESS_1000MS 0x9 | ||
145 | #define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0 | ||
146 | #define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT) | ||
147 | #define TIMING_ADC_DET_50MS 0x0 | ||
148 | #define TIMING_ADC_DET_100MS 0x1 | ||
149 | #define TIMING_ADC_DET_150MS 0x2 | ||
150 | #define TIMING_ADC_DET_200MS 0x3 | ||
151 | #define TIMING_ADC_DET_300MS 0x4 | ||
152 | #define TIMING_ADC_DET_400MS 0x5 | ||
153 | #define TIMING_ADC_DET_500MS 0x6 | ||
154 | #define TIMING_ADC_DET_600MS 0x7 | ||
155 | #define TIMING_ADC_DET_700MS 0x8 | ||
156 | #define TIMING_ADC_DET_800MS 0x9 | ||
157 | #define TIMING_ADC_DET_900MS 0xA | ||
158 | #define TIMING_ADC_DET_1000MS 0xB | ||
159 | |||
160 | #define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4 | ||
161 | #define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT) | ||
162 | #define TIMING_SW_WAIT_10MS 0x0 | ||
163 | #define TIMING_SW_WAIT_30MS 0x1 | ||
164 | #define TIMING_SW_WAIT_50MS 0x2 | ||
165 | #define TIMING_SW_WAIT_70MS 0x3 | ||
166 | #define TIMING_SW_WAIT_90MS 0x4 | ||
167 | #define TIMING_SW_WAIT_110MS 0x5 | ||
168 | #define TIMING_SW_WAIT_130MS 0x6 | ||
169 | #define TIMING_SW_WAIT_150MS 0x7 | ||
170 | #define TIMING_SW_WAIT_170MS 0x8 | ||
171 | #define TIMING_SW_WAIT_190MS 0x9 | ||
172 | #define TIMING_SW_WAIT_210MS 0xA | ||
173 | #define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0 | ||
174 | #define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT) | ||
175 | #define TIMING_LONG_KEY_300MS 0x0 | ||
176 | #define TIMING_LONG_KEY_400MS 0x1 | ||
177 | #define TIMING_LONG_KEY_500MS 0x2 | ||
178 | #define TIMING_LONG_KEY_600MS 0x3 | ||
179 | #define TIMING_LONG_KEY_700MS 0x4 | ||
180 | #define TIMING_LONG_KEY_800MS 0x5 | ||
181 | #define TIMING_LONG_KEY_900MS 0x6 | ||
182 | #define TIMING_LONG_KEY_1000MS 0x7 | ||
183 | #define TIMING_LONG_KEY_1100MS 0x8 | ||
184 | #define TIMING_LONG_KEY_1200MS 0x9 | ||
185 | #define TIMING_LONG_KEY_1300MS 0xA | ||
186 | #define TIMING_LONG_KEY_1400MS 0xB | ||
187 | #define TIMING_LONG_KEY_1500MS 0xC | ||
188 | |||
189 | #define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0 | ||
190 | #define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1 | ||
191 | #define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2 | ||
192 | #define SM5502_REG_DEV_TYPE1_UART_SHIFT 3 | ||
193 | #define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4 | ||
194 | #define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5 | ||
195 | #define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6 | ||
196 | #define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7 | ||
197 | #define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT) | ||
198 | #define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT) | ||
199 | #define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT) | ||
200 | #define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT) | ||
201 | #define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT) | ||
202 | #define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT) | ||
203 | #define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT) | ||
204 | #define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT) | ||
205 | |||
206 | #define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0 | ||
207 | #define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1 | ||
208 | #define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2 | ||
209 | #define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3 | ||
210 | #define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4 | ||
211 | #define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5 | ||
212 | #define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6 | ||
213 | #define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT) | ||
214 | #define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT) | ||
215 | #define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT) | ||
216 | #define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT) | ||
217 | #define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT) | ||
218 | #define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT) | ||
219 | #define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT) | ||
220 | |||
221 | #define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0 | ||
222 | #define SM5502_REG_MANUAL_SW1_DP_SHIFT 2 | ||
223 | #define SM5502_REG_MANUAL_SW1_DM_SHIFT 5 | ||
224 | #define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT) | ||
225 | #define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT) | ||
226 | #define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT) | ||
227 | #define VBUSIN_SWITCH_OPEN 0x0 | ||
228 | #define VBUSIN_SWITCH_VBUSOUT 0x1 | ||
229 | #define VBUSIN_SWITCH_MIC 0x2 | ||
230 | #define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3 | ||
231 | #define DM_DP_CON_SWITCH_OPEN 0x0 | ||
232 | #define DM_DP_CON_SWITCH_USB 0x1 | ||
233 | #define DM_DP_CON_SWITCH_AUDIO 0x2 | ||
234 | #define DM_DP_CON_SWITCH_UART 0x3 | ||
235 | #define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ | ||
236 | | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) | ||
237 | #define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ | ||
238 | | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) | ||
239 | #define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ | ||
240 | | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) | ||
241 | #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ | ||
242 | | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) | ||
243 | |||
244 | /* SM5502 Interrupts */ | ||
245 | enum sm5502_irq { | ||
246 | /* INT1 */ | ||
247 | SM5502_IRQ_INT1_ATTACH, | ||
248 | SM5502_IRQ_INT1_DETACH, | ||
249 | SM5502_IRQ_INT1_KP, | ||
250 | SM5502_IRQ_INT1_LKP, | ||
251 | SM5502_IRQ_INT1_LKR, | ||
252 | SM5502_IRQ_INT1_OVP_EVENT, | ||
253 | SM5502_IRQ_INT1_OCP_EVENT, | ||
254 | SM5502_IRQ_INT1_OVP_OCP_DIS, | ||
255 | |||
256 | /* INT2 */ | ||
257 | SM5502_IRQ_INT2_VBUS_DET, | ||
258 | SM5502_IRQ_INT2_REV_ACCE, | ||
259 | SM5502_IRQ_INT2_ADC_CHG, | ||
260 | SM5502_IRQ_INT2_STUCK_KEY, | ||
261 | SM5502_IRQ_INT2_STUCK_KEY_RCV, | ||
262 | SM5502_IRQ_INT2_MHL, | ||
263 | |||
264 | SM5502_IRQ_NUM, | ||
265 | }; | ||
266 | |||
267 | #define SM5502_IRQ_INT1_ATTACH_MASK BIT(0) | ||
268 | #define SM5502_IRQ_INT1_DETACH_MASK BIT(1) | ||
269 | #define SM5502_IRQ_INT1_KP_MASK BIT(2) | ||
270 | #define SM5502_IRQ_INT1_LKP_MASK BIT(3) | ||
271 | #define SM5502_IRQ_INT1_LKR_MASK BIT(4) | ||
272 | #define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5) | ||
273 | #define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6) | ||
274 | #define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7) | ||
275 | #define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0) | ||
276 | #define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1) | ||
277 | #define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2) | ||
278 | #define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3) | ||
279 | #define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4) | ||
280 | #define SM5502_IRQ_INT2_MHL_MASK BIT(5) | ||
281 | |||
282 | #endif /* __LINUX_EXTCON_SM5502_H */ | ||
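A minimal sketch of how the DEVICE_ID shift/mask pairs defined above are typically consumed. The helper name and the use of plain SMBus byte reads are assumptions for illustration; the driver itself may go through regmap instead.

	#include <linux/i2c.h>

	/* Illustrative only: read SM5502_REG_DEVICE_ID and split it with the masks above. */
	static int sm5502_read_revision(struct i2c_client *i2c)
	{
		int val;

		val = i2c_smbus_read_byte_data(i2c, SM5502_REG_DEVICE_ID);
		if (val < 0)
			return val;

		dev_info(&i2c->dev, "vendor %d, version %d\n",
			 (val & SM5502_REG_DEVICE_ID_VENDOR_MASK) >>
				SM5502_REG_DEVICE_ID_VENDOR_SHIFT,
			 (val & SM5502_REG_DEVICE_ID_VERSION_MASK) >>
				SM5502_REG_DEVICE_ID_VERSION_SHIFT);
		return 0;
	}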
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 531a593912ec..433f72a1c006 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
@@ -165,8 +165,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, | |||
165 | ret = vmbus_post_msg(open_msg, | 165 | ret = vmbus_post_msg(open_msg, |
166 | sizeof(struct vmbus_channel_open_channel)); | 166 | sizeof(struct vmbus_channel_open_channel)); |
167 | 167 | ||
168 | if (ret != 0) | 168 | if (ret != 0) { |
169 | err = ret; | ||
169 | goto error1; | 170 | goto error1; |
171 | } | ||
170 | 172 | ||
171 | t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); | 173 | t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); |
172 | if (t == 0) { | 174 | if (t == 0) { |
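The hunk above fixes a silent failure in vmbus_open(): when vmbus_post_msg() failed, err was previously left at 0 and the function could report success for a channel that never opened. A standalone illustration of the pattern (not the driver code):

	#include <stdio.h>

	static int post_msg(void) { return -5; }	/* simulated failure */

	static int do_open(void)
	{
		int err = 0, ret;

		ret = post_msg();
		if (ret != 0) {
			err = ret;	/* without this, the failure is lost */
			goto error;
		}
		/* ... wait for completion, finish setup ... */
	error:
		/* cleanup shared by all exit paths */
		return err;
	}

	int main(void) { printf("%d\n", do_open()); return 0; }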
@@ -363,7 +365,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, | |||
363 | u32 next_gpadl_handle; | 365 | u32 next_gpadl_handle; |
364 | unsigned long flags; | 366 | unsigned long flags; |
365 | int ret = 0; | 367 | int ret = 0; |
366 | int t; | ||
367 | 368 | ||
368 | next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); | 369 | next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); |
369 | atomic_inc(&vmbus_connection.next_gpadl_handle); | 370 | atomic_inc(&vmbus_connection.next_gpadl_handle); |
@@ -410,9 +411,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, | |||
410 | 411 | ||
411 | } | 412 | } |
412 | } | 413 | } |
413 | t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); | 414 | wait_for_completion(&msginfo->waitevent); |
414 | BUG_ON(t == 0); | ||
415 | |||
416 | 415 | ||
417 | /* At this point, we received the gpadl created msg */ | 416 | /* At this point, we received the gpadl created msg */ |
418 | *gpadl_handle = gpadlmsg->gpadl; | 417 | *gpadl_handle = gpadlmsg->gpadl; |
@@ -435,7 +434,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) | |||
435 | struct vmbus_channel_gpadl_teardown *msg; | 434 | struct vmbus_channel_gpadl_teardown *msg; |
436 | struct vmbus_channel_msginfo *info; | 435 | struct vmbus_channel_msginfo *info; |
437 | unsigned long flags; | 436 | unsigned long flags; |
438 | int ret, t; | 437 | int ret; |
439 | 438 | ||
440 | info = kmalloc(sizeof(*info) + | 439 | info = kmalloc(sizeof(*info) + |
441 | sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL); | 440 | sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL); |
@@ -457,11 +456,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) | |||
457 | ret = vmbus_post_msg(msg, | 456 | ret = vmbus_post_msg(msg, |
458 | sizeof(struct vmbus_channel_gpadl_teardown)); | 457 | sizeof(struct vmbus_channel_gpadl_teardown)); |
459 | 458 | ||
460 | BUG_ON(ret != 0); | 459 | if (ret) |
461 | t = wait_for_completion_timeout(&info->waitevent, 5*HZ); | 460 | goto post_msg_err; |
462 | BUG_ON(t == 0); | 461 | |
462 | wait_for_completion(&info->waitevent); | ||
463 | 463 | ||
464 | /* Received a torndown response */ | 464 | post_msg_err: |
465 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 465 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
466 | list_del(&info->msglistentry); | 466 | list_del(&info->msglistentry); |
467 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); | 467 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); |
@@ -478,7 +478,7 @@ static void reset_channel_cb(void *arg) | |||
478 | channel->onchannel_callback = NULL; | 478 | channel->onchannel_callback = NULL; |
479 | } | 479 | } |
480 | 480 | ||
481 | static void vmbus_close_internal(struct vmbus_channel *channel) | 481 | static int vmbus_close_internal(struct vmbus_channel *channel) |
482 | { | 482 | { |
483 | struct vmbus_channel_close_channel *msg; | 483 | struct vmbus_channel_close_channel *msg; |
484 | int ret; | 484 | int ret; |
@@ -486,11 +486,14 @@ static void vmbus_close_internal(struct vmbus_channel *channel) | |||
486 | channel->state = CHANNEL_OPEN_STATE; | 486 | channel->state = CHANNEL_OPEN_STATE; |
487 | channel->sc_creation_callback = NULL; | 487 | channel->sc_creation_callback = NULL; |
488 | /* Stop callback and cancel the timer asap */ | 488 | /* Stop callback and cancel the timer asap */ |
489 | if (channel->target_cpu != smp_processor_id()) | 489 | if (channel->target_cpu != get_cpu()) { |
490 | put_cpu(); | ||
490 | smp_call_function_single(channel->target_cpu, reset_channel_cb, | 491 | smp_call_function_single(channel->target_cpu, reset_channel_cb, |
491 | channel, true); | 492 | channel, true); |
492 | else | 493 | } else { |
493 | reset_channel_cb(channel); | 494 | reset_channel_cb(channel); |
495 | put_cpu(); | ||
496 | } | ||
494 | 497 | ||
495 | /* Send a closing message */ | 498 | /* Send a closing message */ |
496 | 499 | ||
@@ -501,11 +504,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel) | |||
501 | 504 | ||
502 | ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel)); | 505 | ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel)); |
503 | 506 | ||
504 | BUG_ON(ret != 0); | 507 | if (ret) { |
508 | pr_err("Close failed: close post msg return is %d\n", ret); | ||
509 | /* | ||
510 | * If we failed to post the close msg, | ||
511 | * it is perhaps better to leak memory. | ||
512 | */ | ||
513 | return ret; | ||
514 | } | ||
515 | |||
505 | /* Tear down the gpadl for the channel's ring buffer */ | 516 | /* Tear down the gpadl for the channel's ring buffer */ |
506 | if (channel->ringbuffer_gpadlhandle) | 517 | if (channel->ringbuffer_gpadlhandle) { |
507 | vmbus_teardown_gpadl(channel, | 518 | ret = vmbus_teardown_gpadl(channel, |
508 | channel->ringbuffer_gpadlhandle); | 519 | channel->ringbuffer_gpadlhandle); |
520 | if (ret) { | ||
521 | pr_err("Close failed: teardown gpadl return %d\n", ret); | ||
522 | /* | ||
523 | * If we failed to teardown gpadl, | ||
524 | * it is perhaps better to leak memory. | ||
525 | */ | ||
526 | return ret; | ||
527 | } | ||
528 | } | ||
509 | 529 | ||
510 | /* Cleanup the ring buffers for this channel */ | 530 | /* Cleanup the ring buffers for this channel */ |
511 | hv_ringbuffer_cleanup(&channel->outbound); | 531 | hv_ringbuffer_cleanup(&channel->outbound); |
@@ -514,7 +534,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel) | |||
514 | free_pages((unsigned long)channel->ringbuffer_pages, | 534 | free_pages((unsigned long)channel->ringbuffer_pages, |
515 | get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); | 535 | get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); |
516 | 536 | ||
517 | 537 | return ret; | |
518 | } | 538 | } |
519 | 539 | ||
520 | /* | 540 | /* |
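In this file vmbus_teardown_gpadl() and vmbus_close_internal() now return errors instead of BUG()-ing on a failed post, and the close path deliberately stops short of freeing the ring-buffer GPADL and pages when the host could not be told about the close; as the new comments say, leaking that memory is safer than freeing pages the hypervisor may still reference. A schematic of that fail-safe teardown ordering, with illustrative names rather than the vmbus code:

	/* struct resource_ctx and the helpers below are hypothetical. */
	static int teardown(struct resource_ctx *ctx)
	{
		int ret;

		ret = tell_peer_to_stop(ctx);	/* e.g. post a close message */
		if (ret)
			return ret;	/* peer may still use the buffers: do not free */

		ret = unmap_shared_buffers(ctx);
		if (ret)
			return ret;	/* same reasoning: better to leak than corrupt */

		free_local_copies(ctx);		/* only now is freeing safe */
		return 0;
	}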
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index ed9350d42764..a2d1a9612c86 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -224,11 +224,14 @@ static void vmbus_process_rescind_offer(struct work_struct *work) | |||
224 | msg.header.msgtype = CHANNELMSG_RELID_RELEASED; | 224 | msg.header.msgtype = CHANNELMSG_RELID_RELEASED; |
225 | vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); | 225 | vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); |
226 | 226 | ||
227 | if (channel->target_cpu != smp_processor_id()) | 227 | if (channel->target_cpu != get_cpu()) { |
228 | put_cpu(); | ||
228 | smp_call_function_single(channel->target_cpu, | 229 | smp_call_function_single(channel->target_cpu, |
229 | percpu_channel_deq, channel, true); | 230 | percpu_channel_deq, channel, true); |
230 | else | 231 | } else { |
231 | percpu_channel_deq(channel); | 232 | percpu_channel_deq(channel); |
233 | put_cpu(); | ||
234 | } | ||
232 | 235 | ||
233 | if (channel->primary_channel == NULL) { | 236 | if (channel->primary_channel == NULL) { |
234 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); | 237 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); |
@@ -294,12 +297,15 @@ static void vmbus_process_offer(struct work_struct *work) | |||
294 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); | 297 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); |
295 | 298 | ||
296 | if (enq) { | 299 | if (enq) { |
297 | if (newchannel->target_cpu != smp_processor_id()) | 300 | if (newchannel->target_cpu != get_cpu()) { |
301 | put_cpu(); | ||
298 | smp_call_function_single(newchannel->target_cpu, | 302 | smp_call_function_single(newchannel->target_cpu, |
299 | percpu_channel_enq, | 303 | percpu_channel_enq, |
300 | newchannel, true); | 304 | newchannel, true); |
301 | else | 305 | } else { |
302 | percpu_channel_enq(newchannel); | 306 | percpu_channel_enq(newchannel); |
307 | put_cpu(); | ||
308 | } | ||
303 | } | 309 | } |
304 | if (!fnew) { | 310 | if (!fnew) { |
305 | /* | 311 | /* |
@@ -314,12 +320,15 @@ static void vmbus_process_offer(struct work_struct *work) | |||
314 | list_add_tail(&newchannel->sc_list, &channel->sc_list); | 320 | list_add_tail(&newchannel->sc_list, &channel->sc_list); |
315 | spin_unlock_irqrestore(&channel->sc_lock, flags); | 321 | spin_unlock_irqrestore(&channel->sc_lock, flags); |
316 | 322 | ||
317 | if (newchannel->target_cpu != smp_processor_id()) | 323 | if (newchannel->target_cpu != get_cpu()) { |
324 | put_cpu(); | ||
318 | smp_call_function_single(newchannel->target_cpu, | 325 | smp_call_function_single(newchannel->target_cpu, |
319 | percpu_channel_enq, | 326 | percpu_channel_enq, |
320 | newchannel, true); | 327 | newchannel, true); |
321 | else | 328 | } else { |
322 | percpu_channel_enq(newchannel); | 329 | percpu_channel_enq(newchannel); |
330 | put_cpu(); | ||
331 | } | ||
323 | 332 | ||
324 | newchannel->state = CHANNEL_OPEN_STATE; | 333 | newchannel->state = CHANNEL_OPEN_STATE; |
325 | if (channel->sc_creation_callback != NULL) | 334 | if (channel->sc_creation_callback != NULL) |
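All three hunks in this file replace smp_processor_id() with get_cpu(), which disables preemption, so the task cannot be migrated between checking the channel's target CPU and either running the callback locally or sending an IPI to the remote CPU. A sketch of the resulting pattern (kernel-style, names illustrative):

	#include <linux/smp.h>

	static void run_on_target_cpu(int target_cpu, smp_call_func_t fn, void *info)
	{
		if (target_cpu != get_cpu()) {
			/* not our CPU: re-enable preemption, then use an IPI */
			put_cpu();
			smp_call_function_single(target_cpu, fn, info, true);
		} else {
			/* already on the right CPU; run with preemption disabled */
			fn(info);
			put_cpu();
		}
	}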
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index ae22e3c1fc4c..e206619b946e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -427,10 +427,21 @@ int vmbus_post_msg(void *buffer, size_t buflen) | |||
427 | * insufficient resources. Retry the operation a couple of | 427 | * insufficient resources. Retry the operation a couple of |
428 | * times before giving up. | 428 | * times before giving up. |
429 | */ | 429 | */ |
430 | while (retries < 3) { | 430 | while (retries < 10) { |
431 | ret = hv_post_message(conn_id, 1, buffer, buflen); | 431 | ret = hv_post_message(conn_id, 1, buffer, buflen); |
432 | if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) | 432 | |
433 | switch (ret) { | ||
434 | case HV_STATUS_INSUFFICIENT_BUFFERS: | ||
435 | ret = -ENOMEM; | ||
436 | case -ENOMEM: | ||
437 | break; | ||
438 | case HV_STATUS_SUCCESS: | ||
433 | return ret; | 439 | return ret; |
440 | default: | ||
441 | pr_err("hv_post_msg() failed; error code:%d\n", ret); | ||
442 | return -EINVAL; | ||
443 | } | ||
444 | |||
434 | retries++; | 445 | retries++; |
435 | msleep(100); | 446 | msleep(100); |
436 | } | 447 | } |
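vmbus_post_msg() now retries up to ten times, maps HV_STATUS_INSUFFICIENT_BUFFERS to -ENOMEM (the missing break before the -ENOMEM case is an intentional fallthrough), sleeps 100 ms between attempts, returns immediately on success, and turns any other hypervisor status into -EINVAL. The control flow, reduced to a standalone sketch with stand-in constants:

	#include <stdio.h>

	#define STATUS_SUCCESS		0
	#define STATUS_NO_BUFFERS	19	/* stand-in for HV_STATUS_INSUFFICIENT_BUFFERS */
	#define ENOMEM_ERR		(-12)
	#define EINVAL_ERR		(-22)

	static int post_once(int attempt)
	{
		return attempt < 3 ? STATUS_NO_BUFFERS : STATUS_SUCCESS;	/* simulated host */
	}

	static int post_with_retry(void)
	{
		int ret = ENOMEM_ERR, retries = 0;

		while (retries < 10) {
			switch ((ret = post_once(retries))) {
			case STATUS_NO_BUFFERS:
				ret = ENOMEM_ERR;	/* fall through: retry below */
			case ENOMEM_ERR:
				break;
			case STATUS_SUCCESS:
				return ret;
			default:
				return EINVAL_ERR;
			}
			retries++;
			/* msleep(100) in the real code */
		}
		return ret;
	}

	int main(void) { printf("%d\n", post_with_retry()); return 0; }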
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index edfc8488cb03..3e4235c7a47f 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c | |||
@@ -138,6 +138,8 @@ int hv_init(void) | |||
138 | memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); | 138 | memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); |
139 | memset(hv_context.synic_message_page, 0, | 139 | memset(hv_context.synic_message_page, 0, |
140 | sizeof(void *) * NR_CPUS); | 140 | sizeof(void *) * NR_CPUS); |
141 | memset(hv_context.post_msg_page, 0, | ||
142 | sizeof(void *) * NR_CPUS); | ||
141 | memset(hv_context.vp_index, 0, | 143 | memset(hv_context.vp_index, 0, |
142 | sizeof(int) * NR_CPUS); | 144 | sizeof(int) * NR_CPUS); |
143 | memset(hv_context.event_dpc, 0, | 145 | memset(hv_context.event_dpc, 0, |
@@ -217,26 +219,18 @@ int hv_post_message(union hv_connection_id connection_id, | |||
217 | enum hv_message_type message_type, | 219 | enum hv_message_type message_type, |
218 | void *payload, size_t payload_size) | 220 | void *payload, size_t payload_size) |
219 | { | 221 | { |
220 | struct aligned_input { | ||
221 | u64 alignment8; | ||
222 | struct hv_input_post_message msg; | ||
223 | }; | ||
224 | 222 | ||
225 | struct hv_input_post_message *aligned_msg; | 223 | struct hv_input_post_message *aligned_msg; |
226 | u16 status; | 224 | u16 status; |
227 | unsigned long addr; | ||
228 | 225 | ||
229 | if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) | 226 | if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) |
230 | return -EMSGSIZE; | 227 | return -EMSGSIZE; |
231 | 228 | ||
232 | addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC); | ||
233 | if (!addr) | ||
234 | return -ENOMEM; | ||
235 | |||
236 | aligned_msg = (struct hv_input_post_message *) | 229 | aligned_msg = (struct hv_input_post_message *) |
237 | (ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN)); | 230 | hv_context.post_msg_page[get_cpu()]; |
238 | 231 | ||
239 | aligned_msg->connectionid = connection_id; | 232 | aligned_msg->connectionid = connection_id; |
233 | aligned_msg->reserved = 0; | ||
240 | aligned_msg->message_type = message_type; | 234 | aligned_msg->message_type = message_type; |
241 | aligned_msg->payload_size = payload_size; | 235 | aligned_msg->payload_size = payload_size; |
242 | memcpy((void *)aligned_msg->payload, payload, payload_size); | 236 | memcpy((void *)aligned_msg->payload, payload, payload_size); |
@@ -244,8 +238,7 @@ int hv_post_message(union hv_connection_id connection_id, | |||
244 | status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) | 238 | status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) |
245 | & 0xFFFF; | 239 | & 0xFFFF; |
246 | 240 | ||
247 | kfree((void *)addr); | 241 | put_cpu(); |
248 | |||
249 | return status; | 242 | return status; |
250 | } | 243 | } |
251 | 244 | ||
@@ -294,6 +287,14 @@ int hv_synic_alloc(void) | |||
294 | pr_err("Unable to allocate SYNIC event page\n"); | 287 | pr_err("Unable to allocate SYNIC event page\n"); |
295 | goto err; | 288 | goto err; |
296 | } | 289 | } |
290 | |||
291 | hv_context.post_msg_page[cpu] = | ||
292 | (void *)get_zeroed_page(GFP_ATOMIC); | ||
293 | |||
294 | if (hv_context.post_msg_page[cpu] == NULL) { | ||
295 | pr_err("Unable to allocate post msg page\n"); | ||
296 | goto err; | ||
297 | } | ||
297 | } | 298 | } |
298 | 299 | ||
299 | return 0; | 300 | return 0; |
@@ -308,6 +309,8 @@ static void hv_synic_free_cpu(int cpu) | |||
308 | free_page((unsigned long)hv_context.synic_event_page[cpu]); | 309 | free_page((unsigned long)hv_context.synic_event_page[cpu]); |
309 | if (hv_context.synic_message_page[cpu]) | 310 | if (hv_context.synic_message_page[cpu]) |
310 | free_page((unsigned long)hv_context.synic_message_page[cpu]); | 311 | free_page((unsigned long)hv_context.synic_message_page[cpu]); |
312 | if (hv_context.post_msg_page[cpu]) | ||
313 | free_page((unsigned long)hv_context.post_msg_page[cpu]); | ||
311 | } | 314 | } |
312 | 315 | ||
313 | void hv_synic_free(void) | 316 | void hv_synic_free(void) |
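hv_post_message() used to kmalloc an over-aligned scratch buffer with GFP_ATOMIC on every call; the patch instead pre-allocates one zeroed page per CPU in hv_synic_alloc() and brackets its use with get_cpu()/put_cpu() so the caller stays on the CPU whose page it borrowed. The general shape of that per-CPU scratch-buffer pattern, with an illustrative hypercall stub rather than the hv code:

	#include <linux/gfp.h>
	#include <linux/smp.h>
	#include <linux/string.h>

	static void *scratch_page[NR_CPUS];

	static int issue_hypercall(void *buf);	/* hypothetical, stands in for do_hypercall() */

	static int scratch_alloc(void)
	{
		int cpu;

		for_each_present_cpu(cpu) {
			scratch_page[cpu] = (void *)get_zeroed_page(GFP_KERNEL);
			if (!scratch_page[cpu])
				return -ENOMEM;
		}
		return 0;
	}

	static int scratch_post(const void *payload, size_t len)
	{
		void *buf = scratch_page[get_cpu()];	/* preemption disabled from here */
		int ret;

		memcpy(buf, payload, len);
		ret = issue_hypercall(buf);
		put_cpu();
		return ret;
	}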
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 22b750749a39..c386d8dc7223 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h | |||
@@ -515,6 +515,10 @@ struct hv_context { | |||
515 | * per-cpu list of the channels based on their CPU affinity. | 515 | * per-cpu list of the channels based on their CPU affinity. |
516 | */ | 516 | */ |
517 | struct list_head percpu_list[NR_CPUS]; | 517 | struct list_head percpu_list[NR_CPUS]; |
518 | /* | ||
519 | * buffer to post messages to the host. | ||
520 | */ | ||
521 | void *post_msg_page[NR_CPUS]; | ||
518 | }; | 522 | }; |
519 | 523 | ||
520 | extern struct hv_context hv_context; | 524 | extern struct hv_context hv_context; |
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 15db66b74141..6361d124f67d 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
@@ -361,6 +361,11 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, | |||
361 | ring_info->ring_buffer->read_index = | 361 | ring_info->ring_buffer->read_index = |
362 | ring_info->ring_buffer->write_index = 0; | 362 | ring_info->ring_buffer->write_index = 0; |
363 | 363 | ||
364 | /* | ||
365 | * Set the feature bit for enabling flow control. | ||
366 | */ | ||
367 | ring_info->ring_buffer->feature_bits.value = 1; | ||
368 | |||
364 | ring_info->ring_size = buflen; | 369 | ring_info->ring_size = buflen; |
365 | ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer); | 370 | ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer); |
366 | 371 | ||
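Writing 1 to feature_bits.value turns on the lowest-order feature flag of the ring-buffer header, which is what the "interrupt driven flow control" change in this series advertises to the host (see the new comment in the hunk). The union-with-bitfield idiom behind such a field, in generic form; the field names here are illustrative, not the Hyper-V definition:

	#include <linux/types.h>

	union ring_features {
		u32 value;			/* raw view, written as a whole */
		struct {
			u32 flow_control:1;	/* bit 0: first advertised feature */
			u32 reserved:31;
		};
	};

	/* ring->feature_bits.value = 1; sets only the flow_control bit above. */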
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index de5e32151a1e..9b23843dcad4 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c | |||
@@ -572,7 +572,8 @@ static int tpci200_pci_probe(struct pci_dev *pdev, | |||
572 | /* Register the carrier in the industry pack bus driver */ | 572 | /* Register the carrier in the industry pack bus driver */ |
573 | tpci200->info->ipack_bus = ipack_bus_register(&pdev->dev, | 573 | tpci200->info->ipack_bus = ipack_bus_register(&pdev->dev, |
574 | TPCI200_NB_SLOT, | 574 | TPCI200_NB_SLOT, |
575 | &tpci200_bus_ops); | 575 | &tpci200_bus_ops, |
576 | THIS_MODULE); | ||
576 | if (!tpci200->info->ipack_bus) { | 577 | if (!tpci200->info->ipack_bus) { |
577 | dev_err(&pdev->dev, | 578 | dev_err(&pdev->dev, |
578 | "error registering the carrier on ipack driver\n"); | 579 | "error registering the carrier on ipack driver\n"); |
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index e41bef048c23..035d5449227e 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c | |||
@@ -55,6 +55,22 @@ struct ipoctal { | |||
55 | u8 __iomem *int_space; | 55 | u8 __iomem *int_space; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static inline struct ipoctal *chan_to_ipoctal(struct ipoctal_channel *chan, | ||
59 | unsigned int index) | ||
60 | { | ||
61 | return container_of(chan, struct ipoctal, channel[index]); | ||
62 | } | ||
63 | |||
64 | static void ipoctal_reset_channel(struct ipoctal_channel *channel) | ||
65 | { | ||
66 | iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); | ||
67 | channel->rx_enable = 0; | ||
68 | iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); | ||
69 | iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); | ||
70 | iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); | ||
71 | iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr); | ||
72 | } | ||
73 | |||
58 | static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) | 74 | static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) |
59 | { | 75 | { |
60 | struct ipoctal_channel *channel; | 76 | struct ipoctal_channel *channel; |
@@ -72,12 +88,20 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) | |||
72 | 88 | ||
73 | static int ipoctal_open(struct tty_struct *tty, struct file *file) | 89 | static int ipoctal_open(struct tty_struct *tty, struct file *file) |
74 | { | 90 | { |
75 | struct ipoctal_channel *channel; | 91 | struct ipoctal_channel *channel = dev_get_drvdata(tty->dev); |
92 | struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); | ||
93 | int err; | ||
76 | 94 | ||
77 | channel = dev_get_drvdata(tty->dev); | ||
78 | tty->driver_data = channel; | 95 | tty->driver_data = channel; |
79 | 96 | ||
80 | return tty_port_open(&channel->tty_port, tty, file); | 97 | if (!ipack_get_carrier(ipoctal->dev)) |
98 | return -EBUSY; | ||
99 | |||
100 | err = tty_port_open(&channel->tty_port, tty, file); | ||
101 | if (err) | ||
102 | ipack_put_carrier(ipoctal->dev); | ||
103 | |||
104 | return err; | ||
81 | } | 105 | } |
82 | 106 | ||
83 | static void ipoctal_reset_stats(struct ipoctal_stats *stats) | 107 | static void ipoctal_reset_stats(struct ipoctal_stats *stats) |
@@ -151,7 +175,6 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr) | |||
151 | flag = TTY_FRAME; | 175 | flag = TTY_FRAME; |
152 | } | 176 | } |
153 | if (sr & SR_RECEIVED_BREAK) { | 177 | if (sr & SR_RECEIVED_BREAK) { |
154 | iowrite8(CR_CMD_RESET_BREAK_CHANGE, &channel->regs->w.cr); | ||
155 | channel->stats.rcv_break++; | 178 | channel->stats.rcv_break++; |
156 | flag = TTY_BREAK; | 179 | flag = TTY_BREAK; |
157 | } | 180 | } |
@@ -196,6 +219,9 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel) | |||
196 | isr = ioread8(&channel->block_regs->r.isr); | 219 | isr = ioread8(&channel->block_regs->r.isr); |
197 | sr = ioread8(&channel->regs->r.sr); | 220 | sr = ioread8(&channel->regs->r.sr); |
198 | 221 | ||
222 | if (isr & (IMR_DELTA_BREAK_A | IMR_DELTA_BREAK_B)) | ||
223 | iowrite8(CR_CMD_RESET_BREAK_CHANGE, &channel->regs->w.cr); | ||
224 | |||
199 | if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) { | 225 | if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) { |
200 | iowrite8(CR_DISABLE_TX, &channel->regs->w.cr); | 226 | iowrite8(CR_DISABLE_TX, &channel->regs->w.cr); |
201 | /* In case of RS-485, change from TX to RX when finishing TX. | 227 | /* In case of RS-485, change from TX to RX when finishing TX. |
@@ -304,10 +330,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, | |||
304 | channel->isr_rx_rdy_mask = ISR_RxRDY_FFULL_A; | 330 | channel->isr_rx_rdy_mask = ISR_RxRDY_FFULL_A; |
305 | } | 331 | } |
306 | 332 | ||
307 | iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); | 333 | ipoctal_reset_channel(channel); |
308 | channel->rx_enable = 0; | ||
309 | iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); | ||
310 | iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); | ||
311 | iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY, | 334 | iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY, |
312 | &channel->regs->w.mr); /* mr1 */ | 335 | &channel->regs->w.mr); /* mr1 */ |
313 | iowrite8(0, &channel->regs->w.mr); /* mr2 */ | 336 | iowrite8(0, &channel->regs->w.mr); /* mr2 */ |
@@ -467,11 +490,7 @@ static void ipoctal_set_termios(struct tty_struct *tty, | |||
467 | cflag = tty->termios.c_cflag; | 490 | cflag = tty->termios.c_cflag; |
468 | 491 | ||
469 | /* Disable and reset everything before change the setup */ | 492 | /* Disable and reset everything before change the setup */ |
470 | iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); | 493 | ipoctal_reset_channel(channel); |
471 | iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); | ||
472 | iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); | ||
473 | iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); | ||
474 | iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr); | ||
475 | 494 | ||
476 | /* Set Bits per chars */ | 495 | /* Set Bits per chars */ |
477 | switch (cflag & CSIZE) { | 496 | switch (cflag & CSIZE) { |
@@ -609,12 +628,7 @@ static void ipoctal_hangup(struct tty_struct *tty) | |||
609 | 628 | ||
610 | tty_port_hangup(&channel->tty_port); | 629 | tty_port_hangup(&channel->tty_port); |
611 | 630 | ||
612 | iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); | 631 | ipoctal_reset_channel(channel); |
613 | channel->rx_enable = 0; | ||
614 | iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); | ||
615 | iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); | ||
616 | iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); | ||
617 | iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr); | ||
618 | 632 | ||
619 | clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags); | 633 | clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags); |
620 | wake_up_interruptible(&channel->tty_port.open_wait); | 634 | wake_up_interruptible(&channel->tty_port.open_wait); |
@@ -627,15 +641,19 @@ static void ipoctal_shutdown(struct tty_struct *tty) | |||
627 | if (channel == NULL) | 641 | if (channel == NULL) |
628 | return; | 642 | return; |
629 | 643 | ||
630 | iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr); | 644 | ipoctal_reset_channel(channel); |
631 | channel->rx_enable = 0; | ||
632 | iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr); | ||
633 | iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr); | ||
634 | iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr); | ||
635 | iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr); | ||
636 | clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags); | 645 | clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags); |
637 | } | 646 | } |
638 | 647 | ||
648 | static void ipoctal_cleanup(struct tty_struct *tty) | ||
649 | { | ||
650 | struct ipoctal_channel *channel = tty->driver_data; | ||
651 | struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); | ||
652 | |||
653 | /* release the carrier driver */ | ||
654 | ipack_put_carrier(ipoctal->dev); | ||
655 | } | ||
656 | |||
639 | static const struct tty_operations ipoctal_fops = { | 657 | static const struct tty_operations ipoctal_fops = { |
640 | .ioctl = NULL, | 658 | .ioctl = NULL, |
641 | .open = ipoctal_open, | 659 | .open = ipoctal_open, |
@@ -647,6 +665,7 @@ static const struct tty_operations ipoctal_fops = { | |||
647 | .get_icount = ipoctal_get_icount, | 665 | .get_icount = ipoctal_get_icount, |
648 | .hangup = ipoctal_hangup, | 666 | .hangup = ipoctal_hangup, |
649 | .shutdown = ipoctal_shutdown, | 667 | .shutdown = ipoctal_shutdown, |
668 | .cleanup = ipoctal_cleanup, | ||
650 | }; | 669 | }; |
651 | 670 | ||
652 | static int ipoctal_probe(struct ipack_device *dev) | 671 | static int ipoctal_probe(struct ipack_device *dev) |
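The ipoctal changes factor the repeated five-write reset sequence into ipoctal_reset_channel(), take a reference on the carrier module in open() via ipack_get_carrier() and drop it in the new .cleanup hook, and use container_of() on an embedded array element to get from a channel back to its parent struct ipoctal. That last trick is easy to misread; a standalone demonstration with a simplified container_of:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified: the kernel macro adds a type check on top of this arithmetic. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct chan  { int id; };
	struct octal { int nr; struct chan channel[8]; };

	int main(void)
	{
		struct octal o = { .nr = 42 };
		struct chan *c = &o.channel[3];

		/* member designator includes the index, as in chan_to_ipoctal() */
		struct octal *back = container_of(c, struct octal, channel[3]);

		printf("%d\n", back->nr);	/* prints 42 */
		return 0;
	}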
diff --git a/drivers/ipack/devices/ipoctal.h b/drivers/ipack/devices/ipoctal.h index 28f1c4233154..7fede0eb6a0c 100644 --- a/drivers/ipack/devices/ipoctal.h +++ b/drivers/ipack/devices/ipoctal.h | |||
@@ -12,7 +12,7 @@ | |||
12 | * Software Foundation; version 2 of the License. | 12 | * Software Foundation; version 2 of the License. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #ifndef _IPOCTAL_H | 15 | #ifndef _IPOCTAL_H_ |
16 | #define _IPOCTAL_H_ | 16 | #define _IPOCTAL_H_ |
17 | 17 | ||
18 | #define NR_CHANNELS 8 | 18 | #define NR_CHANNELS 8 |
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c index d0016ba469ed..c0e7b624ce54 100644 --- a/drivers/ipack/ipack.c +++ b/drivers/ipack/ipack.c | |||
@@ -206,7 +206,8 @@ static struct bus_type ipack_bus_type = { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, | 208 | struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, |
209 | const struct ipack_bus_ops *ops) | 209 | const struct ipack_bus_ops *ops, |
210 | struct module *owner) | ||
210 | { | 211 | { |
211 | int bus_nr; | 212 | int bus_nr; |
212 | struct ipack_bus_device *bus; | 213 | struct ipack_bus_device *bus; |
@@ -225,6 +226,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, | |||
225 | bus->parent = parent; | 226 | bus->parent = parent; |
226 | bus->slots = slots; | 227 | bus->slots = slots; |
227 | bus->ops = ops; | 228 | bus->ops = ops; |
229 | bus->owner = owner; | ||
228 | return bus; | 230 | return bus; |
229 | } | 231 | } |
230 | EXPORT_SYMBOL_GPL(ipack_bus_register); | 232 | EXPORT_SYMBOL_GPL(ipack_bus_register); |
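ipack_bus_register() now records the carrier module as bus->owner, which is what lets ipoctal pin the carrier while a tty is open through the ipack_get_carrier()/ipack_put_carrier() calls seen earlier. The following is only a guess at how such helpers would be built on the new field, labeled as an assumption and not taken from the patch:

	int ipack_get_carrier(struct ipack_device *dev)
	{
		return try_module_get(dev->bus->owner);	/* assumed implementation */
	}

	void ipack_put_carrier(struct ipack_device *dev)
	{
		module_put(dev->bus->owner);		/* assumed implementation */
	}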
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 33f8673d23a6..b432873def96 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/device.h> |
22 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
23 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
@@ -159,12 +159,11 @@ static int eeprom_probe(struct i2c_client *client, | |||
159 | { | 159 | { |
160 | struct i2c_adapter *adapter = client->adapter; | 160 | struct i2c_adapter *adapter = client->adapter; |
161 | struct eeprom_data *data; | 161 | struct eeprom_data *data; |
162 | int err; | ||
163 | 162 | ||
164 | if (!(data = kzalloc(sizeof(struct eeprom_data), GFP_KERNEL))) { | 163 | data = devm_kzalloc(&client->dev, sizeof(struct eeprom_data), |
165 | err = -ENOMEM; | 164 | GFP_KERNEL); |
166 | goto exit; | 165 | if (!data) |
167 | } | 166 | return -ENOMEM; |
168 | 167 | ||
169 | memset(data->data, 0xff, EEPROM_SIZE); | 168 | memset(data->data, 0xff, EEPROM_SIZE); |
170 | i2c_set_clientdata(client, data); | 169 | i2c_set_clientdata(client, data); |
@@ -190,22 +189,12 @@ static int eeprom_probe(struct i2c_client *client, | |||
190 | } | 189 | } |
191 | 190 | ||
192 | /* create the sysfs eeprom file */ | 191 | /* create the sysfs eeprom file */ |
193 | err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); | 192 | return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); |
194 | if (err) | ||
195 | goto exit_kfree; | ||
196 | |||
197 | return 0; | ||
198 | |||
199 | exit_kfree: | ||
200 | kfree(data); | ||
201 | exit: | ||
202 | return err; | ||
203 | } | 193 | } |
204 | 194 | ||
205 | static int eeprom_remove(struct i2c_client *client) | 195 | static int eeprom_remove(struct i2c_client *client) |
206 | { | 196 | { |
207 | sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); | 197 | sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); |
208 | kfree(i2c_get_clientdata(client)); | ||
209 | 198 | ||
210 | return 0; | 199 | return 0; |
211 | } | 200 | } |
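Switching eeprom_probe() to devm_kzalloc() ties the allocation's lifetime to the i2c client device, so the kfree() in the error path and in eeprom_remove() become unnecessary and the slab.h include can give way to device.h. The resulting probe shape, as a minimal sketch with placeholder driver-private names:

	#include <linux/i2c.h>

	struct foo_data { u8 buf[256]; };	/* placeholder private state */

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_data *data;

		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		i2c_set_clientdata(client, data);
		return 0;	/* freed automatically when the device is unbound */
	}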
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index 43bbabc96b6c..4cf8f82cfca2 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -45,10 +45,10 @@ | |||
45 | MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); | 45 | MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); |
46 | MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); | 46 | MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); |
47 | MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); | 47 | MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); |
48 | MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>"); | 48 | MODULE_AUTHOR("Michael Jung <mijung@gmx.net>"); |
49 | 49 | ||
50 | MODULE_DESCRIPTION("GenWQE Card"); | 50 | MODULE_DESCRIPTION("GenWQE Card"); |
51 | MODULE_VERSION(DRV_VERS_STRING); | 51 | MODULE_VERSION(DRV_VERSION); |
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
53 | 53 | ||
54 | static char genwqe_driver_name[] = GENWQE_DEVNAME; | 54 | static char genwqe_driver_name[] = GENWQE_DEVNAME; |
@@ -346,8 +346,13 @@ static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd) | |||
346 | unsigned int vf; | 346 | unsigned int vf; |
347 | u32 T = genwqe_T_psec(cd); | 347 | u32 T = genwqe_T_psec(cd); |
348 | u64 x; | 348 | u64 x; |
349 | int totalvfs; | ||
349 | 350 | ||
350 | for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) { | 351 | totalvfs = pci_sriov_get_totalvfs(pci_dev); |
352 | if (totalvfs <= 0) | ||
353 | return false; | ||
354 | |||
355 | for (vf = 0; vf < totalvfs; vf++) { | ||
351 | 356 | ||
352 | if (cd->vf_jobtimeout_msec[vf] == 0) | 357 | if (cd->vf_jobtimeout_msec[vf] == 0) |
353 | continue; | 358 | continue; |
@@ -383,8 +388,9 @@ static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd) | |||
383 | 388 | ||
384 | /* currently support only the debug units mentioned here */ | 389 | /* currently support only the debug units mentioned here */ |
385 | cd->ffdc[type].entries = e; | 390 | cd->ffdc[type].entries = e; |
386 | cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg), | 391 | cd->ffdc[type].regs = |
387 | GFP_KERNEL); | 392 | kmalloc_array(e, sizeof(struct genwqe_reg), |
393 | GFP_KERNEL); | ||
388 | /* | 394 | /* |
389 | * regs == NULL is ok, the using code treats this as no regs, | 395 | * regs == NULL is ok, the using code treats this as no regs, |
390 | * Printing warning is ok in this case. | 396 | * Printing warning is ok in this case. |
@@ -723,8 +729,8 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd) | |||
723 | __genwqe_writeq(cd, sfir_addr, sfir); | 729 | __genwqe_writeq(cd, sfir_addr, sfir); |
724 | 730 | ||
725 | dev_dbg(&pci_dev->dev, | 731 | dev_dbg(&pci_dev->dev, |
726 | "[HM] Clearing 2ndary FIR 0x%08x " | 732 | "[HM] Clearing 2ndary FIR 0x%08x with 0x%016llx\n", |
727 | "with 0x%016llx\n", sfir_addr, sfir); | 733 | sfir_addr, sfir); |
728 | 734 | ||
729 | /* | 735 | /* |
730 | * note, these cannot be error-Firs | 736 | * note, these cannot be error-Firs |
@@ -740,9 +746,8 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd) | |||
740 | __genwqe_writeq(cd, fir_clr_addr, mask); | 746 | __genwqe_writeq(cd, fir_clr_addr, mask); |
741 | 747 | ||
742 | dev_dbg(&pci_dev->dev, | 748 | dev_dbg(&pci_dev->dev, |
743 | "[HM] Clearing primary FIR 0x%08x " | 749 | "[HM] Clearing primary FIR 0x%08x with 0x%016llx\n", |
744 | "with 0x%016llx\n", fir_clr_addr, | 750 | fir_clr_addr, mask); |
745 | mask); | ||
746 | } | 751 | } |
747 | } | 752 | } |
748 | } | 753 | } |
@@ -1125,6 +1130,8 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) | |||
1125 | } | 1130 | } |
1126 | 1131 | ||
1127 | cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); | 1132 | cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); |
1133 | if (cd->num_vfs < 0) | ||
1134 | cd->num_vfs = 0; | ||
1128 | 1135 | ||
1129 | err = genwqe_read_ids(cd); | 1136 | err = genwqe_read_ids(cd); |
1130 | if (err) | 1137 | if (err) |
@@ -1202,8 +1209,8 @@ static int genwqe_probe(struct pci_dev *pci_dev, | |||
1202 | err = genwqe_health_check_start(cd); | 1209 | err = genwqe_health_check_start(cd); |
1203 | if (err < 0) { | 1210 | if (err < 0) { |
1204 | dev_err(&pci_dev->dev, | 1211 | dev_err(&pci_dev->dev, |
1205 | "err: cannot start health checking! " | 1212 | "err: cannot start health checking! (err=%d)\n", |
1206 | "(err=%d)\n", err); | 1213 | err); |
1207 | goto out_stop_services; | 1214 | goto out_stop_services; |
1208 | } | 1215 | } |
1209 | } | 1216 | } |
@@ -1313,11 +1320,14 @@ static void genwqe_err_resume(struct pci_dev *pci_dev) | |||
1313 | 1320 | ||
1314 | static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) | 1321 | static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) |
1315 | { | 1322 | { |
1323 | int rc; | ||
1316 | struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); | 1324 | struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); |
1317 | 1325 | ||
1318 | if (numvfs > 0) { | 1326 | if (numvfs > 0) { |
1319 | genwqe_setup_vf_jtimer(cd); | 1327 | genwqe_setup_vf_jtimer(cd); |
1320 | pci_enable_sriov(dev, numvfs); | 1328 | rc = pci_enable_sriov(dev, numvfs); |
1329 | if (rc < 0) | ||
1330 | return rc; | ||
1321 | return numvfs; | 1331 | return numvfs; |
1322 | } | 1332 | } |
1323 | if (numvfs == 0) { | 1333 | if (numvfs == 0) { |
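pci_sriov_get_totalvfs() can return a negative errno, so genwqe_setup_vf_jtimer() and genwqe_pci_setup() now guard the value, and genwqe_sriov_configure() propagates a pci_enable_sriov() failure instead of claiming numvfs were enabled. The generic .sriov_configure shape this converges on, as a sketch rather than the genwqe code:

	#include <linux/pci.h>

	static int foo_sriov_configure(struct pci_dev *dev, int numvfs)
	{
		int rc;

		if (numvfs > 0) {
			rc = pci_enable_sriov(dev, numvfs);
			if (rc < 0)
				return rc;	/* report the failure to the sysfs writer */
			return numvfs;
		}
		pci_disable_sriov(dev);
		return 0;
	}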
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 67abd8cb2247..c64d7cad1085 100644 --- a/drivers/misc/genwqe/card_base.h +++ b/drivers/misc/genwqe/card_base.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
11 | * Author: Michael Jung <mijung@de.ibm.com> | 11 | * Author: Michael Jung <mijung@gmx.net> |
12 | * Author: Michael Ruettger <michael@ibmra.de> | 12 | * Author: Michael Ruettger <michael@ibmra.de> |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
@@ -201,7 +201,8 @@ static inline void genwqe_mapping_init(struct dma_mapping *m, | |||
201 | * @ddcb_seq: Sequence number of last DDCB | 201 | * @ddcb_seq: Sequence number of last DDCB |
202 | * @ddcbs_in_flight: Currently enqueued DDCBs | 202 | * @ddcbs_in_flight: Currently enqueued DDCBs |
203 | * @ddcbs_completed: Number of already completed DDCBs | 203 | * @ddcbs_completed: Number of already completed DDCBs |
204 | * @busy: Number of -EBUSY returns | 204 | * @return_on_busy: Number of -EBUSY returns on full queue |
205 | * @wait_on_busy: Number of waits on full queue | ||
205 | * @ddcb_daddr: DMA address of first DDCB in the queue | 206 | * @ddcb_daddr: DMA address of first DDCB in the queue |
206 | * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue | 207 | * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue |
207 | * @ddcb_req: Associated requests (one per DDCB) | 208 | * @ddcb_req: Associated requests (one per DDCB) |
@@ -218,7 +219,8 @@ struct ddcb_queue { | |||
218 | unsigned int ddcbs_in_flight; /* number of ddcbs in processing */ | 219 | unsigned int ddcbs_in_flight; /* number of ddcbs in processing */ |
219 | unsigned int ddcbs_completed; | 220 | unsigned int ddcbs_completed; |
220 | unsigned int ddcbs_max_in_flight; | 221 | unsigned int ddcbs_max_in_flight; |
221 | unsigned int busy; /* how many times -EBUSY? */ | 222 | unsigned int return_on_busy; /* how many times -EBUSY? */ |
223 | unsigned int wait_on_busy; | ||
222 | 224 | ||
223 | dma_addr_t ddcb_daddr; /* DMA address */ | 225 | dma_addr_t ddcb_daddr; /* DMA address */ |
224 | struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */ | 226 | struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */ |
@@ -226,7 +228,7 @@ struct ddcb_queue { | |||
226 | wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ | 228 | wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ |
227 | 229 | ||
228 | spinlock_t ddcb_lock; /* exclusive access to queue */ | 230 | spinlock_t ddcb_lock; /* exclusive access to queue */ |
229 | wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */ | 231 | wait_queue_head_t busy_waitq; /* wait for ddcb processing */ |
230 | 232 | ||
231 | /* registers or the respective queue to be used */ | 233 | /* registers or the respective queue to be used */ |
232 | u32 IO_QUEUE_CONFIG; | 234 | u32 IO_QUEUE_CONFIG; |
@@ -306,7 +308,7 @@ struct genwqe_dev { | |||
306 | struct pci_dev *pci_dev; /* PCI device */ | 308 | struct pci_dev *pci_dev; /* PCI device */ |
307 | void __iomem *mmio; /* BAR-0 MMIO start */ | 309 | void __iomem *mmio; /* BAR-0 MMIO start */ |
308 | unsigned long mmio_len; | 310 | unsigned long mmio_len; |
309 | u16 num_vfs; | 311 | int num_vfs; |
310 | u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; | 312 | u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; |
311 | int is_privileged; /* access to all regs possible */ | 313 | int is_privileged; /* access to all regs possible */ |
312 | 314 | ||
@@ -508,7 +510,7 @@ static inline bool dma_mapping_used(struct dma_mapping *m) | |||
508 | * buildup and teardown. | 510 | * buildup and teardown. |
509 | */ | 511 | */ |
510 | int __genwqe_execute_ddcb(struct genwqe_dev *cd, | 512 | int __genwqe_execute_ddcb(struct genwqe_dev *cd, |
511 | struct genwqe_ddcb_cmd *cmd); | 513 | struct genwqe_ddcb_cmd *cmd, unsigned int f_flags); |
512 | 514 | ||
513 | /** | 515 | /** |
514 | * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation | 516 | * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation |
@@ -520,9 +522,12 @@ int __genwqe_execute_ddcb(struct genwqe_dev *cd, | |||
520 | * modification. | 522 | * modification. |
521 | */ | 523 | */ |
522 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, | 524 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, |
523 | struct genwqe_ddcb_cmd *cmd); | 525 | struct genwqe_ddcb_cmd *cmd, |
526 | unsigned int f_flags); | ||
527 | int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, | ||
528 | struct ddcb_requ *req, | ||
529 | unsigned int f_flags); | ||
524 | 530 | ||
525 | int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | ||
526 | int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | 531 | int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); |
527 | int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); | 532 | int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); |
528 | 533 | ||
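The card_base.h hunks thread the file's f_flags down to the DDCB enqueue path and split the old busy counter into return_on_busy and wait_on_busy, adding a busy_waitq: a non-blocking opener still gets -EBUSY on a full queue, while a blocking opener sleeps until the card_ddcb.c completion path that follows wakes busy_waitq. A schematic of that policy, with illustrative names; queue_full() and the surrounding locking are elided, and the real logic lives in card_ddcb.c:

	static int enqueue_or_wait(struct ddcb_queue *queue, unsigned int f_flags)
	{
		if (queue_full(queue)) {
			if (f_flags & O_NONBLOCK) {
				queue->return_on_busy++;
				return -EBUSY;
			}
			queue->wait_on_busy++;
			if (wait_event_interruptible(queue->busy_waitq,
						     !queue_full(queue)))
				return -ERESTARTSYS;
		}
		return 0;	/* caller proceeds to place the DDCB */
	}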
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c index dc9851a5540e..6d51e5f08664 100644 --- a/drivers/misc/genwqe/card_ddcb.c +++ b/drivers/misc/genwqe/card_ddcb.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -185,8 +185,7 @@ static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) | |||
185 | pddcb = queue->ddcb_vaddr; | 185 | pddcb = queue->ddcb_vaddr; |
186 | for (i = 0; i < queue->ddcb_max; i++) { | 186 | for (i = 0; i < queue->ddcb_max; i++) { |
187 | dev_err(&pci_dev->dev, | 187 | dev_err(&pci_dev->dev, |
188 | " %c %-3d: RETC=%03x SEQ=%04x " | 188 | " %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", |
189 | "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", | ||
190 | i == queue->ddcb_act ? '>' : ' ', | 189 | i == queue->ddcb_act ? '>' : ' ', |
191 | i, | 190 | i, |
192 | be16_to_cpu(pddcb->retc_16), | 191 | be16_to_cpu(pddcb->retc_16), |
@@ -214,6 +213,7 @@ struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) | |||
214 | void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) | 213 | void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) |
215 | { | 214 | { |
216 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); | 215 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); |
216 | |||
217 | kfree(req); | 217 | kfree(req); |
218 | } | 218 | } |
219 | 219 | ||
@@ -306,7 +306,7 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, | |||
306 | 306 | ||
307 | new = (old | DDCB_NEXT_BE32); | 307 | new = (old | DDCB_NEXT_BE32); |
308 | 308 | ||
309 | wmb(); | 309 | wmb(); /* need to ensure write ordering */ |
310 | icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); | 310 | icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); |
311 | 311 | ||
312 | if (icrc_hsi_shi == old) | 312 | if (icrc_hsi_shi == old) |
@@ -317,7 +317,7 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, | |||
317 | ddcb_mark_tapped(pddcb); | 317 | ddcb_mark_tapped(pddcb); |
318 | num = (u64)ddcb_no << 8; | 318 | num = (u64)ddcb_no << 8; |
319 | 319 | ||
320 | wmb(); | 320 | wmb(); /* need to ensure write ordering */ |
321 | __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ | 321 | __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ |
322 | 322 | ||
323 | return RET_DDCB_TAPPED; | 323 | return RET_DDCB_TAPPED; |
@@ -390,8 +390,9 @@ static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, | |||
390 | 0x00000000) | 390 | 0x00000000) |
391 | goto go_home; /* not completed, continue waiting */ | 391 | goto go_home; /* not completed, continue waiting */ |
392 | 392 | ||
393 | /* Note: DDCB could be purged */ | 393 | wmb(); /* Add sync to decouple prev. read operations */ |
394 | 394 | ||
395 | /* Note: DDCB could be purged */ | ||
395 | req = queue->ddcb_req[queue->ddcb_act]; | 396 | req = queue->ddcb_req[queue->ddcb_act]; |
396 | if (req == NULL) { | 397 | if (req == NULL) { |
397 | /* this occurs if DDCB is purged, not an error */ | 398 | /* this occurs if DDCB is purged, not an error */ |
@@ -416,9 +417,7 @@ static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, | |||
416 | status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); | 417 | status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); |
417 | 418 | ||
418 | dev_err(&pci_dev->dev, | 419 | dev_err(&pci_dev->dev, |
419 | "[%s] SEQN=%04x HSI=%02x RETC=%03x " | 420 | "[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n", |
420 | " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n" | ||
421 | " DDCB_DMA_ADDR=%016llx\n", | ||
422 | __func__, be16_to_cpu(pddcb->seqnum_16), | 421 | __func__, be16_to_cpu(pddcb->seqnum_16), |
423 | pddcb->hsi, retc_16, errcnts, status, | 422 | pddcb->hsi, retc_16, errcnts, status, |
424 | queue->ddcb_daddr + ddcb_offs); | 423 | queue->ddcb_daddr + ddcb_offs); |
@@ -439,8 +438,7 @@ static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, | |||
439 | vcrc_16 = be16_to_cpu(pddcb->vcrc_16); | 438 | vcrc_16 = be16_to_cpu(pddcb->vcrc_16); |
440 | if (vcrc != vcrc_16) { | 439 | if (vcrc != vcrc_16) { |
441 | printk_ratelimited(KERN_ERR | 440 | printk_ratelimited(KERN_ERR |
442 | "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d " | 441 | "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n", |
443 | "bytes vcrc_data=%04x is not vcrc_card=%04x\n", | ||
444 | GENWQE_DEVNAME, dev_name(&pci_dev->dev), | 442 | GENWQE_DEVNAME, dev_name(&pci_dev->dev), |
445 | pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), | 443 | pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), |
446 | vcrc, vcrc_16); | 444 | vcrc, vcrc_16); |
@@ -450,8 +448,10 @@ static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, | |||
450 | queue->ddcbs_completed++; | 448 | queue->ddcbs_completed++; |
451 | queue->ddcbs_in_flight--; | 449 | queue->ddcbs_in_flight--; |
452 | 450 | ||
453 | /* wake up process waiting for this DDCB */ | 451 | /* wake up process waiting for this DDCB, and |
452 | processes on the busy queue */ | ||
454 | wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); | 453 | wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); |
454 | wake_up_interruptible(&queue->busy_waitq); | ||
455 | 455 | ||
456 | pick_next_one: | 456 | pick_next_one: |
457 | queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; | 457 | queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; |
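Note: with the enqueue path now able to sleep when the queue is full (see the __genwqe_enqueue_ddcb() hunk further down), the completion handler wakes two kinds of waiters: the process that owns the completed DDCB and any processes parked on the new busy_waitq waiting for a free slot. Condensed from the hunk above:

	queue->ddcbs_completed++;
	queue->ddcbs_in_flight--;

	/* wake the owner of this DDCB ... */
	wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
	/* ... and anyone blocked waiting for a free DDCB */
	wake_up_interruptible(&queue->busy_waitq);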
@@ -717,8 +717,7 @@ go_home: | |||
717 | genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); | 717 | genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); |
718 | 718 | ||
719 | dev_err(&pci_dev->dev, | 719 | dev_err(&pci_dev->dev, |
720 | "[%s] err: DDCB#%d not purged and not completed " | 720 | "[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n", |
721 | "after %d seconds QSTAT=%016llx!!\n", | ||
722 | __func__, req->num, genwqe_ddcb_software_timeout, | 721 | __func__, req->num, genwqe_ddcb_software_timeout, |
723 | queue_status); | 722 | queue_status); |
724 | 723 | ||
@@ -740,7 +739,7 @@ int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) | |||
740 | } | 739 | } |
741 | 740 | ||
742 | len = sizeof(d->driver_version); | 741 | len = sizeof(d->driver_version); |
743 | snprintf(d->driver_version, len, "%s", DRV_VERS_STRING); | 742 | snprintf(d->driver_version, len, "%s", DRV_VERSION); |
744 | d->slu_unitcfg = cd->slu_unitcfg; | 743 | d->slu_unitcfg = cd->slu_unitcfg; |
745 | d->app_unitcfg = cd->app_unitcfg; | 744 | d->app_unitcfg = cd->app_unitcfg; |
746 | return 0; | 745 | return 0; |
@@ -748,14 +747,16 @@ int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) | |||
748 | 747 | ||
749 | /** | 748 | /** |
750 | * __genwqe_enqueue_ddcb() - Enqueue a DDCB | 749 | * __genwqe_enqueue_ddcb() - Enqueue a DDCB |
751 | * @cd: pointer to genwqe device descriptor | 750 | * @cd: pointer to genwqe device descriptor |
752 | * @req: pointer to DDCB execution request | 751 | * @req: pointer to DDCB execution request |
752 | * @f_flags: file mode: blocking, non-blocking | ||
753 | * | 753 | * |
754 | * Return: 0 if enqueuing succeeded | 754 | * Return: 0 if enqueuing succeeded |
755 | * -EIO if card is unusable/PCIe problems | 755 | * -EIO if card is unusable/PCIe problems |
756 | * -EBUSY if enqueuing failed | 756 | * -EBUSY if enqueuing failed |
757 | */ | 757 | */ |
758 | int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) | 758 | int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req, |
759 | unsigned int f_flags) | ||
759 | { | 760 | { |
760 | struct ddcb *pddcb; | 761 | struct ddcb *pddcb; |
761 | unsigned long flags; | 762 | unsigned long flags; |
@@ -763,6 +764,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) | |||
763 | struct pci_dev *pci_dev = cd->pci_dev; | 764 | struct pci_dev *pci_dev = cd->pci_dev; |
764 | u16 icrc; | 765 | u16 icrc; |
765 | 766 | ||
767 | retry: | ||
766 | if (cd->card_state != GENWQE_CARD_USED) { | 768 | if (cd->card_state != GENWQE_CARD_USED) { |
767 | printk_ratelimited(KERN_ERR | 769 | printk_ratelimited(KERN_ERR |
768 | "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", | 770 | "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", |
@@ -788,9 +790,24 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) | |||
788 | 790 | ||
789 | pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ | 791 | pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ |
790 | if (pddcb == NULL) { | 792 | if (pddcb == NULL) { |
793 | int rc; | ||
794 | |||
791 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | 795 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); |
792 | queue->busy++; | 796 | |
793 | return -EBUSY; | 797 | if (f_flags & O_NONBLOCK) { |
798 | queue->return_on_busy++; | ||
799 | return -EBUSY; | ||
800 | } | ||
801 | |||
802 | queue->wait_on_busy++; | ||
803 | rc = wait_event_interruptible(queue->busy_waitq, | ||
804 | queue_free_ddcbs(queue) != 0); | ||
805 | dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n", | ||
806 | __func__, rc); | ||
807 | if (rc == -ERESTARTSYS) | ||
808 | return rc; /* interrupted by a signal */ | ||
809 | |||
810 | goto retry; | ||
794 | } | 811 | } |
795 | 812 | ||
796 | if (queue->ddcb_req[req->num] != NULL) { | 813 | if (queue->ddcb_req[req->num] != NULL) { |
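Note: this is the core behavioural change on the genwqe side. When get_next_ddcb() finds no free slot, a descriptor opened with O_NONBLOCK still gets -EBUSY immediately, while a blocking caller now sleeps on busy_waitq until queue_free_ddcbs() reports space and then retries from the top of the function. A simplified view of the control flow, assuming the lock/unlock context shown around the hunk:

	retry:
		spin_lock_irqsave(&queue->ddcb_lock, flags);

		pddcb = get_next_ddcb(cd, queue, &req->num);	/* get ptr and num */
		if (pddcb == NULL) {
			int rc;

			spin_unlock_irqrestore(&queue->ddcb_lock, flags);

			if (f_flags & O_NONBLOCK) {
				queue->return_on_busy++;
				return -EBUSY;		/* caller asked not to wait */
			}

			queue->wait_on_busy++;
			rc = wait_event_interruptible(queue->busy_waitq,
						      queue_free_ddcbs(queue) != 0);
			if (rc == -ERESTARTSYS)
				return rc;		/* interrupted by a signal */

			goto retry;			/* a slot should be free now */
		}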
@@ -893,9 +910,11 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) | |||
893 | * __genwqe_execute_raw_ddcb() - Setup and execute DDCB | 910 | * __genwqe_execute_raw_ddcb() - Setup and execute DDCB |
894 | * @cd: pointer to genwqe device descriptor | 911 | * @cd: pointer to genwqe device descriptor |
895 | * @req: user provided DDCB request | 912 | * @req: user provided DDCB request |
913 | * @f_flags: file mode: blocking, non-blocking | ||
896 | */ | 914 | */ |
897 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, | 915 | int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, |
898 | struct genwqe_ddcb_cmd *cmd) | 916 | struct genwqe_ddcb_cmd *cmd, |
917 | unsigned int f_flags) | ||
899 | { | 918 | { |
900 | int rc = 0; | 919 | int rc = 0; |
901 | struct pci_dev *pci_dev = cd->pci_dev; | 920 | struct pci_dev *pci_dev = cd->pci_dev; |
@@ -911,7 +930,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, | |||
911 | __func__, cmd->asiv_length); | 930 | __func__, cmd->asiv_length); |
912 | return -EINVAL; | 931 | return -EINVAL; |
913 | } | 932 | } |
914 | rc = __genwqe_enqueue_ddcb(cd, req); | 933 | rc = __genwqe_enqueue_ddcb(cd, req, f_flags); |
915 | if (rc != 0) | 934 | if (rc != 0) |
916 | return rc; | 935 | return rc; |
917 | 936 | ||
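Note: __genwqe_enqueue_ddcb() and __genwqe_execute_raw_ddcb() grow an f_flags parameter so the blocking behaviour follows the mode of the file descriptor that issued the request; the card_dev.c hunks further down pass filp->f_flags through from the ioctl paths. A hedged caller sketch using the names introduced there:

	struct file *filp = cfile->filp;	/* per-open state, see card_dev.c below */

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	if (rc == -EBUSY)
		/* only reachable when the fd was opened with O_NONBLOCK */
		return rc;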
@@ -1017,7 +1036,8 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) | |||
1017 | queue->ddcbs_in_flight = 0; /* statistics */ | 1036 | queue->ddcbs_in_flight = 0; /* statistics */ |
1018 | queue->ddcbs_max_in_flight = 0; | 1037 | queue->ddcbs_max_in_flight = 0; |
1019 | queue->ddcbs_completed = 0; | 1038 | queue->ddcbs_completed = 0; |
1020 | queue->busy = 0; | 1039 | queue->return_on_busy = 0; |
1040 | queue->wait_on_busy = 0; | ||
1021 | 1041 | ||
1022 | queue->ddcb_seq = 0x100; /* start sequence number */ | 1042 | queue->ddcb_seq = 0x100; /* start sequence number */ |
1023 | queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ | 1043 | queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ |
@@ -1057,7 +1077,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) | |||
1057 | queue->ddcb_next = 0; /* queue is empty */ | 1077 | queue->ddcb_next = 0; /* queue is empty */ |
1058 | 1078 | ||
1059 | spin_lock_init(&queue->ddcb_lock); | 1079 | spin_lock_init(&queue->ddcb_lock); |
1060 | init_waitqueue_head(&queue->ddcb_waitq); | 1080 | init_waitqueue_head(&queue->busy_waitq); |
1061 | 1081 | ||
1062 | val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ | 1082 | val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ |
1063 | __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ | 1083 | __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ |
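Note: setup_ddcb_queue() now zeroes the two new statistics counters, return_on_busy and wait_on_busy, in place of the old busy counter, and initializes busy_waitq. The matching struct change lives in card_base.h, which is not part of this excerpt; as a rough sketch (field names taken from these hunks, placement and comments assumed), struct ddcb_queue presumably gains:

	struct ddcb_queue {
		/* ... existing members ... */
		unsigned int return_on_busy;	/* -EBUSY returned to O_NONBLOCK callers */
		unsigned int wait_on_busy;	/* times a caller slept for a free DDCB */
		wait_queue_head_t busy_waitq;	/* woken whenever a DDCB completes */
		/* ... */
	};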
@@ -1251,10 +1271,8 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd) | |||
1251 | } | 1271 | } |
1252 | 1272 | ||
1253 | rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); | 1273 | rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); |
1254 | if (rc) { | 1274 | if (rc) |
1255 | rc = -ENODEV; | ||
1256 | goto stop_kthread; | 1275 | goto stop_kthread; |
1257 | } | ||
1258 | 1276 | ||
1259 | /* | 1277 | /* |
1260 | * We must have all wait-queues initialized when we enable the | 1278 | * We must have all wait-queues initialized when we enable the |
@@ -1307,6 +1325,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd) | |||
1307 | for (i = 0; i < queue->ddcb_max; i++) | 1325 | for (i = 0; i < queue->ddcb_max; i++) |
1308 | wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); | 1326 | wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); |
1309 | 1327 | ||
1328 | wake_up_interruptible(&queue->busy_waitq); | ||
1310 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); | 1329 | spin_unlock_irqrestore(&queue->ddcb_lock, flags); |
1311 | 1330 | ||
1312 | return 0; | 1331 | return 0; |
@@ -1346,8 +1365,8 @@ int genwqe_finish_queue(struct genwqe_dev *cd) | |||
1346 | break; | 1365 | break; |
1347 | 1366 | ||
1348 | dev_dbg(&pci_dev->dev, | 1367 | dev_dbg(&pci_dev->dev, |
1349 | " DEBUG [%d/%d] waiting for queue to get empty: " | 1368 | " DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n", |
1350 | "%d requests!\n", i, waitmax, in_flight); | 1369 | i, waitmax, in_flight); |
1351 | 1370 | ||
1352 | /* | 1371 | /* |
1353 | * Severe severe error situation: The card itself has | 1372 | * Severe severe error situation: The card itself has |
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h index c4f26720753e..0361a68d79a6 100644 --- a/drivers/misc/genwqe/card_ddcb.h +++ b/drivers/misc/genwqe/card_ddcb.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
11 | * Author: Michael Jung <mijung@de.ibm.com> | 11 | * Author: Michael Jung <mijung@gmx.net> |
12 | * Author: Michael Ruettger <michael@ibmra.de> | 12 | * Author: Michael Ruettger <michael@ibmra.de> |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c index c9b4d6d0eb99..c715534e7fe7 100644 --- a/drivers/misc/genwqe/card_debugfs.c +++ b/drivers/misc/genwqe/card_debugfs.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -244,14 +244,16 @@ static int genwqe_ddcb_info_show(struct seq_file *s, void *unused) | |||
244 | " ddcbs_in_flight: %u\n" | 244 | " ddcbs_in_flight: %u\n" |
245 | " ddcbs_max_in_flight: %u\n" | 245 | " ddcbs_max_in_flight: %u\n" |
246 | " ddcbs_completed: %u\n" | 246 | " ddcbs_completed: %u\n" |
247 | " busy: %u\n" | 247 | " return_on_busy: %u\n" |
248 | " wait_on_busy: %u\n" | ||
248 | " irqs_processed: %u\n", | 249 | " irqs_processed: %u\n", |
249 | queue->ddcb_max, (long long)queue->ddcb_daddr, | 250 | queue->ddcb_max, (long long)queue->ddcb_daddr, |
250 | (long long)queue->ddcb_daddr + | 251 | (long long)queue->ddcb_daddr + |
251 | (queue->ddcb_max * DDCB_LENGTH), | 252 | (queue->ddcb_max * DDCB_LENGTH), |
252 | (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, | 253 | (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, |
253 | queue->ddcbs_max_in_flight, queue->ddcbs_completed, | 254 | queue->ddcbs_max_in_flight, queue->ddcbs_completed, |
254 | queue->busy, cd->irqs_processed); | 255 | queue->return_on_busy, queue->wait_on_busy, |
256 | cd->irqs_processed); | ||
255 | 257 | ||
256 | /* Hardware State */ | 258 | /* Hardware State */ |
257 | seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n" | 259 | seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n" |
@@ -323,7 +325,7 @@ static int genwqe_info_show(struct seq_file *s, void *unused) | |||
323 | " Base Clock : %u MHz\n" | 325 | " Base Clock : %u MHz\n" |
324 | " Arch/SVN Release: %u/%llx\n" | 326 | " Arch/SVN Release: %u/%llx\n" |
325 | " Bitstream : %llx\n", | 327 | " Bitstream : %llx\n", |
326 | GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev), | 328 | GENWQE_DEVNAME, DRV_VERSION, dev_name(&pci_dev->dev), |
327 | genwqe_is_privileged(cd) ? | 329 | genwqe_is_privileged(cd) ? |
328 | "Physical" : "Virtual or no SR-IOV", | 330 | "Physical" : "Virtual or no SR-IOV", |
329 | cd->card_idx, slu_id, app_id, | 331 | cd->card_idx, slu_id, app_id, |
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index aae42555e2ca..5918586f2f76 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -213,9 +213,9 @@ static void genwqe_remove_mappings(struct genwqe_file *cfile) | |||
213 | * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code. | 213 | * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code. |
214 | */ | 214 | */ |
215 | dev_err(&pci_dev->dev, | 215 | dev_err(&pci_dev->dev, |
216 | "[%s] %d. cleanup mapping: u_vaddr=%p " | 216 | "[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n", |
217 | "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++, | 217 | __func__, i++, dma_map->u_vaddr, |
218 | dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr, | 218 | (unsigned long)dma_map->k_vaddr, |
219 | (unsigned long)dma_map->dma_addr); | 219 | (unsigned long)dma_map->dma_addr); |
220 | 220 | ||
221 | if (dma_map->type == GENWQE_MAPPING_RAW) { | 221 | if (dma_map->type == GENWQE_MAPPING_RAW) { |
@@ -346,6 +346,7 @@ static int genwqe_open(struct inode *inode, struct file *filp) | |||
346 | static int genwqe_fasync(int fd, struct file *filp, int mode) | 346 | static int genwqe_fasync(int fd, struct file *filp, int mode) |
347 | { | 347 | { |
348 | struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; | 348 | struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; |
349 | |||
349 | return fasync_helper(fd, filp, mode, &cdev->async_queue); | 350 | return fasync_helper(fd, filp, mode, &cdev->async_queue); |
350 | } | 351 | } |
351 | 352 | ||
@@ -515,6 +516,7 @@ static int do_flash_update(struct genwqe_file *cfile, | |||
515 | u32 crc; | 516 | u32 crc; |
516 | u8 cmdopts; | 517 | u8 cmdopts; |
517 | struct genwqe_dev *cd = cfile->cd; | 518 | struct genwqe_dev *cd = cfile->cd; |
519 | struct file *filp = cfile->filp; | ||
518 | struct pci_dev *pci_dev = cd->pci_dev; | 520 | struct pci_dev *pci_dev = cd->pci_dev; |
519 | 521 | ||
520 | if ((load->size & 0x3) != 0) | 522 | if ((load->size & 0x3) != 0) |
@@ -609,7 +611,7 @@ static int do_flash_update(struct genwqe_file *cfile, | |||
609 | /* For Genwqe5 we get back the calculated CRC */ | 611 | /* For Genwqe5 we get back the calculated CRC */ |
610 | *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */ | 612 | *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */ |
611 | 613 | ||
612 | rc = __genwqe_execute_raw_ddcb(cd, req); | 614 | rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags); |
613 | 615 | ||
614 | load->retc = req->retc; | 616 | load->retc = req->retc; |
615 | load->attn = req->attn; | 617 | load->attn = req->attn; |
@@ -649,6 +651,7 @@ static int do_flash_read(struct genwqe_file *cfile, | |||
649 | u8 *xbuf; | 651 | u8 *xbuf; |
650 | u8 cmdopts; | 652 | u8 cmdopts; |
651 | struct genwqe_dev *cd = cfile->cd; | 653 | struct genwqe_dev *cd = cfile->cd; |
654 | struct file *filp = cfile->filp; | ||
652 | struct pci_dev *pci_dev = cd->pci_dev; | 655 | struct pci_dev *pci_dev = cd->pci_dev; |
653 | struct genwqe_ddcb_cmd *cmd; | 656 | struct genwqe_ddcb_cmd *cmd; |
654 | 657 | ||
@@ -726,7 +729,7 @@ static int do_flash_read(struct genwqe_file *cfile, | |||
726 | /* we only get back the calculated CRC */ | 729 | /* we only get back the calculated CRC */ |
727 | *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */ | 730 | *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */ |
728 | 731 | ||
729 | rc = __genwqe_execute_raw_ddcb(cd, cmd); | 732 | rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags); |
730 | 733 | ||
731 | load->retc = cmd->retc; | 734 | load->retc = cmd->retc; |
732 | load->attn = cmd->attn; | 735 | load->attn = cmd->attn; |
@@ -987,13 +990,14 @@ static int genwqe_execute_ddcb(struct genwqe_file *cfile, | |||
987 | { | 990 | { |
988 | int rc; | 991 | int rc; |
989 | struct genwqe_dev *cd = cfile->cd; | 992 | struct genwqe_dev *cd = cfile->cd; |
993 | struct file *filp = cfile->filp; | ||
990 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); | 994 | struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); |
991 | 995 | ||
992 | rc = ddcb_cmd_fixups(cfile, req); | 996 | rc = ddcb_cmd_fixups(cfile, req); |
993 | if (rc != 0) | 997 | if (rc != 0) |
994 | return rc; | 998 | return rc; |
995 | 999 | ||
996 | rc = __genwqe_execute_raw_ddcb(cd, cmd); | 1000 | rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags); |
997 | ddcb_cmd_cleanup(cfile, req); | 1001 | ddcb_cmd_cleanup(cfile, req); |
998 | return rc; | 1002 | return rc; |
999 | } | 1003 | } |
@@ -1005,6 +1009,7 @@ static int do_execute_ddcb(struct genwqe_file *cfile, | |||
1005 | struct genwqe_ddcb_cmd *cmd; | 1009 | struct genwqe_ddcb_cmd *cmd; |
1006 | struct ddcb_requ *req; | 1010 | struct ddcb_requ *req; |
1007 | struct genwqe_dev *cd = cfile->cd; | 1011 | struct genwqe_dev *cd = cfile->cd; |
1012 | struct file *filp = cfile->filp; | ||
1008 | 1013 | ||
1009 | cmd = ddcb_requ_alloc(); | 1014 | cmd = ddcb_requ_alloc(); |
1010 | if (cmd == NULL) | 1015 | if (cmd == NULL) |
@@ -1020,7 +1025,7 @@ static int do_execute_ddcb(struct genwqe_file *cfile, | |||
1020 | if (!raw) | 1025 | if (!raw) |
1021 | rc = genwqe_execute_ddcb(cfile, cmd); | 1026 | rc = genwqe_execute_ddcb(cfile, cmd); |
1022 | else | 1027 | else |
1023 | rc = __genwqe_execute_raw_ddcb(cd, cmd); | 1028 | rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags); |
1024 | 1029 | ||
1025 | /* Copy back only the modifed fields. Do not copy ASIV | 1030 | /* Copy back only the modifed fields. Do not copy ASIV |
1026 | back since the copy got modified by the driver. */ | 1031 | back since the copy got modified by the driver. */ |
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c index 7232e40a3ad9..2c33fbca9225 100644 --- a/drivers/misc/genwqe/card_sysfs.c +++ b/drivers/misc/genwqe/card_sysfs.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -91,13 +91,6 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr, | |||
91 | } | 91 | } |
92 | static DEVICE_ATTR_RO(type); | 92 | static DEVICE_ATTR_RO(type); |
93 | 93 | ||
94 | static ssize_t driver_show(struct device *dev, struct device_attribute *attr, | ||
95 | char *buf) | ||
96 | { | ||
97 | return sprintf(buf, "%s\n", DRV_VERS_STRING); | ||
98 | } | ||
99 | static DEVICE_ATTR_RO(driver); | ||
100 | |||
101 | static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, | 94 | static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, |
102 | char *buf) | 95 | char *buf) |
103 | { | 96 | { |
@@ -256,7 +249,6 @@ static struct attribute *genwqe_attributes[] = { | |||
256 | &dev_attr_next_bitstream.attr, | 249 | &dev_attr_next_bitstream.attr, |
257 | &dev_attr_curr_bitstream.attr, | 250 | &dev_attr_curr_bitstream.attr, |
258 | &dev_attr_base_clock.attr, | 251 | &dev_attr_base_clock.attr, |
259 | &dev_attr_driver.attr, | ||
260 | &dev_attr_type.attr, | 252 | &dev_attr_type.attr, |
261 | &dev_attr_version.attr, | 253 | &dev_attr_version.attr, |
262 | &dev_attr_appid.attr, | 254 | &dev_attr_appid.attr, |
@@ -268,7 +260,6 @@ static struct attribute *genwqe_attributes[] = { | |||
268 | }; | 260 | }; |
269 | 261 | ||
270 | static struct attribute *genwqe_normal_attributes[] = { | 262 | static struct attribute *genwqe_normal_attributes[] = { |
271 | &dev_attr_driver.attr, | ||
272 | &dev_attr_type.attr, | 263 | &dev_attr_type.attr, |
273 | &dev_attr_version.attr, | 264 | &dev_attr_version.attr, |
274 | &dev_attr_appid.attr, | 265 | &dev_attr_appid.attr, |
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index a6400f09229c..7cb3b7e41739 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 6 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 7 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
8 | * Author: Michael Jung <mijung@de.ibm.com> | 8 | * Author: Michael Jung <mijung@gmx.net> |
9 | * Author: Michael Ruettger <michael@ibmra.de> | 9 | * Author: Michael Ruettger <michael@ibmra.de> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -150,6 +150,7 @@ int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len) | |||
150 | memset(app_name, 0, len); | 150 | memset(app_name, 0, len); |
151 | for (i = 0, j = 0; j < min(len, 4); j++) { | 151 | for (i = 0, j = 0; j < min(len, 4); j++) { |
152 | char ch = (char)((app_id >> (24 - j*8)) & 0xff); | 152 | char ch = (char)((app_id >> (24 - j*8)) & 0xff); |
153 | |||
153 | if (ch == ' ') | 154 | if (ch == ' ') |
154 | continue; | 155 | continue; |
155 | app_name[i++] = isprint(ch) ? ch : 'X'; | 156 | app_name[i++] = isprint(ch) ? ch : 'X'; |
@@ -304,8 +305,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, | |||
304 | sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); | 305 | sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); |
305 | sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; | 306 | sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; |
306 | 307 | ||
307 | dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld " | 308 | dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", |
308 | "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", | ||
309 | __func__, user_addr, user_size, sgl->nr_pages, | 309 | __func__, user_addr, user_size, sgl->nr_pages, |
310 | sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); | 310 | sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); |
311 | 311 | ||
@@ -662,6 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, | |||
662 | u8 genwqe_card_type(struct genwqe_dev *cd) | 662 | u8 genwqe_card_type(struct genwqe_dev *cd) |
663 | { | 663 | { |
664 | u64 card_type = cd->slu_unitcfg; | 664 | u64 card_type = cd->slu_unitcfg; |
665 | |||
665 | return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); | 666 | return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); |
666 | } | 667 | } |
667 | 668 | ||
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h index a506e9aa2d57..15355350e076 100644 --- a/drivers/misc/genwqe/genwqe_driver.h +++ b/drivers/misc/genwqe/genwqe_driver.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> | 9 | * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> |
10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> | 10 | * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> |
11 | * Author: Michael Jung <mijung@de.ibm.com> | 11 | * Author: Michael Jung <mijung@gmx.net> |
12 | * Author: Michael Ruettger <michael@ibmra.de> | 12 | * Author: Michael Ruettger <michael@ibmra.de> |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
@@ -36,7 +36,7 @@ | |||
36 | #include <asm/byteorder.h> | 36 | #include <asm/byteorder.h> |
37 | #include <linux/genwqe/genwqe_card.h> | 37 | #include <linux/genwqe/genwqe_card.h> |
38 | 38 | ||
39 | #define DRV_VERS_STRING "2.0.21" | 39 | #define DRV_VERSION "2.0.25" |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Static minor number assignement, until we decide/implement | 42 | * Static minor number assignement, until we decide/implement |
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 7e1efd5f58f0..c544f1f50f52 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c | |||
@@ -247,3 +247,4 @@ module_spi_driver(lattice_ecp3_driver); | |||
247 | MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); | 247 | MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); |
248 | MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); | 248 | MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); |
249 | MODULE_LICENSE("GPL"); | 249 | MODULE_LICENSE("GPL"); |
250 | MODULE_FIRMWARE(FIRMWARE_NAME); | ||
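Note: the lattice-ecp3-config change only adds a MODULE_FIRMWARE() annotation. It records the firmware file name in the module metadata so that modinfo and initramfs/udev tooling can see the dependency; it does not change how the firmware is loaded. The usual pairing looks roughly like this (the file name string and the asynchronous load call are assumptions for the sketch, not taken from this diff):

	#define FIRMWARE_NAME	"lattice-ecp3.bit"	/* assumed value */

	/* runtime load is unchanged; only the metadata line is new */
	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      FIRMWARE_NAME, &spi->dev, GFP_KERNEL,
				      spi, firmware_load);

	MODULE_FIRMWARE(FIRMWARE_NAME);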
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index 0d6234db00fa..6cdce8477f57 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/fcntl.h> | 21 | #include <linux/fcntl.h> |
22 | #include <linux/aio.h> | 22 | #include <linux/aio.h> |
23 | #include <linux/pci.h> | ||
24 | #include <linux/ioctl.h> | 23 | #include <linux/ioctl.h> |
25 | #include <linux/cdev.h> | 24 | #include <linux/cdev.h> |
26 | #include <linux/list.h> | 25 | #include <linux/list.h> |
@@ -29,6 +28,7 @@ | |||
29 | #include <linux/uuid.h> | 28 | #include <linux/uuid.h> |
30 | #include <linux/jiffies.h> | 29 | #include <linux/jiffies.h> |
31 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <linux/slab.h> | ||
32 | 32 | ||
33 | #include <linux/mei.h> | 33 | #include <linux/mei.h> |
34 | 34 | ||
@@ -64,31 +64,32 @@ void mei_amthif_reset_params(struct mei_device *dev) | |||
64 | * | 64 | * |
65 | * @dev: the device structure | 65 | * @dev: the device structure |
66 | * | 66 | * |
67 | * Return: 0 on success, <0 on failure. | ||
67 | */ | 68 | */ |
68 | int mei_amthif_host_init(struct mei_device *dev) | 69 | int mei_amthif_host_init(struct mei_device *dev) |
69 | { | 70 | { |
70 | struct mei_cl *cl = &dev->iamthif_cl; | 71 | struct mei_cl *cl = &dev->iamthif_cl; |
72 | struct mei_me_client *me_cl; | ||
71 | unsigned char *msg_buf; | 73 | unsigned char *msg_buf; |
72 | int ret, i; | 74 | int ret; |
73 | 75 | ||
74 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 76 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
75 | 77 | ||
76 | mei_cl_init(cl, dev); | 78 | mei_cl_init(cl, dev); |
77 | 79 | ||
78 | i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); | 80 | me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid); |
79 | if (i < 0) { | 81 | if (!me_cl) { |
80 | dev_info(&dev->pdev->dev, | 82 | dev_info(dev->dev, "amthif: failed to find the client"); |
81 | "amthif: failed to find the client %d\n", i); | ||
82 | return -ENOTTY; | 83 | return -ENOTTY; |
83 | } | 84 | } |
84 | 85 | ||
85 | cl->me_client_id = dev->me_clients[i].client_id; | 86 | cl->me_client_id = me_cl->client_id; |
87 | cl->cl_uuid = me_cl->props.protocol_name; | ||
86 | 88 | ||
87 | /* Assign iamthif_mtu to the value received from ME */ | 89 | /* Assign iamthif_mtu to the value received from ME */ |
88 | 90 | ||
89 | dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length; | 91 | dev->iamthif_mtu = me_cl->props.max_msg_length; |
90 | dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n", | 92 | dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu); |
91 | dev->me_clients[i].props.max_msg_length); | ||
92 | 93 | ||
93 | kfree(dev->iamthif_msg_buf); | 94 | kfree(dev->iamthif_msg_buf); |
94 | dev->iamthif_msg_buf = NULL; | 95 | dev->iamthif_msg_buf = NULL; |
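Note: mei_amthif_host_init() is switched from index-based ME client lookup to the pointer-returning mei_me_cl_by_uuid(), and the client UUID is now cached in cl->cl_uuid. Condensed from the hunk above (the -ENOTTY return is what the driver uses when the client is absent):

	struct mei_me_client *me_cl;

	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
	if (!me_cl) {
		dev_info(dev->dev, "amthif: failed to find the client");
		return -ENOTTY;
	}

	cl->me_client_id = me_cl->client_id;
	cl->cl_uuid      = me_cl->props.protocol_name;
	dev->iamthif_mtu = me_cl->props.max_msg_length;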
@@ -96,17 +97,15 @@ int mei_amthif_host_init(struct mei_device *dev) | |||
96 | /* allocate storage for ME message buffer */ | 97 | /* allocate storage for ME message buffer */ |
97 | msg_buf = kcalloc(dev->iamthif_mtu, | 98 | msg_buf = kcalloc(dev->iamthif_mtu, |
98 | sizeof(unsigned char), GFP_KERNEL); | 99 | sizeof(unsigned char), GFP_KERNEL); |
99 | if (!msg_buf) { | 100 | if (!msg_buf) |
100 | dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n"); | ||
101 | return -ENOMEM; | 101 | return -ENOMEM; |
102 | } | ||
103 | 102 | ||
104 | dev->iamthif_msg_buf = msg_buf; | 103 | dev->iamthif_msg_buf = msg_buf; |
105 | 104 | ||
106 | ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); | 105 | ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); |
107 | 106 | ||
108 | if (ret < 0) { | 107 | if (ret < 0) { |
109 | dev_err(&dev->pdev->dev, | 108 | dev_err(dev->dev, |
110 | "amthif: failed link client %d\n", ret); | 109 | "amthif: failed link client %d\n", ret); |
111 | return ret; | 110 | return ret; |
112 | } | 111 | } |
@@ -124,18 +123,16 @@ int mei_amthif_host_init(struct mei_device *dev) | |||
124 | * @dev: the device structure | 123 | * @dev: the device structure |
125 | * @file: pointer to file object | 124 | * @file: pointer to file object |
126 | * | 125 | * |
127 | * returns returned a list entry on success, NULL on failure. | 126 | * Return: returned a list entry on success, NULL on failure. |
128 | */ | 127 | */ |
129 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | 128 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, |
130 | struct file *file) | 129 | struct file *file) |
131 | { | 130 | { |
132 | struct mei_cl_cb *cb; | 131 | struct mei_cl_cb *cb; |
133 | 132 | ||
134 | list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) { | 133 | list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) |
135 | if (cb->cl && cb->cl == &dev->iamthif_cl && | 134 | if (cb->file_object == file) |
136 | cb->file_object == file) | ||
137 | return cb; | 135 | return cb; |
138 | } | ||
139 | return NULL; | 136 | return NULL; |
140 | } | 137 | } |
141 | 138 | ||
@@ -144,15 +141,14 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | |||
144 | * mei_amthif_read - read data from AMTHIF client | 141 | * mei_amthif_read - read data from AMTHIF client |
145 | * | 142 | * |
146 | * @dev: the device structure | 143 | * @dev: the device structure |
147 | * @if_num: minor number | ||
148 | * @file: pointer to file object | 144 | * @file: pointer to file object |
149 | * @*ubuf: pointer to user data in user space | 145 | * @ubuf: pointer to user data in user space |
150 | * @length: data length to read | 146 | * @length: data length to read |
151 | * @offset: data read offset | 147 | * @offset: data read offset |
152 | * | 148 | * |
153 | * Locking: called under "dev->device_lock" lock | 149 | * Locking: called under "dev->device_lock" lock |
154 | * | 150 | * |
155 | * returns | 151 | * Return: |
156 | * returned data length on success, | 152 | * returned data length on success, |
157 | * zero if no data to read, | 153 | * zero if no data to read, |
158 | * negative on failure. | 154 | * negative on failure. |
@@ -160,25 +156,19 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | |||
160 | int mei_amthif_read(struct mei_device *dev, struct file *file, | 156 | int mei_amthif_read(struct mei_device *dev, struct file *file, |
161 | char __user *ubuf, size_t length, loff_t *offset) | 157 | char __user *ubuf, size_t length, loff_t *offset) |
162 | { | 158 | { |
163 | int rets; | ||
164 | int wait_ret; | ||
165 | struct mei_cl_cb *cb = NULL; | ||
166 | struct mei_cl *cl = file->private_data; | 159 | struct mei_cl *cl = file->private_data; |
160 | struct mei_cl_cb *cb; | ||
167 | unsigned long timeout; | 161 | unsigned long timeout; |
168 | int i; | 162 | int rets; |
163 | int wait_ret; | ||
169 | 164 | ||
170 | /* Only possible if we are in timeout */ | 165 | /* Only possible if we are in timeout */ |
171 | if (!cl || cl != &dev->iamthif_cl) { | 166 | if (!cl) { |
172 | dev_dbg(&dev->pdev->dev, "bad file ext.\n"); | 167 | dev_err(dev->dev, "bad file ext.\n"); |
173 | return -ETIME; | 168 | return -ETIME; |
174 | } | 169 | } |
175 | 170 | ||
176 | i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); | 171 | dev_dbg(dev->dev, "checking amthif data\n"); |
177 | if (i < 0) { | ||
178 | dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); | ||
179 | return -ENOTTY; | ||
180 | } | ||
181 | dev_dbg(&dev->pdev->dev, "checking amthif data\n"); | ||
182 | cb = mei_amthif_find_read_list_entry(dev, file); | 172 | cb = mei_amthif_find_read_list_entry(dev, file); |
183 | 173 | ||
184 | /* Check for if we can block or not*/ | 174 | /* Check for if we can block or not*/ |
@@ -186,7 +176,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
186 | return -EAGAIN; | 176 | return -EAGAIN; |
187 | 177 | ||
188 | 178 | ||
189 | dev_dbg(&dev->pdev->dev, "waiting for amthif data\n"); | 179 | dev_dbg(dev->dev, "waiting for amthif data\n"); |
190 | while (cb == NULL) { | 180 | while (cb == NULL) { |
191 | /* unlock the Mutex */ | 181 | /* unlock the Mutex */ |
192 | mutex_unlock(&dev->device_lock); | 182 | mutex_unlock(&dev->device_lock); |
@@ -200,21 +190,21 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
200 | if (wait_ret) | 190 | if (wait_ret) |
201 | return -ERESTARTSYS; | 191 | return -ERESTARTSYS; |
202 | 192 | ||
203 | dev_dbg(&dev->pdev->dev, "woke up from sleep\n"); | 193 | dev_dbg(dev->dev, "woke up from sleep\n"); |
204 | } | 194 | } |
205 | 195 | ||
206 | 196 | ||
207 | dev_dbg(&dev->pdev->dev, "Got amthif data\n"); | 197 | dev_dbg(dev->dev, "Got amthif data\n"); |
208 | dev->iamthif_timer = 0; | 198 | dev->iamthif_timer = 0; |
209 | 199 | ||
210 | if (cb) { | 200 | if (cb) { |
211 | timeout = cb->read_time + | 201 | timeout = cb->read_time + |
212 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | 202 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); |
213 | dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n", | 203 | dev_dbg(dev->dev, "amthif timeout = %lud\n", |
214 | timeout); | 204 | timeout); |
215 | 205 | ||
216 | if (time_after(jiffies, timeout)) { | 206 | if (time_after(jiffies, timeout)) { |
217 | dev_dbg(&dev->pdev->dev, "amthif Time out\n"); | 207 | dev_dbg(dev->dev, "amthif Time out\n"); |
218 | /* 15 sec for the message has expired */ | 208 | /* 15 sec for the message has expired */ |
219 | list_del(&cb->list); | 209 | list_del(&cb->list); |
220 | rets = -ETIME; | 210 | rets = -ETIME; |
@@ -234,16 +224,16 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
234 | * remove message from deletion list | 224 | * remove message from deletion list |
235 | */ | 225 | */ |
236 | 226 | ||
237 | dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n", | 227 | dev_dbg(dev->dev, "amthif cb->response_buffer size - %d\n", |
238 | cb->response_buffer.size); | 228 | cb->response_buffer.size); |
239 | dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); | 229 | dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); |
240 | 230 | ||
241 | /* length is being truncated to PAGE_SIZE, however, | 231 | /* length is being truncated to PAGE_SIZE, however, |
242 | * the buf_idx may point beyond */ | 232 | * the buf_idx may point beyond */ |
243 | length = min_t(size_t, length, (cb->buf_idx - *offset)); | 233 | length = min_t(size_t, length, (cb->buf_idx - *offset)); |
244 | 234 | ||
245 | if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { | 235 | if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { |
246 | dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); | 236 | dev_dbg(dev->dev, "failed to copy data to userland\n"); |
247 | rets = -EFAULT; | 237 | rets = -EFAULT; |
248 | } else { | 238 | } else { |
249 | rets = length; | 239 | rets = length; |
@@ -253,7 +243,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
253 | } | 243 | } |
254 | } | 244 | } |
255 | free: | 245 | free: |
256 | dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n"); | 246 | dev_dbg(dev->dev, "free amthif cb memory.\n"); |
257 | *offset = 0; | 247 | *offset = 0; |
258 | mei_io_cb_free(cb); | 248 | mei_io_cb_free(cb); |
259 | out: | 249 | out: |
@@ -266,7 +256,7 @@ out: | |||
266 | * @dev: the device structure | 256 | * @dev: the device structure |
267 | * @cb: mei call back struct | 257 | * @cb: mei call back struct |
268 | * | 258 | * |
269 | * returns 0 on success, <0 on failure. | 259 | * Return: 0 on success, <0 on failure. |
270 | * | 260 | * |
271 | */ | 261 | */ |
272 | static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) | 262 | static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) |
@@ -277,7 +267,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) | |||
277 | if (!dev || !cb) | 267 | if (!dev || !cb) |
278 | return -ENODEV; | 268 | return -ENODEV; |
279 | 269 | ||
280 | dev_dbg(&dev->pdev->dev, "write data to amthif client.\n"); | 270 | dev_dbg(dev->dev, "write data to amthif client.\n"); |
281 | 271 | ||
282 | dev->iamthif_state = MEI_IAMTHIF_WRITING; | 272 | dev->iamthif_state = MEI_IAMTHIF_WRITING; |
283 | dev->iamthif_current_cb = cb; | 273 | dev->iamthif_current_cb = cb; |
@@ -316,12 +306,12 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) | |||
316 | return -EIO; | 306 | return -EIO; |
317 | dev->iamthif_flow_control_pending = true; | 307 | dev->iamthif_flow_control_pending = true; |
318 | dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; | 308 | dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; |
319 | dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n"); | 309 | dev_dbg(dev->dev, "add amthif cb to write waiting list\n"); |
320 | dev->iamthif_current_cb = cb; | 310 | dev->iamthif_current_cb = cb; |
321 | dev->iamthif_file_object = cb->file_object; | 311 | dev->iamthif_file_object = cb->file_object; |
322 | list_add_tail(&cb->list, &dev->write_waiting_list.list); | 312 | list_add_tail(&cb->list, &dev->write_waiting_list.list); |
323 | } else { | 313 | } else { |
324 | dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n"); | 314 | dev_dbg(dev->dev, "message does not complete, so add amthif cb to write list.\n"); |
325 | list_add_tail(&cb->list, &dev->write_list.list); | 315 | list_add_tail(&cb->list, &dev->write_list.list); |
326 | } | 316 | } |
327 | } else { | 317 | } else { |
@@ -336,7 +326,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) | |||
336 | * @dev: the device structure | 326 | * @dev: the device structure |
337 | * @cb: mei call back struct | 327 | * @cb: mei call back struct |
338 | * | 328 | * |
339 | * returns 0 on success, <0 on failure. | 329 | * Return: 0 on success, <0 on failure. |
340 | * | 330 | * |
341 | */ | 331 | */ |
342 | int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) | 332 | int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) |
@@ -354,25 +344,23 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) | |||
354 | 344 | ||
355 | if (!list_empty(&dev->amthif_cmd_list.list) || | 345 | if (!list_empty(&dev->amthif_cmd_list.list) || |
356 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { | 346 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { |
357 | dev_dbg(&dev->pdev->dev, | 347 | dev_dbg(dev->dev, |
358 | "amthif state = %d\n", dev->iamthif_state); | 348 | "amthif state = %d\n", dev->iamthif_state); |
359 | dev_dbg(&dev->pdev->dev, "AMTHIF: add cb to the wait list\n"); | 349 | dev_dbg(dev->dev, "AMTHIF: add cb to the wait list\n"); |
360 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); | 350 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); |
361 | return 0; | 351 | return 0; |
362 | } | 352 | } |
363 | return mei_amthif_send_cmd(dev, cb); | 353 | return mei_amthif_send_cmd(dev, cb); |
364 | } | 354 | } |
365 | /** | 355 | /** |
366 | * mei_amthif_run_next_cmd | 356 | * mei_amthif_run_next_cmd - send next amt command from queue |
367 | * | 357 | * |
368 | * @dev: the device structure | 358 | * @dev: the device structure |
369 | * | ||
370 | * returns 0 on success, <0 on failure. | ||
371 | */ | 359 | */ |
372 | void mei_amthif_run_next_cmd(struct mei_device *dev) | 360 | void mei_amthif_run_next_cmd(struct mei_device *dev) |
373 | { | 361 | { |
374 | struct mei_cl_cb *pos = NULL; | 362 | struct mei_cl_cb *cb; |
375 | struct mei_cl_cb *next = NULL; | 363 | struct mei_cl_cb *next; |
376 | int status; | 364 | int status; |
377 | 365 | ||
378 | if (!dev) | 366 | if (!dev) |
@@ -386,21 +374,17 @@ void mei_amthif_run_next_cmd(struct mei_device *dev) | |||
386 | dev->iamthif_timer = 0; | 374 | dev->iamthif_timer = 0; |
387 | dev->iamthif_file_object = NULL; | 375 | dev->iamthif_file_object = NULL; |
388 | 376 | ||
389 | dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n"); | 377 | dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); |
390 | |||
391 | list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) { | ||
392 | list_del(&pos->list); | ||
393 | 378 | ||
394 | if (pos->cl && pos->cl == &dev->iamthif_cl) { | 379 | list_for_each_entry_safe(cb, next, &dev->amthif_cmd_list.list, list) { |
395 | status = mei_amthif_send_cmd(dev, pos); | 380 | list_del(&cb->list); |
396 | if (status) { | 381 | if (!cb->cl) |
397 | dev_dbg(&dev->pdev->dev, | 382 | continue; |
398 | "amthif write failed status = %d\n", | 383 | status = mei_amthif_send_cmd(dev, cb); |
384 | if (status) | ||
385 | dev_warn(dev->dev, "amthif write failed status = %d\n", | ||
399 | status); | 386 | status); |
400 | return; | 387 | break; |
401 | } | ||
402 | break; | ||
403 | } | ||
404 | } | 388 | } |
405 | } | 389 | } |
406 | 390 | ||
@@ -421,7 +405,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
421 | dev->iamthif_file_object == file) { | 405 | dev->iamthif_file_object == file) { |
422 | 406 | ||
423 | mask |= (POLLIN | POLLRDNORM); | 407 | mask |= (POLLIN | POLLRDNORM); |
424 | dev_dbg(&dev->pdev->dev, "run next amthif cb\n"); | 408 | dev_dbg(dev->dev, "run next amthif cb\n"); |
425 | mei_amthif_run_next_cmd(dev); | 409 | mei_amthif_run_next_cmd(dev); |
426 | } | 410 | } |
427 | mutex_unlock(&dev->device_lock); | 411 | mutex_unlock(&dev->device_lock); |
@@ -434,12 +418,11 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
434 | /** | 418 | /** |
435 | * mei_amthif_irq_write - write iamthif command in irq thread context. | 419 | * mei_amthif_irq_write - write iamthif command in irq thread context. |
436 | * | 420 | * |
437 | * @dev: the device structure. | ||
438 | * @cb_pos: callback block. | ||
439 | * @cl: private data of the file object. | 421 | * @cl: private data of the file object. |
422 | * @cb: callback block. | ||
440 | * @cmpl_list: complete list. | 423 | * @cmpl_list: complete list. |
441 | * | 424 | * |
442 | * returns 0, OK; otherwise, error. | 425 | * Return: 0, OK; otherwise, error. |
443 | */ | 426 | */ |
444 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | 427 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
445 | struct mei_cl_cb *cmpl_list) | 428 | struct mei_cl_cb *cmpl_list) |
@@ -481,7 +464,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
481 | return 0; | 464 | return 0; |
482 | } | 465 | } |
483 | 466 | ||
484 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); | 467 | dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); |
485 | 468 | ||
486 | rets = mei_write_message(dev, &mei_hdr, | 469 | rets = mei_write_message(dev, &mei_hdr, |
487 | dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); | 470 | dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); |
@@ -514,14 +497,14 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
514 | } | 497 | } |
515 | 498 | ||
516 | /** | 499 | /** |
517 | * mei_amthif_irq_read_message - read routine after ISR to | 500 | * mei_amthif_irq_read_msg - read routine after ISR to |
518 | * handle the read amthif message | 501 | * handle the read amthif message |
519 | * | 502 | * |
520 | * @dev: the device structure | 503 | * @dev: the device structure |
521 | * @mei_hdr: header of amthif message | 504 | * @mei_hdr: header of amthif message |
522 | * @complete_list: An instance of our list structure | 505 | * @complete_list: An instance of our list structure |
523 | * | 506 | * |
524 | * returns 0 on success, <0 on failure. | 507 | * Return: 0 on success, <0 on failure. |
525 | */ | 508 | */ |
526 | int mei_amthif_irq_read_msg(struct mei_device *dev, | 509 | int mei_amthif_irq_read_msg(struct mei_device *dev, |
527 | struct mei_msg_hdr *mei_hdr, | 510 | struct mei_msg_hdr *mei_hdr, |
@@ -543,10 +526,10 @@ int mei_amthif_irq_read_msg(struct mei_device *dev, | |||
543 | if (!mei_hdr->msg_complete) | 526 | if (!mei_hdr->msg_complete) |
544 | return 0; | 527 | return 0; |
545 | 528 | ||
546 | dev_dbg(&dev->pdev->dev, "amthif_message_buffer_index =%d\n", | 529 | dev_dbg(dev->dev, "amthif_message_buffer_index =%d\n", |
547 | mei_hdr->length); | 530 | mei_hdr->length); |
548 | 531 | ||
549 | dev_dbg(&dev->pdev->dev, "completed amthif read.\n "); | 532 | dev_dbg(dev->dev, "completed amthif read.\n "); |
550 | if (!dev->iamthif_current_cb) | 533 | if (!dev->iamthif_current_cb) |
551 | return -ENODEV; | 534 | return -ENODEV; |
552 | 535 | ||
@@ -559,10 +542,10 @@ int mei_amthif_irq_read_msg(struct mei_device *dev, | |||
559 | dev->iamthif_stall_timer = 0; | 542 | dev->iamthif_stall_timer = 0; |
560 | cb->buf_idx = dev->iamthif_msg_buf_index; | 543 | cb->buf_idx = dev->iamthif_msg_buf_index; |
561 | cb->read_time = jiffies; | 544 | cb->read_time = jiffies; |
562 | if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) { | 545 | if (dev->iamthif_ioctl) { |
563 | /* found the iamthif cb */ | 546 | /* found the iamthif cb */ |
564 | dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n "); | 547 | dev_dbg(dev->dev, "complete the amthif read cb.\n "); |
565 | dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n "); | 548 | dev_dbg(dev->dev, "add the amthif read cb to complete.\n "); |
566 | list_add_tail(&cb->list, &complete_list->list); | 549 | list_add_tail(&cb->list, &complete_list->list); |
567 | } | 550 | } |
568 | return 0; | 551 | return 0; |
@@ -574,7 +557,7 @@ int mei_amthif_irq_read_msg(struct mei_device *dev, | |||
574 | * @dev: the device structure. | 557 | * @dev: the device structure. |
575 | * @slots: free slots. | 558 | * @slots: free slots. |
576 | * | 559 | * |
577 | * returns 0, OK; otherwise, error. | 560 | * Return: 0, OK; otherwise, error. |
578 | */ | 561 | */ |
579 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) | 562 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) |
580 | { | 563 | { |
@@ -586,11 +569,11 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) | |||
586 | *slots -= msg_slots; | 569 | *slots -= msg_slots; |
587 | 570 | ||
588 | if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) { | 571 | if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) { |
589 | dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n"); | 572 | dev_dbg(dev->dev, "iamthif flow control failed\n"); |
590 | return -EIO; | 573 | return -EIO; |
591 | } | 574 | } |
592 | 575 | ||
593 | dev_dbg(&dev->pdev->dev, "iamthif flow control success\n"); | 576 | dev_dbg(dev->dev, "iamthif flow control success\n"); |
594 | dev->iamthif_state = MEI_IAMTHIF_READING; | 577 | dev->iamthif_state = MEI_IAMTHIF_READING; |
595 | dev->iamthif_flow_control_pending = false; | 578 | dev->iamthif_flow_control_pending = false; |
596 | dev->iamthif_msg_buf_index = 0; | 579 | dev->iamthif_msg_buf_index = 0; |
@@ -604,7 +587,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) | |||
604 | * mei_amthif_complete - complete amthif callback. | 587 | * mei_amthif_complete - complete amthif callback. |
605 | * | 588 | * |
606 | * @dev: the device structure. | 589 | * @dev: the device structure. |
607 | * @cb_pos: callback block. | 590 | * @cb: callback block. |
608 | */ | 591 | */ |
609 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | 592 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) |
610 | { | 593 | { |
@@ -615,15 +598,15 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | |||
615 | dev->iamthif_msg_buf, | 598 | dev->iamthif_msg_buf, |
616 | dev->iamthif_msg_buf_index); | 599 | dev->iamthif_msg_buf_index); |
617 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); | 600 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); |
618 | dev_dbg(&dev->pdev->dev, "amthif read completed\n"); | 601 | dev_dbg(dev->dev, "amthif read completed\n"); |
619 | dev->iamthif_timer = jiffies; | 602 | dev->iamthif_timer = jiffies; |
620 | dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", | 603 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", |
621 | dev->iamthif_timer); | 604 | dev->iamthif_timer); |
622 | } else { | 605 | } else { |
623 | mei_amthif_run_next_cmd(dev); | 606 | mei_amthif_run_next_cmd(dev); |
624 | } | 607 | } |
625 | 608 | ||
626 | dev_dbg(&dev->pdev->dev, "completing amthif call back.\n"); | 609 | dev_dbg(dev->dev, "completing amthif call back.\n"); |
627 | wake_up_interruptible(&dev->iamthif_cl.wait); | 610 | wake_up_interruptible(&dev->iamthif_cl.wait); |
628 | } | 611 | } |
629 | 612 | ||
@@ -638,7 +621,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | |||
638 | * mei_clear_list is called to clear resources associated with file | 621 | * mei_clear_list is called to clear resources associated with file |
639 | * when application calls close function or Ctrl-C was pressed | 622 | * when application calls close function or Ctrl-C was pressed |
640 | * | 623 | * |
641 | * returns true if callback removed from the list, false otherwise | 624 | * Return: true if callback removed from the list, false otherwise |
642 | */ | 625 | */ |
643 | static bool mei_clear_list(struct mei_device *dev, | 626 | static bool mei_clear_list(struct mei_device *dev, |
644 | const struct file *file, struct list_head *mei_cb_list) | 627 | const struct file *file, struct list_head *mei_cb_list) |
@@ -678,7 +661,7 @@ static bool mei_clear_list(struct mei_device *dev, | |||
678 | * mei_clear_lists is called to clear resources associated with file | 661 | * mei_clear_lists is called to clear resources associated with file |
679 | * when application calls close function or Ctrl-C was pressed | 662 | * when application calls close function or Ctrl-C was pressed |
680 | * | 663 | * |
681 | * returns true if callback removed from the list, false otherwise | 664 | * Return: true if callback removed from the list, false otherwise |
682 | */ | 665 | */ |
683 | static bool mei_clear_lists(struct mei_device *dev, struct file *file) | 666 | static bool mei_clear_lists(struct mei_device *dev, struct file *file) |
684 | { | 667 | { |
@@ -719,7 +702,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file) | |||
719 | * @dev: device structure | 702 | * @dev: device structure |
720 | * @file: pointer to file structure | 703 | * @file: pointer to file structure |
721 | * | 704 | * |
722 | * returns 0 on success, <0 on error | 705 | * Return: 0 on success, <0 on error |
723 | */ | 706 | */ |
724 | int mei_amthif_release(struct mei_device *dev, struct file *file) | 707 | int mei_amthif_release(struct mei_device *dev, struct file *file) |
725 | { | 708 | { |
@@ -729,11 +712,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file) | |||
729 | if (dev->iamthif_file_object == file && | 712 | if (dev->iamthif_file_object == file && |
730 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { | 713 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { |
731 | 714 | ||
732 | dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n", | 715 | dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", |
733 | dev->iamthif_state); | 716 | dev->iamthif_state); |
734 | dev->iamthif_canceled = true; | 717 | dev->iamthif_canceled = true; |
735 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { | 718 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { |
736 | dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n"); | 719 | dev_dbg(dev->dev, "run next amthif iamthif cb\n"); |
737 | mei_amthif_run_next_cmd(dev); | 720 | mei_amthif_run_next_cmd(dev); |
738 | } | 721 | } |
739 | } | 722 | } |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 0e993ef28b94..4d20d60ca38d 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/pci.h> | ||
26 | #include <linux/mei_cl_bus.h> | 25 | #include <linux/mei_cl_bus.h> |
27 | 26 | ||
28 | #include "mei_dev.h" | 27 | #include "mei_dev.h" |
@@ -70,7 +69,7 @@ static int mei_cl_device_probe(struct device *dev) | |||
70 | 69 | ||
71 | dev_dbg(dev, "Device probe\n"); | 70 | dev_dbg(dev, "Device probe\n"); |
72 | 71 | ||
73 | strncpy(id.name, dev_name(dev), sizeof(id.name)); | 72 | strlcpy(id.name, dev_name(dev), sizeof(id.name)); |
74 | 73 | ||
75 | return driver->probe(device, &id); | 74 | return driver->probe(device, &id); |
76 | } | 75 | } |
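Note: in mei_cl_device_probe() strncpy() is replaced by strlcpy(). strncpy() leaves the destination without a terminating NUL when the source is at least as long as the buffer, whereas strlcpy() always terminates (truncating if needed), which is what the fixed-size id.name buffer wants. A toy illustration with a hypothetical 8-byte buffer:

	char name[8];					/* size chosen for the sketch */

	strncpy(name, "0123456789", sizeof(name));	/* 8 chars copied, no '\0' */
	strlcpy(name, "0123456789", sizeof(name));	/* "0123456" plus '\0'     */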
@@ -147,7 +146,7 @@ static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, | |||
147 | struct mei_cl *cl; | 146 | struct mei_cl *cl; |
148 | 147 | ||
149 | list_for_each_entry(cl, &dev->device_list, device_link) { | 148 | list_for_each_entry(cl, &dev->device_list, device_link) { |
150 | if (!uuid_le_cmp(uuid, cl->device_uuid)) | 149 | if (!uuid_le_cmp(uuid, cl->cl_uuid)) |
151 | return cl; | 150 | return cl; |
152 | } | 151 | } |
153 | 152 | ||
@@ -172,7 +171,7 @@ struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, | |||
172 | device->cl = cl; | 171 | device->cl = cl; |
173 | device->ops = ops; | 172 | device->ops = ops; |
174 | 173 | ||
175 | device->dev.parent = &dev->pdev->dev; | 174 | device->dev.parent = dev->dev; |
176 | device->dev.bus = &mei_cl_bus_type; | 175 | device->dev.bus = &mei_cl_bus_type; |
177 | device->dev.type = &mei_cl_device_type; | 176 | device->dev.type = &mei_cl_device_type; |
178 | 177 | ||
@@ -180,7 +179,7 @@ struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, | |||
180 | 179 | ||
181 | status = device_register(&device->dev); | 180 | status = device_register(&device->dev); |
182 | if (status) { | 181 | if (status) { |
183 | dev_err(&dev->pdev->dev, "Failed to register MEI device\n"); | 182 | dev_err(dev->dev, "Failed to register MEI device\n"); |
184 | kfree(device); | 183 | kfree(device); |
185 | return NULL; | 184 | return NULL; |
186 | } | 185 | } |
@@ -229,8 +228,8 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
229 | bool blocking) | 228 | bool blocking) |
230 | { | 229 | { |
231 | struct mei_device *dev; | 230 | struct mei_device *dev; |
231 | struct mei_me_client *me_cl; | ||
232 | struct mei_cl_cb *cb; | 232 | struct mei_cl_cb *cb; |
233 | int id; | ||
234 | int rets; | 233 | int rets; |
235 | 234 | ||
236 | if (WARN_ON(!cl || !cl->dev)) | 235 | if (WARN_ON(!cl || !cl->dev)) |
@@ -242,11 +241,11 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
242 | return -ENODEV; | 241 | return -ENODEV; |
243 | 242 | ||
244 | /* Check if we have an ME client device */ | 243 | /* Check if we have an ME client device */ |
245 | id = mei_me_cl_by_id(dev, cl->me_client_id); | 244 | me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); |
246 | if (id < 0) | 245 | if (!me_cl) |
247 | return id; | 246 | return -ENOTTY; |
248 | 247 | ||
249 | if (length > dev->me_clients[id].props.max_msg_length) | 248 | if (length > me_cl->props.max_msg_length) |
250 | return -EFBIG; | 249 | return -EFBIG; |
251 | 250 | ||
252 | cb = mei_io_cb_init(cl, NULL); | 251 | cb = mei_io_cb_init(cl, NULL); |
@@ -430,7 +429,7 @@ int mei_cl_enable_device(struct mei_cl_device *device) | |||
430 | err = mei_cl_connect(cl, NULL); | 429 | err = mei_cl_connect(cl, NULL); |
431 | if (err < 0) { | 430 | if (err < 0) { |
432 | mutex_unlock(&dev->device_lock); | 431 | mutex_unlock(&dev->device_lock); |
433 | dev_err(&dev->pdev->dev, "Could not connect to the ME client"); | 432 | dev_err(dev->dev, "Could not connect to the ME client"); |
434 | 433 | ||
435 | return err; | 434 | return err; |
436 | } | 435 | } |
@@ -462,7 +461,7 @@ int mei_cl_disable_device(struct mei_cl_device *device) | |||
462 | 461 | ||
463 | if (cl->state != MEI_FILE_CONNECTED) { | 462 | if (cl->state != MEI_FILE_CONNECTED) { |
464 | mutex_unlock(&dev->device_lock); | 463 | mutex_unlock(&dev->device_lock); |
465 | dev_err(&dev->pdev->dev, "Already disconnected"); | 464 | dev_err(dev->dev, "Already disconnected"); |
466 | 465 | ||
467 | return 0; | 466 | return 0; |
468 | } | 467 | } |
@@ -472,7 +471,7 @@ int mei_cl_disable_device(struct mei_cl_device *device) | |||
472 | err = mei_cl_disconnect(cl); | 471 | err = mei_cl_disconnect(cl); |
473 | if (err < 0) { | 472 | if (err < 0) { |
474 | mutex_unlock(&dev->device_lock); | 473 | mutex_unlock(&dev->device_lock); |
475 | dev_err(&dev->pdev->dev, | 474 | dev_err(dev->dev, |
476 | "Could not disconnect from the ME client"); | 475 | "Could not disconnect from the ME client"); |
477 | 476 | ||
478 | return err; | 477 | return err; |
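___mei_cl_send() above shows the new lookup convention: the ME client finders return a struct pointer (NULL on a miss) instead of an array index, and each caller picks the errno that fits its path, -ENOTTY here and -ENOENT in the flow-control helpers further down. A compile-able analog with invented names:

#include <errno.h>
#include <stddef.h>

/* Simplified analog, not the driver's types: the finder returns NULL on
 * a miss and the caller chooses its own error code. */
struct me_client {
	unsigned int id;
	size_t max_msg_len;
};

static struct me_client *find_client(struct me_client *tbl, size_t n,
				     unsigned int id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].id == id)
			return &tbl[i];
	return NULL;
}

static int send_check(struct me_client *tbl, size_t n, unsigned int id,
		      size_t len)
{
	struct me_client *me_cl = find_client(tbl, n, id);

	if (!me_cl)
		return -ENOTTY;	/* send path: no such client */
	if (len > me_cl->max_msg_len)
		return -EFBIG;	/* message larger than the client allows */
	return 0;
}

int main(void)
{
	struct me_client tbl[] = { { 1, 128 }, { 2, 512 } };

	return send_check(tbl, 2, 2, 1024) == -EFBIG ? 0 : 1;
}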
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 2da05c0e113d..bc9ba5359bc6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -14,10 +14,10 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
19 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
20 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/slab.h> | ||
21 | #include <linux/pm_runtime.h> | 21 | #include <linux/pm_runtime.h> |
22 | 22 | ||
23 | #include <linux/mei.h> | 23 | #include <linux/mei.h> |
@@ -27,47 +27,90 @@ | |||
27 | #include "client.h" | 27 | #include "client.h" |
28 | 28 | ||
29 | /** | 29 | /** |
30 | * mei_me_cl_by_uuid - locate index of me client | 30 | * mei_me_cl_by_uuid - locate me client by uuid |
31 | * | 31 | * |
32 | * @dev: mei device | 32 | * @dev: mei device |
33 | * @uuid: me client uuid | ||
33 | * | 34 | * |
34 | * Locking: called under "dev->device_lock" lock | 35 | * Locking: called under "dev->device_lock" lock |
35 | * | 36 | * |
36 | * returns me client index or -ENOENT if not found | 37 | * Return: me client or NULL if not found |
37 | */ | 38 | */ |
38 | int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) | 39 | struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, |
40 | const uuid_le *uuid) | ||
39 | { | 41 | { |
40 | int i; | 42 | struct mei_me_client *me_cl; |
41 | 43 | ||
42 | for (i = 0; i < dev->me_clients_num; ++i) | 44 | list_for_each_entry(me_cl, &dev->me_clients, list) |
43 | if (uuid_le_cmp(*uuid, | 45 | if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0) |
44 | dev->me_clients[i].props.protocol_name) == 0) | 46 | return me_cl; |
45 | return i; | ||
46 | 47 | ||
47 | return -ENOENT; | 48 | return NULL; |
48 | } | 49 | } |
49 | 50 | ||
50 | |||
51 | /** | 51 | /** |
52 | * mei_me_cl_by_id return index to me_clients for client_id | 52 | * mei_me_cl_by_id - locate me client by client id |
53 | * | 53 | * |
54 | * @dev: the device structure | 54 | * @dev: the device structure |
55 | * @client_id: me client id | 55 | * @client_id: me client id |
56 | * | 56 | * |
57 | * Locking: called under "dev->device_lock" lock | 57 | * Locking: called under "dev->device_lock" lock |
58 | * | 58 | * |
59 | * returns index on success, -ENOENT on failure. | 59 | * Return: me client or NULL if not found |
60 | */ | 60 | */ |
61 | struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) | ||
62 | { | ||
63 | |||
64 | struct mei_me_client *me_cl; | ||
61 | 65 | ||
62 | int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) | 66 | list_for_each_entry(me_cl, &dev->me_clients, list) |
67 | if (me_cl->client_id == client_id) | ||
68 | return me_cl; | ||
69 | return NULL; | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * mei_me_cl_by_uuid_id - locate me client by client id and uuid | ||
74 | * | ||
75 | * @dev: the device structure | ||
76 | * @uuid: me client uuid | ||
77 | * @client_id: me client id | ||
78 | * | ||
79 | * Locking: called under "dev->device_lock" lock | ||
80 | * | ||
81 | * Return: me client or NULL if not found | ||
82 | */ | ||
83 | struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, | ||
84 | const uuid_le *uuid, u8 client_id) | ||
63 | { | 85 | { |
64 | int i; | 86 | struct mei_me_client *me_cl; |
65 | 87 | ||
66 | for (i = 0; i < dev->me_clients_num; i++) | 88 | list_for_each_entry(me_cl, &dev->me_clients, list) |
67 | if (dev->me_clients[i].client_id == client_id) | 89 | if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 && |
68 | return i; | 90 | me_cl->client_id == client_id) |
91 | return me_cl; | ||
92 | return NULL; | ||
93 | } | ||
69 | 94 | ||
70 | return -ENOENT; | 95 | /** |
96 | * mei_me_cl_remove - remove me client matching uuid and client_id | ||
97 | * | ||
98 | * @dev: the device structure | ||
99 | * @uuid: me client uuid | ||
100 | * @client_id: me client address | ||
101 | */ | ||
102 | void mei_me_cl_remove(struct mei_device *dev, const uuid_le *uuid, u8 client_id) | ||
103 | { | ||
104 | struct mei_me_client *me_cl, *next; | ||
105 | |||
106 | list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) { | ||
107 | if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 && | ||
108 | me_cl->client_id == client_id) { | ||
109 | list_del(&me_cl->list); | ||
110 | kfree(me_cl); | ||
111 | break; | ||
112 | } | ||
113 | } | ||
71 | } | 114 | } |
72 | 115 | ||
73 | 116 | ||
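The hunk above replaces the fixed me_clients array with a linked list: lookups walk dev->me_clients with list_for_each_entry(), and removal uses the _safe iterator so a node can be freed while walking. A standalone analog of the same pattern with made-up types, where a hand-rolled singly linked list stands in for list_head:

#include <stdlib.h>

/* Standalone analog (invented types): removal unlinks before freeing,
 * which is what list_for_each_entry_safe() makes safe in the hunk above. */
struct node {
	unsigned int id;
	struct node *next;
};

static struct node *lookup(struct node *head, unsigned int id)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->id == id)
			return n;
	return NULL;
}

static void remove_id(struct node **head, unsigned int id)
{
	struct node **pp = head;

	while (*pp) {
		struct node *n = *pp;

		if (n->id == id) {
			*pp = n->next;	/* unlink, then free */
			free(n);
			return;
		}
		pp = &n->next;
	}
}

int main(void)
{
	struct node *head = calloc(1, sizeof(*head));

	if (!head)
		return 1;
	head->id = 5;
	remove_id(&head, 5);
	return lookup(head, 5) == NULL ? 0 : 1;
}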
@@ -77,7 +120,7 @@ int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) | |||
77 | * @cl1: host client 1 | 120 | * @cl1: host client 1 |
78 | * @cl2: host client 2 | 121 | * @cl2: host client 2 |
79 | * | 122 | * |
80 | * returns true - if the clients has same host and me ids | 123 | * Return: true - if the clients has same host and me ids |
81 | * false - otherwise | 124 | * false - otherwise |
82 | */ | 125 | */ |
83 | static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, | 126 | static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, |
@@ -117,7 +160,7 @@ static void __mei_io_list_flush(struct mei_cl_cb *list, | |||
117 | * @list: An instance of our list structure | 160 | * @list: An instance of our list structure |
118 | * @cl: host client | 161 | * @cl: host client |
119 | */ | 162 | */ |
120 | static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) | 163 | void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) |
121 | { | 164 | { |
122 | __mei_io_list_flush(list, cl, false); | 165 | __mei_io_list_flush(list, cl, false); |
123 | } | 166 | } |
@@ -152,10 +195,10 @@ void mei_io_cb_free(struct mei_cl_cb *cb) | |||
152 | /** | 195 | /** |
153 | * mei_io_cb_init - allocate and initialize io callback | 196 | * mei_io_cb_init - allocate and initialize io callback |
154 | * | 197 | * |
155 | * @cl - mei client | 198 | * @cl: mei client |
156 | * @fp: pointer to file structure | 199 | * @fp: pointer to file structure |
157 | * | 200 | * |
158 | * returns mei_cl_cb pointer or NULL; | 201 | * Return: mei_cl_cb pointer or NULL; |
159 | */ | 202 | */ |
160 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) | 203 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) |
161 | { | 204 | { |
@@ -179,7 +222,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) | |||
179 | * @cb: io callback structure | 222 | * @cb: io callback structure |
180 | * @length: size of the buffer | 223 | * @length: size of the buffer |
181 | * | 224 | * |
182 | * returns 0 on success | 225 | * Return: 0 on success |
183 | * -EINVAL if cb is NULL | 226 | * -EINVAL if cb is NULL |
184 | * -ENOMEM if allocation failed | 227 | * -ENOMEM if allocation failed |
185 | */ | 228 | */ |
@@ -203,7 +246,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) | |||
203 | * @cb: io callback structure | 246 | * @cb: io callback structure |
204 | * @length: size of the buffer | 247 | * @length: size of the buffer |
205 | * | 248 | * |
206 | * returns 0 on success | 249 | * Return: 0 on success |
207 | * -EINVAL if cb is NULL | 250 | * -EINVAL if cb is NULL |
208 | * -ENOMEM if allocation failed | 251 | * -ENOMEM if allocation failed |
209 | */ | 252 | */ |
@@ -228,6 +271,8 @@ int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) | |||
228 | * mei_cl_flush_queues - flushes queue lists belonging to cl. | 271 | * mei_cl_flush_queues - flushes queue lists belonging to cl. |
229 | * | 272 | * |
230 | * @cl: host client | 273 | * @cl: host client |
274 | * | ||
275 | * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. | ||
231 | */ | 276 | */ |
232 | int mei_cl_flush_queues(struct mei_cl *cl) | 277 | int mei_cl_flush_queues(struct mei_cl *cl) |
233 | { | 278 | { |
@@ -273,7 +318,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) | |||
273 | * mei_cl_allocate - allocates cl structure and sets it up. | 318 | * mei_cl_allocate - allocates cl structure and sets it up. |
274 | * | 319 | * |
275 | * @dev: mei device | 320 | * @dev: mei device |
276 | * returns The allocated file or NULL on failure | 321 | * Return: The allocated file or NULL on failure |
277 | */ | 322 | */ |
278 | struct mei_cl *mei_cl_allocate(struct mei_device *dev) | 323 | struct mei_cl *mei_cl_allocate(struct mei_device *dev) |
279 | { | 324 | { |
@@ -293,7 +338,7 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev) | |||
293 | * | 338 | * |
294 | * @cl: host client | 339 | * @cl: host client |
295 | * | 340 | * |
296 | * returns cb on success, NULL on error | 341 | * Return: cb on success, NULL on error |
297 | */ | 342 | */ |
298 | struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) | 343 | struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) |
299 | { | 344 | { |
@@ -311,7 +356,7 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) | |||
311 | * @cl - host client | 356 | * @cl - host client |
312 | * @id - fixed host id or -1 for generic one | 357 | * @id - fixed host id or -1 for generic one |
313 | * | 358 | * |
314 | * returns 0 on success | 359 | * Return: 0 on success |
315 | * -EINVAL on incorrect values | 360 | * -EINVAL on incorrect values |
316 | * -ENONET if client not found | 361 | * -ENONET if client not found |
317 | */ | 362 | */ |
@@ -331,13 +376,13 @@ int mei_cl_link(struct mei_cl *cl, int id) | |||
331 | MEI_CLIENTS_MAX); | 376 | MEI_CLIENTS_MAX); |
332 | 377 | ||
333 | if (id >= MEI_CLIENTS_MAX) { | 378 | if (id >= MEI_CLIENTS_MAX) { |
334 | dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); | 379 | dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); |
335 | return -EMFILE; | 380 | return -EMFILE; |
336 | } | 381 | } |
337 | 382 | ||
338 | open_handle_count = dev->open_handle_count + dev->iamthif_open_count; | 383 | open_handle_count = dev->open_handle_count + dev->iamthif_open_count; |
339 | if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { | 384 | if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { |
340 | dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", | 385 | dev_err(dev->dev, "open_handle_count exceeded %d", |
341 | MEI_MAX_OPEN_HANDLE_COUNT); | 386 | MEI_MAX_OPEN_HANDLE_COUNT); |
342 | return -EMFILE; | 387 | return -EMFILE; |
343 | } | 388 | } |
@@ -359,6 +404,8 @@ int mei_cl_link(struct mei_cl *cl, int id) | |||
359 | * mei_cl_unlink - remove me_cl from the list | 404 | * mei_cl_unlink - remove me_cl from the list |
360 | * | 405 | * |
361 | * @cl: host client | 406 | * @cl: host client |
407 | * | ||
408 | * Return: always 0 | ||
362 | */ | 409 | */ |
363 | int mei_cl_unlink(struct mei_cl *cl) | 410 | int mei_cl_unlink(struct mei_cl *cl) |
364 | { | 411 | { |
@@ -395,19 +442,19 @@ void mei_host_client_init(struct work_struct *work) | |||
395 | { | 442 | { |
396 | struct mei_device *dev = container_of(work, | 443 | struct mei_device *dev = container_of(work, |
397 | struct mei_device, init_work); | 444 | struct mei_device, init_work); |
398 | struct mei_client_properties *client_props; | 445 | struct mei_me_client *me_cl; |
399 | int i; | 446 | struct mei_client_properties *props; |
400 | 447 | ||
401 | mutex_lock(&dev->device_lock); | 448 | mutex_lock(&dev->device_lock); |
402 | 449 | ||
403 | for (i = 0; i < dev->me_clients_num; i++) { | 450 | list_for_each_entry(me_cl, &dev->me_clients, list) { |
404 | client_props = &dev->me_clients[i].props; | 451 | props = &me_cl->props; |
405 | 452 | ||
406 | if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) | 453 | if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid)) |
407 | mei_amthif_host_init(dev); | 454 | mei_amthif_host_init(dev); |
408 | else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) | 455 | else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid)) |
409 | mei_wd_host_init(dev); | 456 | mei_wd_host_init(dev); |
410 | else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) | 457 | else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid)) |
411 | mei_nfc_host_init(dev); | 458 | mei_nfc_host_init(dev); |
412 | 459 | ||
413 | } | 460 | } |
@@ -417,27 +464,27 @@ void mei_host_client_init(struct work_struct *work) | |||
417 | 464 | ||
418 | mutex_unlock(&dev->device_lock); | 465 | mutex_unlock(&dev->device_lock); |
419 | 466 | ||
420 | pm_runtime_mark_last_busy(&dev->pdev->dev); | 467 | pm_runtime_mark_last_busy(dev->dev); |
421 | dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); | 468 | dev_dbg(dev->dev, "rpm: autosuspend\n"); |
422 | pm_runtime_autosuspend(&dev->pdev->dev); | 469 | pm_runtime_autosuspend(dev->dev); |
423 | } | 470 | } |
424 | 471 | ||
425 | /** | 472 | /** |
426 | * mei_hbuf_acquire: try to acquire host buffer | 473 | * mei_hbuf_acquire - try to acquire host buffer |
427 | * | 474 | * |
428 | * @dev: the device structure | 475 | * @dev: the device structure |
429 | * returns true if host buffer was acquired | 476 | * Return: true if host buffer was acquired |
430 | */ | 477 | */ |
431 | bool mei_hbuf_acquire(struct mei_device *dev) | 478 | bool mei_hbuf_acquire(struct mei_device *dev) |
432 | { | 479 | { |
433 | if (mei_pg_state(dev) == MEI_PG_ON || | 480 | if (mei_pg_state(dev) == MEI_PG_ON || |
434 | dev->pg_event == MEI_PG_EVENT_WAIT) { | 481 | dev->pg_event == MEI_PG_EVENT_WAIT) { |
435 | dev_dbg(&dev->pdev->dev, "device is in pg\n"); | 482 | dev_dbg(dev->dev, "device is in pg\n"); |
436 | return false; | 483 | return false; |
437 | } | 484 | } |
438 | 485 | ||
439 | if (!dev->hbuf_is_ready) { | 486 | if (!dev->hbuf_is_ready) { |
440 | dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); | 487 | dev_dbg(dev->dev, "hbuf is not ready\n"); |
441 | return false; | 488 | return false; |
442 | } | 489 | } |
443 | 490 | ||
@@ -453,7 +500,7 @@ bool mei_hbuf_acquire(struct mei_device *dev) | |||
453 | * | 500 | * |
454 | * Locking: called under "dev->device_lock" lock | 501 | * Locking: called under "dev->device_lock" lock |
455 | * | 502 | * |
456 | * returns 0 on success, <0 on failure. | 503 | * Return: 0 on success, <0 on failure. |
457 | */ | 504 | */ |
458 | int mei_cl_disconnect(struct mei_cl *cl) | 505 | int mei_cl_disconnect(struct mei_cl *cl) |
459 | { | 506 | { |
@@ -471,9 +518,9 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
471 | if (cl->state != MEI_FILE_DISCONNECTING) | 518 | if (cl->state != MEI_FILE_DISCONNECTING) |
472 | return 0; | 519 | return 0; |
473 | 520 | ||
474 | rets = pm_runtime_get(&dev->pdev->dev); | 521 | rets = pm_runtime_get(dev->dev); |
475 | if (rets < 0 && rets != -EINPROGRESS) { | 522 | if (rets < 0 && rets != -EINPROGRESS) { |
476 | pm_runtime_put_noidle(&dev->pdev->dev); | 523 | pm_runtime_put_noidle(dev->dev); |
477 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 524 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
478 | return rets; | 525 | return rets; |
479 | } | 526 | } |
@@ -484,7 +531,8 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
484 | goto free; | 531 | goto free; |
485 | } | 532 | } |
486 | 533 | ||
487 | cb->fop_type = MEI_FOP_CLOSE; | 534 | cb->fop_type = MEI_FOP_DISCONNECT; |
535 | |||
488 | if (mei_hbuf_acquire(dev)) { | 536 | if (mei_hbuf_acquire(dev)) { |
489 | if (mei_hbm_cl_disconnect_req(dev, cl)) { | 537 | if (mei_hbm_cl_disconnect_req(dev, cl)) { |
490 | rets = -ENODEV; | 538 | rets = -ENODEV; |
@@ -501,7 +549,7 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
501 | } | 549 | } |
502 | mutex_unlock(&dev->device_lock); | 550 | mutex_unlock(&dev->device_lock); |
503 | 551 | ||
504 | wait_event_timeout(dev->wait_recvd_msg, | 552 | wait_event_timeout(cl->wait, |
505 | MEI_FILE_DISCONNECTED == cl->state, | 553 | MEI_FILE_DISCONNECTED == cl->state, |
506 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | 554 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); |
507 | 555 | ||
@@ -519,8 +567,8 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
519 | mei_io_list_flush(&dev->ctrl_wr_list, cl); | 567 | mei_io_list_flush(&dev->ctrl_wr_list, cl); |
520 | free: | 568 | free: |
521 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 569 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
522 | pm_runtime_mark_last_busy(&dev->pdev->dev); | 570 | pm_runtime_mark_last_busy(dev->dev); |
523 | pm_runtime_put_autosuspend(&dev->pdev->dev); | 571 | pm_runtime_put_autosuspend(dev->dev); |
524 | 572 | ||
525 | mei_io_cb_free(cb); | 573 | mei_io_cb_free(cb); |
526 | return rets; | 574 | return rets; |
@@ -533,7 +581,7 @@ free: | |||
533 | * | 581 | * |
534 | * @cl: private data of the file object | 582 | * @cl: private data of the file object |
535 | * | 583 | * |
536 | * returns true if other client is connected, false - otherwise. | 584 | * Return: true if other client is connected, false - otherwise. |
537 | */ | 585 | */ |
538 | bool mei_cl_is_other_connecting(struct mei_cl *cl) | 586 | bool mei_cl_is_other_connecting(struct mei_cl *cl) |
539 | { | 587 | { |
@@ -560,10 +608,11 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl) | |||
560 | * mei_cl_connect - connect host client to the me one | 608 | * mei_cl_connect - connect host client to the me one |
561 | * | 609 | * |
562 | * @cl: host client | 610 | * @cl: host client |
611 | * @file: pointer to file structure | ||
563 | * | 612 | * |
564 | * Locking: called under "dev->device_lock" lock | 613 | * Locking: called under "dev->device_lock" lock |
565 | * | 614 | * |
566 | * returns 0 on success, <0 on failure. | 615 | * Return: 0 on success, <0 on failure. |
567 | */ | 616 | */ |
568 | int mei_cl_connect(struct mei_cl *cl, struct file *file) | 617 | int mei_cl_connect(struct mei_cl *cl, struct file *file) |
569 | { | 618 | { |
@@ -576,9 +625,9 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
576 | 625 | ||
577 | dev = cl->dev; | 626 | dev = cl->dev; |
578 | 627 | ||
579 | rets = pm_runtime_get(&dev->pdev->dev); | 628 | rets = pm_runtime_get(dev->dev); |
580 | if (rets < 0 && rets != -EINPROGRESS) { | 629 | if (rets < 0 && rets != -EINPROGRESS) { |
581 | pm_runtime_put_noidle(&dev->pdev->dev); | 630 | pm_runtime_put_noidle(dev->dev); |
582 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 631 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
583 | return rets; | 632 | return rets; |
584 | } | 633 | } |
@@ -606,7 +655,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
606 | } | 655 | } |
607 | 656 | ||
608 | mutex_unlock(&dev->device_lock); | 657 | mutex_unlock(&dev->device_lock); |
609 | wait_event_timeout(dev->wait_recvd_msg, | 658 | wait_event_timeout(cl->wait, |
610 | (cl->state == MEI_FILE_CONNECTED || | 659 | (cl->state == MEI_FILE_CONNECTED || |
611 | cl->state == MEI_FILE_DISCONNECTED), | 660 | cl->state == MEI_FILE_DISCONNECTED), |
612 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | 661 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); |
@@ -626,8 +675,8 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
626 | 675 | ||
627 | out: | 676 | out: |
628 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 677 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
629 | pm_runtime_mark_last_busy(&dev->pdev->dev); | 678 | pm_runtime_mark_last_busy(dev->dev); |
630 | pm_runtime_put_autosuspend(&dev->pdev->dev); | 679 | pm_runtime_put_autosuspend(dev->dev); |
631 | 680 | ||
632 | mei_io_cb_free(cb); | 681 | mei_io_cb_free(cb); |
633 | return rets; | 682 | return rets; |
@@ -638,7 +687,7 @@ out: | |||
638 | * | 687 | * |
639 | * @cl: private data of the file object | 688 | * @cl: private data of the file object |
640 | * | 689 | * |
641 | * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. | 690 | * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. |
642 | * -ENOENT if mei_cl is not present | 691 | * -ENOENT if mei_cl is not present |
643 | * -EINVAL if single_recv_buf == 0 | 692 | * -EINVAL if single_recv_buf == 0 |
644 | */ | 693 | */ |
@@ -646,26 +695,21 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
646 | { | 695 | { |
647 | struct mei_device *dev; | 696 | struct mei_device *dev; |
648 | struct mei_me_client *me_cl; | 697 | struct mei_me_client *me_cl; |
649 | int id; | ||
650 | 698 | ||
651 | if (WARN_ON(!cl || !cl->dev)) | 699 | if (WARN_ON(!cl || !cl->dev)) |
652 | return -EINVAL; | 700 | return -EINVAL; |
653 | 701 | ||
654 | dev = cl->dev; | 702 | dev = cl->dev; |
655 | 703 | ||
656 | if (!dev->me_clients_num) | ||
657 | return 0; | ||
658 | |||
659 | if (cl->mei_flow_ctrl_creds > 0) | 704 | if (cl->mei_flow_ctrl_creds > 0) |
660 | return 1; | 705 | return 1; |
661 | 706 | ||
662 | id = mei_me_cl_by_id(dev, cl->me_client_id); | 707 | me_cl = mei_me_cl_by_id(dev, cl->me_client_id); |
663 | if (id < 0) { | 708 | if (!me_cl) { |
664 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); | 709 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); |
665 | return id; | 710 | return -ENOENT; |
666 | } | 711 | } |
667 | 712 | ||
668 | me_cl = &dev->me_clients[id]; | ||
669 | if (me_cl->mei_flow_ctrl_creds) { | 713 | if (me_cl->mei_flow_ctrl_creds) { |
670 | if (WARN_ON(me_cl->props.single_recv_buf == 0)) | 714 | if (WARN_ON(me_cl->props.single_recv_buf == 0)) |
671 | return -EINVAL; | 715 | return -EINVAL; |
@@ -679,7 +723,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
679 | * | 723 | * |
680 | * @cl: private data of the file object | 724 | * @cl: private data of the file object |
681 | * | 725 | * |
682 | * @returns | 726 | * Return: |
683 | * 0 on success | 727 | * 0 on success |
684 | * -ENOENT when me client is not found | 728 | * -ENOENT when me client is not found |
685 | * -EINVAL when ctrl credits are <= 0 | 729 | * -EINVAL when ctrl credits are <= 0 |
@@ -688,21 +732,19 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) | |||
688 | { | 732 | { |
689 | struct mei_device *dev; | 733 | struct mei_device *dev; |
690 | struct mei_me_client *me_cl; | 734 | struct mei_me_client *me_cl; |
691 | int id; | ||
692 | 735 | ||
693 | if (WARN_ON(!cl || !cl->dev)) | 736 | if (WARN_ON(!cl || !cl->dev)) |
694 | return -EINVAL; | 737 | return -EINVAL; |
695 | 738 | ||
696 | dev = cl->dev; | 739 | dev = cl->dev; |
697 | 740 | ||
698 | id = mei_me_cl_by_id(dev, cl->me_client_id); | 741 | me_cl = mei_me_cl_by_id(dev, cl->me_client_id); |
699 | if (id < 0) { | 742 | if (!me_cl) { |
700 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); | 743 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); |
701 | return id; | 744 | return -ENOENT; |
702 | } | 745 | } |
703 | 746 | ||
704 | me_cl = &dev->me_clients[id]; | 747 | if (me_cl->props.single_recv_buf) { |
705 | if (me_cl->props.single_recv_buf != 0) { | ||
706 | if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) | 748 | if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) |
707 | return -EINVAL; | 749 | return -EINVAL; |
708 | me_cl->mei_flow_ctrl_creds--; | 750 | me_cl->mei_flow_ctrl_creds--; |
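Both flow-control helpers now resolve the ME client pointer directly while keeping the same credit accounting: clients that share a single receive buffer draw from the ME client's shared pool, everyone else from their own per-connection counter. A small self-contained analog with invented field names:

#include <errno.h>
#include <stdbool.h>

/* Analog only, fields are invented for illustration. */
struct creds {
	bool single_recv_buf;
	int me_creds;	/* shared pool, one per ME client */
	int cl_creds;	/* private, one per connection */
};

static int creds_reduce(struct creds *c)
{
	if (c->single_recv_buf) {
		if (c->me_creds <= 0)
			return -EINVAL;
		c->me_creds--;
	} else {
		if (c->cl_creds <= 0)
			return -EINVAL;
		c->cl_creds--;
	}
	return 0;
}

int main(void)
{
	struct creds c = { .single_recv_buf = true, .me_creds = 1 };

	return creds_reduce(&c) == 0 && creds_reduce(&c) == -EINVAL ? 0 : 1;
}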
@@ -718,15 +760,16 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) | |||
718 | * mei_cl_read_start - the start read client message function. | 760 | * mei_cl_read_start - the start read client message function. |
719 | * | 761 | * |
720 | * @cl: host client | 762 | * @cl: host client |
763 | * @length: number of bytes to read | ||
721 | * | 764 | * |
722 | * returns 0 on success, <0 on failure. | 765 | * Return: 0 on success, <0 on failure. |
723 | */ | 766 | */ |
724 | int mei_cl_read_start(struct mei_cl *cl, size_t length) | 767 | int mei_cl_read_start(struct mei_cl *cl, size_t length) |
725 | { | 768 | { |
726 | struct mei_device *dev; | 769 | struct mei_device *dev; |
727 | struct mei_cl_cb *cb; | 770 | struct mei_cl_cb *cb; |
771 | struct mei_me_client *me_cl; | ||
728 | int rets; | 772 | int rets; |
729 | int i; | ||
730 | 773 | ||
731 | if (WARN_ON(!cl || !cl->dev)) | 774 | if (WARN_ON(!cl || !cl->dev)) |
732 | return -ENODEV; | 775 | return -ENODEV; |
@@ -740,15 +783,15 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) | |||
740 | cl_dbg(dev, cl, "read is pending.\n"); | 783 | cl_dbg(dev, cl, "read is pending.\n"); |
741 | return -EBUSY; | 784 | return -EBUSY; |
742 | } | 785 | } |
743 | i = mei_me_cl_by_id(dev, cl->me_client_id); | 786 | me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); |
744 | if (i < 0) { | 787 | if (!me_cl) { |
745 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); | 788 | cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); |
746 | return -ENOTTY; | 789 | return -ENOTTY; |
747 | } | 790 | } |
748 | 791 | ||
749 | rets = pm_runtime_get(&dev->pdev->dev); | 792 | rets = pm_runtime_get(dev->dev); |
750 | if (rets < 0 && rets != -EINPROGRESS) { | 793 | if (rets < 0 && rets != -EINPROGRESS) { |
751 | pm_runtime_put_noidle(&dev->pdev->dev); | 794 | pm_runtime_put_noidle(dev->dev); |
752 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 795 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
753 | return rets; | 796 | return rets; |
754 | } | 797 | } |
@@ -760,7 +803,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) | |||
760 | } | 803 | } |
761 | 804 | ||
762 | /* always allocate at least client max message */ | 805 | /* always allocate at least client max message */ |
763 | length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); | 806 | length = max_t(size_t, length, me_cl->props.max_msg_length); |
764 | rets = mei_io_cb_alloc_resp_buf(cb, length); | 807 | rets = mei_io_cb_alloc_resp_buf(cb, length); |
765 | if (rets) | 808 | if (rets) |
766 | goto out; | 809 | goto out; |
@@ -780,8 +823,8 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) | |||
780 | 823 | ||
781 | out: | 824 | out: |
782 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 825 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
783 | pm_runtime_mark_last_busy(&dev->pdev->dev); | 826 | pm_runtime_mark_last_busy(dev->dev); |
784 | pm_runtime_put_autosuspend(&dev->pdev->dev); | 827 | pm_runtime_put_autosuspend(dev->dev); |
785 | 828 | ||
786 | if (rets) | 829 | if (rets) |
787 | mei_io_cb_free(cb); | 830 | mei_io_cb_free(cb); |
@@ -797,7 +840,7 @@ out: | |||
797 | * @cb: callback block. | 840 | * @cb: callback block. |
798 | * @cmpl_list: complete list. | 841 | * @cmpl_list: complete list. |
799 | * | 842 | * |
800 | * returns 0, OK; otherwise error. | 843 | * Return: 0, OK; otherwise error. |
801 | */ | 844 | */ |
802 | int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | 845 | int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
803 | struct mei_cl_cb *cmpl_list) | 846 | struct mei_cl_cb *cmpl_list) |
@@ -874,12 +917,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
874 | 917 | ||
875 | /** | 918 | /** |
876 | * mei_cl_write - submit a write cb to mei device | 919 | * mei_cl_write - submit a write cb to mei device |
877 | assumes device_lock is locked | 920 | * assumes device_lock is locked |
878 | * | 921 | * |
879 | * @cl: host client | 922 | * @cl: host client |
880 | * @cl: write callback with filled data | 923 | * @cb: write callback with filled data |
924 | * @blocking: block until completed | ||
881 | * | 925 | * |
882 | * returns number of bytes sent on success, <0 on failure. | 926 | * Return: number of bytes sent on success, <0 on failure. |
883 | */ | 927 | */ |
884 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | 928 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) |
885 | { | 929 | { |
@@ -900,11 +944,11 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
900 | 944 | ||
901 | buf = &cb->request_buffer; | 945 | buf = &cb->request_buffer; |
902 | 946 | ||
903 | cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); | 947 | cl_dbg(dev, cl, "size=%d\n", buf->size); |
904 | 948 | ||
905 | rets = pm_runtime_get(&dev->pdev->dev); | 949 | rets = pm_runtime_get(dev->dev); |
906 | if (rets < 0 && rets != -EINPROGRESS) { | 950 | if (rets < 0 && rets != -EINPROGRESS) { |
907 | pm_runtime_put_noidle(&dev->pdev->dev); | 951 | pm_runtime_put_noidle(dev->dev); |
908 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 952 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
909 | return rets; | 953 | return rets; |
910 | } | 954 | } |
@@ -979,8 +1023,8 @@ out: | |||
979 | rets = buf->size; | 1023 | rets = buf->size; |
980 | err: | 1024 | err: |
981 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 1025 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
982 | pm_runtime_mark_last_busy(&dev->pdev->dev); | 1026 | pm_runtime_mark_last_busy(dev->dev); |
983 | pm_runtime_put_autosuspend(&dev->pdev->dev); | 1027 | pm_runtime_put_autosuspend(dev->dev); |
984 | 1028 | ||
985 | return rets; | 1029 | return rets; |
986 | } | 1030 | } |
@@ -1016,7 +1060,7 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
1016 | /** | 1060 | /** |
1017 | * mei_cl_all_disconnect - disconnect forcefully all connected clients | 1061 | * mei_cl_all_disconnect - disconnect forcefully all connected clients |
1018 | * | 1062 | * |
1019 | * @dev - mei device | 1063 | * @dev: mei device |
1020 | */ | 1064 | */ |
1021 | 1065 | ||
1022 | void mei_cl_all_disconnect(struct mei_device *dev) | 1066 | void mei_cl_all_disconnect(struct mei_device *dev) |
@@ -1034,11 +1078,12 @@ void mei_cl_all_disconnect(struct mei_device *dev) | |||
1034 | /** | 1078 | /** |
1035 | * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted | 1079 | * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted |
1036 | * | 1080 | * |
1037 | * @dev - mei device | 1081 | * @dev: mei device |
1038 | */ | 1082 | */ |
1039 | void mei_cl_all_wakeup(struct mei_device *dev) | 1083 | void mei_cl_all_wakeup(struct mei_device *dev) |
1040 | { | 1084 | { |
1041 | struct mei_cl *cl; | 1085 | struct mei_cl *cl; |
1086 | |||
1042 | list_for_each_entry(cl, &dev->file_list, link) { | 1087 | list_for_each_entry(cl, &dev->file_list, link) { |
1043 | if (waitqueue_active(&cl->rx_wait)) { | 1088 | if (waitqueue_active(&cl->rx_wait)) { |
1044 | cl_dbg(dev, cl, "Waking up reading client!\n"); | 1089 | cl_dbg(dev, cl, "Waking up reading client!\n"); |
@@ -1053,8 +1098,8 @@ void mei_cl_all_wakeup(struct mei_device *dev) | |||
1053 | 1098 | ||
1054 | /** | 1099 | /** |
1055 | * mei_cl_all_write_clear - clear all pending writes | 1100 | * mei_cl_all_write_clear - clear all pending writes |
1056 | 1101 | * | |
1057 | * @dev - mei device | 1102 | * @dev: mei device |
1058 | */ | 1103 | */ |
1059 | void mei_cl_all_write_clear(struct mei_device *dev) | 1104 | void mei_cl_all_write_clear(struct mei_device *dev) |
1060 | { | 1105 | { |
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 96d5de0389f9..d9d0c1525259 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h | |||
@@ -24,8 +24,15 @@ | |||
24 | 24 | ||
25 | #include "mei_dev.h" | 25 | #include "mei_dev.h" |
26 | 26 | ||
27 | int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid); | 27 | struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, |
28 | int mei_me_cl_by_id(struct mei_device *dev, u8 client_id); | 28 | const uuid_le *cuuid); |
29 | struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id); | ||
30 | |||
31 | struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, | ||
32 | const uuid_le *uuid, u8 client_id); | ||
33 | |||
34 | void mei_me_cl_remove(struct mei_device *dev, | ||
35 | const uuid_le *uuid, u8 client_id); | ||
29 | 36 | ||
30 | /* | 37 | /* |
31 | * MEI IO Functions | 38 | * MEI IO Functions |
@@ -45,6 +52,8 @@ static inline void mei_io_list_init(struct mei_cl_cb *list) | |||
45 | { | 52 | { |
46 | INIT_LIST_HEAD(&list->list); | 53 | INIT_LIST_HEAD(&list->list); |
47 | } | 54 | } |
55 | void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); | ||
56 | |||
48 | /* | 57 | /* |
49 | * MEI Host Client Functions | 58 | * MEI Host Client Functions |
50 | */ | 59 | */ |
@@ -101,9 +110,9 @@ void mei_cl_all_write_clear(struct mei_device *dev); | |||
101 | #define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id | 110 | #define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id |
102 | 111 | ||
103 | #define cl_dbg(dev, cl, format, arg...) \ | 112 | #define cl_dbg(dev, cl, format, arg...) \ |
104 | dev_dbg(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 113 | dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
105 | 114 | ||
106 | #define cl_err(dev, cl, format, arg...) \ | 115 | #define cl_err(dev, cl, format, arg...) \ |
107 | dev_err(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 116 | dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
108 | 117 | ||
109 | #endif /* _MEI_CLIENT_H_ */ | 118 | #endif /* _MEI_CLIENT_H_ */ |
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index ced5b777c70f..ce1566715f80 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/debugfs.h> | 19 | #include <linux/debugfs.h> |
20 | #include <linux/pci.h> | ||
21 | 20 | ||
22 | #include <linux/mei.h> | 21 | #include <linux/mei.h> |
23 | 22 | ||
@@ -28,39 +27,47 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, | |||
28 | size_t cnt, loff_t *ppos) | 27 | size_t cnt, loff_t *ppos) |
29 | { | 28 | { |
30 | struct mei_device *dev = fp->private_data; | 29 | struct mei_device *dev = fp->private_data; |
31 | struct mei_me_client *cl; | 30 | struct mei_me_client *me_cl; |
32 | const size_t bufsz = 1024; | 31 | size_t bufsz = 1; |
33 | char *buf = kzalloc(bufsz, GFP_KERNEL); | 32 | char *buf; |
34 | int i; | 33 | int i = 0; |
35 | int pos = 0; | 34 | int pos = 0; |
36 | int ret; | 35 | int ret; |
37 | 36 | ||
38 | if (!buf) | 37 | #define HDR " |id|addr| UUID |con|msg len|sb|\n" |
39 | return -ENOMEM; | ||
40 | |||
41 | pos += scnprintf(buf + pos, bufsz - pos, | ||
42 | " |id|addr| UUID |con|msg len|\n"); | ||
43 | 38 | ||
44 | mutex_lock(&dev->device_lock); | 39 | mutex_lock(&dev->device_lock); |
45 | 40 | ||
41 | list_for_each_entry(me_cl, &dev->me_clients, list) | ||
42 | bufsz++; | ||
43 | |||
44 | bufsz *= sizeof(HDR) + 1; | ||
45 | buf = kzalloc(bufsz, GFP_KERNEL); | ||
46 | if (!buf) { | ||
47 | mutex_unlock(&dev->device_lock); | ||
48 | return -ENOMEM; | ||
49 | } | ||
50 | |||
51 | pos += scnprintf(buf + pos, bufsz - pos, HDR); | ||
52 | |||
46 | /* if the driver is not enabled the list won't be consistent */ | 53 | /* if the driver is not enabled the list won't be consistent */ |
47 | if (dev->dev_state != MEI_DEV_ENABLED) | 54 | if (dev->dev_state != MEI_DEV_ENABLED) |
48 | goto out; | 55 | goto out; |
49 | 56 | ||
50 | for (i = 0; i < dev->me_clients_num; i++) { | 57 | list_for_each_entry(me_cl, &dev->me_clients, list) { |
51 | cl = &dev->me_clients[i]; | ||
52 | 58 | ||
53 | /* skip me clients that cannot be connected */ | 59 | /* skip me clients that cannot be connected */ |
54 | if (cl->props.max_number_of_connections == 0) | 60 | if (me_cl->props.max_number_of_connections == 0) |
55 | continue; | 61 | continue; |
56 | 62 | ||
57 | pos += scnprintf(buf + pos, bufsz - pos, | 63 | pos += scnprintf(buf + pos, bufsz - pos, |
58 | "%2d|%2d|%4d|%pUl|%3d|%7d|\n", | 64 | "%2d|%2d|%4d|%pUl|%3d|%7d|%2d|\n", |
59 | i, cl->client_id, | 65 | i++, me_cl->client_id, |
60 | cl->props.fixed_address, | 66 | me_cl->props.fixed_address, |
61 | &cl->props.protocol_name, | 67 | &me_cl->props.protocol_name, |
62 | cl->props.max_number_of_connections, | 68 | me_cl->props.max_number_of_connections, |
63 | cl->props.max_msg_length); | 69 | me_cl->props.max_msg_length, |
70 | me_cl->props.single_recv_buf); | ||
64 | } | 71 | } |
65 | out: | 72 | out: |
66 | mutex_unlock(&dev->device_lock); | 73 | mutex_unlock(&dev->device_lock); |
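The meclients dump above switches from a fixed 1 KB buffer to one sized from the list itself: count the entries, reserve one header-width slot per row plus the header, then append with bounded prints. A user-space analog of that sizing scheme, with made-up names and formats:

#include <stdio.h>
#include <stdlib.h>

#define ROW_FMT "%3u|%7u|\n"

int main(void)
{
	unsigned int ids[] = { 1, 3, 7 };
	unsigned int rows = sizeof(ids) / sizeof(ids[0]);
	size_t bufsz = (size_t)(rows + 1) * 16;	/* 16 > longest line incl. NUL */
	char *buf = calloc(1, bufsz);
	size_t pos = 0;
	unsigned int i;

	if (!buf)
		return 1;
	pos += snprintf(buf + pos, bufsz - pos, " id|  creds|\n");
	for (i = 0; i < rows; i++)
		pos += snprintf(buf + pos, bufsz - pos, ROW_FMT, ids[i], i * 2);
	fputs(buf, stdout);
	free(buf);
	return 0;
}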
@@ -98,7 +105,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, | |||
98 | 105 | ||
99 | mutex_lock(&dev->device_lock); | 106 | mutex_lock(&dev->device_lock); |
100 | 107 | ||
101 | /* if the driver is not enabled the list won't b consitent */ | 108 | /* if the driver is not enabled the list won't be consistent */ |
102 | if (dev->dev_state != MEI_DEV_ENABLED) | 109 | if (dev->dev_state != MEI_DEV_ENABLED) |
103 | goto out; | 110 | goto out; |
104 | 111 | ||
@@ -135,8 +142,13 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, | |||
135 | if (!buf) | 142 | if (!buf) |
136 | return -ENOMEM; | 143 | return -ENOMEM; |
137 | 144 | ||
138 | pos += scnprintf(buf + pos, bufsz - pos, "%s\n", | 145 | pos += scnprintf(buf + pos, bufsz - pos, "dev: %s\n", |
139 | mei_dev_state_str(dev->dev_state)); | 146 | mei_dev_state_str(dev->dev_state)); |
147 | pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", | ||
148 | mei_hbm_state_str(dev->hbm_state)); | ||
149 | pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", | ||
150 | mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED", | ||
151 | mei_pg_state_str(mei_pg_state(dev))); | ||
140 | ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); | 152 | ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); |
141 | kfree(buf); | 153 | kfree(buf); |
142 | return ret; | 154 | return ret; |
@@ -149,7 +161,8 @@ static const struct file_operations mei_dbgfs_fops_devstate = { | |||
149 | 161 | ||
150 | /** | 162 | /** |
151 | * mei_dbgfs_deregister - Remove the debugfs files and directories | 163 | * mei_dbgfs_deregister - Remove the debugfs files and directories |
152 | * @mei - pointer to mei device private data | 164 | * |
165 | * @dev: the mei device structure | ||
153 | */ | 166 | */ |
154 | void mei_dbgfs_deregister(struct mei_device *dev) | 167 | void mei_dbgfs_deregister(struct mei_device *dev) |
155 | { | 168 | { |
@@ -160,12 +173,17 @@ void mei_dbgfs_deregister(struct mei_device *dev) | |||
160 | } | 173 | } |
161 | 174 | ||
162 | /** | 175 | /** |
163 | * Add the debugfs files | 176 | * mei_dbgfs_register - Add the debugfs files |
164 | * | 177 | * |
178 | * @dev: the mei device structure | ||
179 | * @name: the mei device name | ||
180 | * | ||
181 | * Return: 0 on success, <0 on failure. | ||
165 | */ | 182 | */ |
166 | int mei_dbgfs_register(struct mei_device *dev, const char *name) | 183 | int mei_dbgfs_register(struct mei_device *dev, const char *name) |
167 | { | 184 | { |
168 | struct dentry *dir, *f; | 185 | struct dentry *dir, *f; |
186 | |||
169 | dir = debugfs_create_dir(name, NULL); | 187 | dir = debugfs_create_dir(name, NULL); |
170 | if (!dir) | 188 | if (!dir) |
171 | return -ENOMEM; | 189 | return -ENOMEM; |
@@ -173,19 +191,19 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) | |||
173 | f = debugfs_create_file("meclients", S_IRUSR, dir, | 191 | f = debugfs_create_file("meclients", S_IRUSR, dir, |
174 | dev, &mei_dbgfs_fops_meclients); | 192 | dev, &mei_dbgfs_fops_meclients); |
175 | if (!f) { | 193 | if (!f) { |
176 | dev_err(&dev->pdev->dev, "meclients: registration failed\n"); | 194 | dev_err(dev->dev, "meclients: registration failed\n"); |
177 | goto err; | 195 | goto err; |
178 | } | 196 | } |
179 | f = debugfs_create_file("active", S_IRUSR, dir, | 197 | f = debugfs_create_file("active", S_IRUSR, dir, |
180 | dev, &mei_dbgfs_fops_active); | 198 | dev, &mei_dbgfs_fops_active); |
181 | if (!f) { | 199 | if (!f) { |
182 | dev_err(&dev->pdev->dev, "meclients: registration failed\n"); | 200 | dev_err(dev->dev, "meclients: registration failed\n"); |
183 | goto err; | 201 | goto err; |
184 | } | 202 | } |
185 | f = debugfs_create_file("devstate", S_IRUSR, dir, | 203 | f = debugfs_create_file("devstate", S_IRUSR, dir, |
186 | dev, &mei_dbgfs_fops_devstate); | 204 | dev, &mei_dbgfs_fops_devstate); |
187 | if (!f) { | 205 | if (!f) { |
188 | dev_err(&dev->pdev->dev, "devstate: registration failed\n"); | 206 | dev_err(dev->dev, "devstate: registration failed\n"); |
189 | goto err; | 207 | goto err; |
190 | } | 208 | } |
191 | dev->dbgfs_dir = dir; | 209 | dev->dbgfs_dir = dir; |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 804106209d76..49a2653d91a5 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
@@ -15,16 +15,34 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/export.h> | 17 | #include <linux/export.h> |
18 | #include <linux/pci.h> | ||
19 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
20 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
21 | #include <linux/mei.h> | ||
22 | #include <linux/pm_runtime.h> | 20 | #include <linux/pm_runtime.h> |
21 | #include <linux/slab.h> | ||
22 | |||
23 | #include <linux/mei.h> | ||
23 | 24 | ||
24 | #include "mei_dev.h" | 25 | #include "mei_dev.h" |
25 | #include "hbm.h" | 26 | #include "hbm.h" |
26 | #include "client.h" | 27 | #include "client.h" |
27 | 28 | ||
29 | static const char *mei_hbm_status_str(enum mei_hbm_status status) | ||
30 | { | ||
31 | #define MEI_HBM_STATUS(status) case MEI_HBMS_##status: return #status | ||
32 | switch (status) { | ||
33 | MEI_HBM_STATUS(SUCCESS); | ||
34 | MEI_HBM_STATUS(CLIENT_NOT_FOUND); | ||
35 | MEI_HBM_STATUS(ALREADY_EXISTS); | ||
36 | MEI_HBM_STATUS(REJECTED); | ||
37 | MEI_HBM_STATUS(INVALID_PARAMETER); | ||
38 | MEI_HBM_STATUS(NOT_ALLOWED); | ||
39 | MEI_HBM_STATUS(ALREADY_STARTED); | ||
40 | MEI_HBM_STATUS(NOT_STARTED); | ||
41 | default: return "unknown"; | ||
42 | } | ||
43 | #undef MEI_HBM_STATUS | ||
44 | }; | ||
45 | |||
28 | static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) | 46 | static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) |
29 | { | 47 | { |
30 | #define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status | 48 | #define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status |
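mei_hbm_status_str() added above uses the usual stringification trick: a helper macro expands each enum constant into 'case FOO: return "FOO";', so the cases and their names cannot drift apart. The same pattern in standalone form with invented names:

#include <stdio.h>

enum demo_status { DEMO_SUCCESS, DEMO_REJECTED, DEMO_NOT_STARTED };

static const char *demo_status_str(enum demo_status s)
{
#define DEMO_CASE(x) case DEMO_##x: return #x
	switch (s) {
	DEMO_CASE(SUCCESS);
	DEMO_CASE(REJECTED);
	DEMO_CASE(NOT_STARTED);
	default: return "unknown";
	}
#undef DEMO_CASE
}

int main(void)
{
	printf("%s\n", demo_status_str(DEMO_REJECTED));
	return 0;
}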
@@ -39,13 +57,29 @@ static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) | |||
39 | #undef MEI_CL_CCS | 57 | #undef MEI_CL_CCS |
40 | } | 58 | } |
41 | 59 | ||
60 | const char *mei_hbm_state_str(enum mei_hbm_state state) | ||
61 | { | ||
62 | #define MEI_HBM_STATE(state) case MEI_HBM_##state: return #state | ||
63 | switch (state) { | ||
64 | MEI_HBM_STATE(IDLE); | ||
65 | MEI_HBM_STATE(STARTING); | ||
66 | MEI_HBM_STATE(STARTED); | ||
67 | MEI_HBM_STATE(ENUM_CLIENTS); | ||
68 | MEI_HBM_STATE(CLIENT_PROPERTIES); | ||
69 | MEI_HBM_STATE(STOPPED); | ||
70 | default: | ||
71 | return "unknown"; | ||
72 | } | ||
73 | #undef MEI_HBM_STATE | ||
74 | } | ||
75 | |||
42 | /** | 76 | /** |
43 | * mei_cl_conn_status_to_errno - convert client connect response | 77 | * mei_cl_conn_status_to_errno - convert client connect response |
44 | * status to error code | 78 | * status to error code |
45 | * | 79 | * |
46 | * @status: client connect response status | 80 | * @status: client connect response status |
47 | * | 81 | * |
48 | * returns corresponding error code | 82 | * Return: corresponding error code |
49 | */ | 83 | */ |
50 | static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) | 84 | static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) |
51 | { | 85 | { |
@@ -71,60 +105,54 @@ void mei_hbm_idle(struct mei_device *dev) | |||
71 | } | 105 | } |
72 | 106 | ||
73 | /** | 107 | /** |
74 | * mei_hbm_reset - reset hbm counters and book keeping data structurs | 108 | * mei_me_cl_remove_all - remove all me clients |
75 | * | 109 | * |
76 | * @dev: the device structure | 110 | * @dev: the device structure |
77 | */ | 111 | */ |
78 | void mei_hbm_reset(struct mei_device *dev) | 112 | static void mei_me_cl_remove_all(struct mei_device *dev) |
79 | { | 113 | { |
80 | dev->me_clients_num = 0; | 114 | struct mei_me_client *me_cl, *next; |
81 | dev->me_client_presentation_num = 0; | ||
82 | dev->me_client_index = 0; | ||
83 | |||
84 | kfree(dev->me_clients); | ||
85 | dev->me_clients = NULL; | ||
86 | 115 | ||
87 | mei_hbm_idle(dev); | 116 | list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) { |
117 | list_del(&me_cl->list); | ||
118 | kfree(me_cl); | ||
119 | } | ||
88 | } | 120 | } |
89 | 121 | ||
90 | /** | 122 | /** |
91 | * mei_hbm_me_cl_allocate - allocates storage for me clients | 123 | * mei_hbm_reset - reset hbm counters and book keeping data structurs |
92 | * | 124 | * |
93 | * @dev: the device structure | 125 | * @dev: the device structure |
94 | * | ||
95 | * returns 0 on success -ENOMEM on allocation failure | ||
96 | */ | 126 | */ |
97 | static int mei_hbm_me_cl_allocate(struct mei_device *dev) | 127 | void mei_hbm_reset(struct mei_device *dev) |
98 | { | 128 | { |
99 | struct mei_me_client *clients; | 129 | dev->me_client_index = 0; |
100 | int b; | ||
101 | 130 | ||
102 | mei_hbm_reset(dev); | 131 | mei_me_cl_remove_all(dev); |
103 | 132 | ||
104 | /* count how many ME clients we have */ | 133 | mei_hbm_idle(dev); |
105 | for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) | 134 | } |
106 | dev->me_clients_num++; | ||
107 | 135 | ||
108 | if (dev->me_clients_num == 0) | 136 | /** |
109 | return 0; | 137 | * mei_hbm_hdr - construct hbm header |
138 | * | ||
139 | * @hdr: hbm header | ||
140 | * @length: payload length | ||
141 | */ | ||
110 | 142 | ||
111 | dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", | 143 | static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) |
112 | dev->me_clients_num * sizeof(struct mei_me_client)); | 144 | { |
113 | /* allocate storage for ME clients representation */ | 145 | hdr->host_addr = 0; |
114 | clients = kcalloc(dev->me_clients_num, | 146 | hdr->me_addr = 0; |
115 | sizeof(struct mei_me_client), GFP_KERNEL); | 147 | hdr->length = length; |
116 | if (!clients) { | 148 | hdr->msg_complete = 1; |
117 | dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); | 149 | hdr->reserved = 0; |
118 | return -ENOMEM; | ||
119 | } | ||
120 | dev->me_clients = clients; | ||
121 | return 0; | ||
122 | } | 150 | } |
123 | 151 | ||
124 | /** | 152 | /** |
125 | * mei_hbm_cl_hdr - construct client hbm header | 153 | * mei_hbm_cl_hdr - construct client hbm header |
126 | * | 154 | * |
127 | * @cl: - client | 155 | * @cl: client |
128 | * @hbm_cmd: host bus message command | 156 | * @hbm_cmd: host bus message command |
129 | * @buf: buffer for cl header | 157 | * @buf: buffer for cl header |
130 | * @len: buffer length | 158 | * @len: buffer length |
@@ -142,38 +170,87 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) | |||
142 | } | 170 | } |
143 | 171 | ||
144 | /** | 172 | /** |
145 | * mei_hbm_cl_addr_equal - tells if they have the same address | 173 | * mei_hbm_cl_write - write simple hbm client message |
146 | * | 174 | * |
147 | * @cl: - client | 175 | * @dev: the device structure |
148 | * @buf: buffer with cl header | 176 | * @cl: client |
177 | * @hbm_cmd: host bus message command | ||
178 | * @len: buffer length | ||
149 | * | 179 | * |
150 | * returns true if addresses are the same | 180 | * Return: 0 on success, <0 on failure. |
151 | */ | 181 | */ |
152 | static inline | 182 | static inline |
153 | bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) | 183 | int mei_hbm_cl_write(struct mei_device *dev, |
184 | struct mei_cl *cl, u8 hbm_cmd, size_t len) | ||
185 | { | ||
186 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
187 | |||
188 | mei_hbm_hdr(mei_hdr, len); | ||
189 | mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len); | ||
190 | |||
191 | return mei_write_message(dev, mei_hdr, dev->wr_msg.data); | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * mei_hbm_cl_addr_equal - check if the client's and | ||
196 | * the message address match | ||
197 | * | ||
198 | * @cl: client | ||
199 | * @cmd: hbm client message | ||
200 | * | ||
201 | * Return: true if addresses are the same | ||
202 | */ | ||
203 | static inline | ||
204 | bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd) | ||
154 | { | 205 | { |
155 | struct mei_hbm_cl_cmd *cmd = buf; | ||
156 | return cl->host_client_id == cmd->host_addr && | 206 | return cl->host_client_id == cmd->host_addr && |
157 | cl->me_client_id == cmd->me_addr; | 207 | cl->me_client_id == cmd->me_addr; |
158 | } | 208 | } |
159 | 209 | ||
210 | /** | ||
211 | * mei_hbm_cl_find_by_cmd - find recipient client | ||
212 | * | ||
213 | * @dev: the device structure | ||
214 | * @buf: a buffer with hbm cl command | ||
215 | * | ||
216 | * Return: the recipient client or NULL if not found | ||
217 | */ | ||
218 | static inline | ||
219 | struct mei_cl *mei_hbm_cl_find_by_cmd(struct mei_device *dev, void *buf) | ||
220 | { | ||
221 | struct mei_hbm_cl_cmd *cmd = (struct mei_hbm_cl_cmd *)buf; | ||
222 | struct mei_cl *cl; | ||
223 | |||
224 | list_for_each_entry(cl, &dev->file_list, link) | ||
225 | if (mei_hbm_cl_addr_equal(cl, cmd)) | ||
226 | return cl; | ||
227 | return NULL; | ||
228 | } | ||
229 | |||
160 | 230 | ||
231 | /** | ||
232 | * mei_hbm_start_wait - wait for start response message. | ||
233 | * | ||
234 | * @dev: the device structure | ||
235 | * | ||
236 | * Return: 0 on success and < 0 on failure | ||
237 | */ | ||
161 | int mei_hbm_start_wait(struct mei_device *dev) | 238 | int mei_hbm_start_wait(struct mei_device *dev) |
162 | { | 239 | { |
163 | int ret; | 240 | int ret; |
164 | if (dev->hbm_state > MEI_HBM_START) | 241 | |
242 | if (dev->hbm_state > MEI_HBM_STARTING) | ||
165 | return 0; | 243 | return 0; |
166 | 244 | ||
167 | mutex_unlock(&dev->device_lock); | 245 | mutex_unlock(&dev->device_lock); |
168 | ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, | 246 | ret = wait_event_timeout(dev->wait_hbm_start, |
169 | dev->hbm_state == MEI_HBM_IDLE || | 247 | dev->hbm_state != MEI_HBM_STARTING, |
170 | dev->hbm_state >= MEI_HBM_STARTED, | ||
171 | mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); | 248 | mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); |
172 | mutex_lock(&dev->device_lock); | 249 | mutex_lock(&dev->device_lock); |
173 | 250 | ||
174 | if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { | 251 | if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { |
175 | dev->hbm_state = MEI_HBM_IDLE; | 252 | dev->hbm_state = MEI_HBM_IDLE; |
176 | dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); | 253 | dev_err(dev->dev, "waiting for mei start failed\n"); |
177 | return -ETIME; | 254 | return -ETIME; |
178 | } | 255 | } |
179 | return 0; | 256 | return 0; |
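Two details in the hunk above are easy to miss: mei_hbm_cl_write() folds the repeated header-build-and-write sequence into a single helper, and mei_hbm_start_wait() now uses the non-interruptible wait_event_timeout() on the dedicated wait_hbm_start queue, where a return value of 0 means the timeout expired with the condition still false. A hedged sketch of that timeout check; the wrapper name and arguments are invented:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Sketch, not the driver's code: wait_event_timeout() returns 0 only
 * when the timeout elapsed with the condition still false, so that is
 * the one case mapped to -ETIME. */
static int wait_for_flag(wait_queue_head_t *wq, const bool *flag,
			 unsigned long to_jiffies)
{
	long ret = wait_event_timeout(*wq, *flag, to_jiffies);

	return ret == 0 ? -ETIME : 0;
}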
@@ -184,7 +261,7 @@ int mei_hbm_start_wait(struct mei_device *dev) | |||
184 | * | 261 | * |
185 | * @dev: the device structure | 262 | * @dev: the device structure |
186 | * | 263 | * |
187 | * returns 0 on success and < 0 on failure | 264 | * Return: 0 on success and < 0 on failure |
188 | */ | 265 | */ |
189 | int mei_hbm_start_req(struct mei_device *dev) | 266 | int mei_hbm_start_req(struct mei_device *dev) |
190 | { | 267 | { |
@@ -193,6 +270,8 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
193 | const size_t len = sizeof(struct hbm_host_version_request); | 270 | const size_t len = sizeof(struct hbm_host_version_request); |
194 | int ret; | 271 | int ret; |
195 | 272 | ||
273 | mei_hbm_reset(dev); | ||
274 | |||
196 | mei_hbm_hdr(mei_hdr, len); | 275 | mei_hbm_hdr(mei_hdr, len); |
197 | 276 | ||
198 | /* host start message */ | 277 | /* host start message */ |
@@ -205,12 +284,12 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
205 | dev->hbm_state = MEI_HBM_IDLE; | 284 | dev->hbm_state = MEI_HBM_IDLE; |
206 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 285 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
207 | if (ret) { | 286 | if (ret) { |
208 | dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", | 287 | dev_err(dev->dev, "version message write failed: ret = %d\n", |
209 | ret); | 288 | ret); |
210 | return ret; | 289 | return ret; |
211 | } | 290 | } |
212 | 291 | ||
213 | dev->hbm_state = MEI_HBM_START; | 292 | dev->hbm_state = MEI_HBM_STARTING; |
214 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; | 293 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; |
215 | return 0; | 294 | return 0; |
216 | } | 295 | } |
@@ -220,7 +299,7 @@ int mei_hbm_start_req(struct mei_device *dev) | |||
220 | * | 299 | * |
221 | * @dev: the device structure | 300 | * @dev: the device structure |
222 | * | 301 | * |
223 | * returns 0 on success and < 0 on failure | 302 | * Return: 0 on success and < 0 on failure |
224 | */ | 303 | */ |
225 | static int mei_hbm_enum_clients_req(struct mei_device *dev) | 304 | static int mei_hbm_enum_clients_req(struct mei_device *dev) |
226 | { | 305 | { |
@@ -238,7 +317,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) | |||
238 | 317 | ||
239 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 318 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
240 | if (ret) { | 319 | if (ret) { |
241 | dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", | 320 | dev_err(dev->dev, "enumeration request write failed: ret = %d.\n", |
242 | ret); | 321 | ret); |
243 | return ret; | 322 | return ret; |
244 | } | 323 | } |
@@ -247,12 +326,38 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) | |||
247 | return 0; | 326 | return 0; |
248 | } | 327 | } |
249 | 328 | ||
329 | /* | ||
330 | * mei_hbm_me_cl_add - add new me client to the list | ||
331 | * | ||
332 | * @dev: the device structure | ||
333 | * @res: hbm property response | ||
334 | * | ||
335 | * Return: 0 on success and -ENOMEM on allocation failure | ||
336 | */ | ||
337 | |||
338 | static int mei_hbm_me_cl_add(struct mei_device *dev, | ||
339 | struct hbm_props_response *res) | ||
340 | { | ||
341 | struct mei_me_client *me_cl; | ||
342 | |||
343 | me_cl = kzalloc(sizeof(struct mei_me_client), GFP_KERNEL); | ||
344 | if (!me_cl) | ||
345 | return -ENOMEM; | ||
346 | |||
347 | me_cl->props = res->client_properties; | ||
348 | me_cl->client_id = res->me_addr; | ||
349 | me_cl->mei_flow_ctrl_creds = 0; | ||
350 | |||
351 | list_add(&me_cl->list, &dev->me_clients); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
250 | /** | 355 | /** |
251 | * mei_hbm_prop_req - request property for a single client | 356 | * mei_hbm_prop_req - request property for a single client |
252 | * | 357 | * |
253 | * @dev: the device structure | 358 | * @dev: the device structure |
254 | * | 359 | * |
255 | * returns 0 on success and < 0 on failure | 360 | * Return: 0 on success and < 0 on failure |
256 | */ | 361 | */ |
257 | 362 | ||
258 | static int mei_hbm_prop_req(struct mei_device *dev) | 363 | static int mei_hbm_prop_req(struct mei_device *dev) |
@@ -262,11 +367,8 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
262 | struct hbm_props_request *prop_req; | 367 | struct hbm_props_request *prop_req; |
263 | const size_t len = sizeof(struct hbm_props_request); | 368 | const size_t len = sizeof(struct hbm_props_request); |
264 | unsigned long next_client_index; | 369 | unsigned long next_client_index; |
265 | unsigned long client_num; | ||
266 | int ret; | 370 | int ret; |
267 | 371 | ||
268 | client_num = dev->me_client_presentation_num; | ||
269 | |||
270 | next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, | 372 | next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, |
271 | dev->me_client_index); | 373 | dev->me_client_index); |
272 | 374 | ||
@@ -278,21 +380,17 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
278 | return 0; | 380 | return 0; |
279 | } | 381 | } |
280 | 382 | ||
281 | dev->me_clients[client_num].client_id = next_client_index; | ||
282 | dev->me_clients[client_num].mei_flow_ctrl_creds = 0; | ||
283 | |||
284 | mei_hbm_hdr(mei_hdr, len); | 383 | mei_hbm_hdr(mei_hdr, len); |
285 | prop_req = (struct hbm_props_request *)dev->wr_msg.data; | 384 | prop_req = (struct hbm_props_request *)dev->wr_msg.data; |
286 | 385 | ||
287 | memset(prop_req, 0, sizeof(struct hbm_props_request)); | 386 | memset(prop_req, 0, sizeof(struct hbm_props_request)); |
288 | 387 | ||
289 | |||
290 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; | 388 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; |
291 | prop_req->address = next_client_index; | 389 | prop_req->me_addr = next_client_index; |
292 | 390 | ||
293 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 391 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
294 | if (ret) { | 392 | if (ret) { |
295 | dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", | 393 | dev_err(dev->dev, "properties request write failed: ret = %d\n", |
296 | ret); | 394 | ret); |
297 | return ret; | 395 | return ret; |
298 | } | 396 | } |
@@ -309,7 +407,8 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
309 | * @dev: the device structure | 407 | * @dev: the device structure |
310 | * @pg_cmd: the pg command code | 408 | * @pg_cmd: the pg command code |
311 | * | 409 | * |
312 | * This function returns -EIO on write failure | 410 | * Return: -EIO on write failure |
411 | * -EOPNOTSUPP if the operation is not supported by the protocol | ||
313 | */ | 412 | */ |
314 | int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) | 413 | int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) |
315 | { | 414 | { |
@@ -318,6 +417,9 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) | |||
318 | const size_t len = sizeof(struct hbm_power_gate); | 417 | const size_t len = sizeof(struct hbm_power_gate); |
319 | int ret; | 418 | int ret; |
320 | 419 | ||
420 | if (!dev->hbm_f_pg_supported) | ||
421 | return -EOPNOTSUPP; | ||
422 | |||
321 | mei_hbm_hdr(mei_hdr, len); | 423 | mei_hbm_hdr(mei_hdr, len); |
322 | 424 | ||
323 | req = (struct hbm_power_gate *)dev->wr_msg.data; | 425 | req = (struct hbm_power_gate *)dev->wr_msg.data; |
@@ -326,7 +428,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) | |||
326 | 428 | ||
327 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 429 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
328 | if (ret) | 430 | if (ret) |
329 | dev_err(&dev->pdev->dev, "power gate command write failed.\n"); | 431 | dev_err(dev->dev, "power gate command write failed.\n"); |
330 | return ret; | 432 | return ret; |
331 | } | 433 | } |
332 | EXPORT_SYMBOL_GPL(mei_hbm_pg); | 434 | EXPORT_SYMBOL_GPL(mei_hbm_pg); |
@@ -334,10 +436,9 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg); | |||
334 | /** | 436 | /** |
335 | * mei_hbm_stop_req - send stop request message | 437 | * mei_hbm_stop_req - send stop request message |
336 | * | 438 | * |
337 | * @dev - mei device | 439 | * @dev: mei device |
338 | * @cl: client info | ||
339 | * | 440 | * |
340 | * This function returns -EIO on write failure | 441 | * Return: -EIO on write failure |
341 | */ | 442 | */ |
342 | static int mei_hbm_stop_req(struct mei_device *dev) | 443 | static int mei_hbm_stop_req(struct mei_device *dev) |
343 | { | 444 | { |
@@ -361,19 +462,14 @@ static int mei_hbm_stop_req(struct mei_device *dev) | |||
361 | * @dev: the device structure | 462 | * @dev: the device structure |
362 | * @cl: client info | 463 | * @cl: client info |
363 | * | 464 | * |
364 | * This function returns -EIO on write failure | 465 | * Return: -EIO on write failure |
365 | */ | 466 | */ |
366 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) | 467 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) |
367 | { | 468 | { |
368 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
369 | const size_t len = sizeof(struct hbm_flow_control); | 469 | const size_t len = sizeof(struct hbm_flow_control); |
370 | 470 | ||
371 | mei_hbm_hdr(mei_hdr, len); | ||
372 | mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); | ||
373 | |||
374 | cl_dbg(dev, cl, "sending flow control\n"); | 471 | cl_dbg(dev, cl, "sending flow control\n"); |
375 | 472 | return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len); | |
376 | return mei_write_message(dev, mei_hdr, dev->wr_msg.data); | ||
377 | } | 473 | } |
378 | 474 | ||
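The client requests in this patch drop the open-coded header setup in favour of a single mei_hbm_cl_write() helper. Its definition is outside this excerpt; judging from the removed mei_hbm_hdr()/mei_hbm_cl_hdr()/mei_write_message() sequence it replaces, it presumably reads roughly like:

	/* Sketch of the assumed mei_hbm_cl_write() helper, reconstructed from
	 * the open-coded sequence removed in this patch; the real definition
	 * in hbm.c may differ in detail.
	 */
	static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
					   u8 hbm_cmd, size_t len)
	{
		struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;

		mei_hbm_hdr(mei_hdr, len);
		mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len);

		return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
	}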
379 | /** | 475 | /** |
@@ -382,31 +478,26 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) | |||
382 | * @dev: the device structure | 478 | * @dev: the device structure |
383 | * @flow: flow control. | 479 | * @flow: flow control. |
384 | * | 480 | * |
385 | * return 0 on success, < 0 otherwise | 481 | * Return: 0 on success, < 0 otherwise |
386 | */ | 482 | */ |
387 | static int mei_hbm_add_single_flow_creds(struct mei_device *dev, | 483 | static int mei_hbm_add_single_flow_creds(struct mei_device *dev, |
388 | struct hbm_flow_control *flow) | 484 | struct hbm_flow_control *flow) |
389 | { | 485 | { |
390 | struct mei_me_client *me_cl; | 486 | struct mei_me_client *me_cl; |
391 | int id; | ||
392 | 487 | ||
393 | id = mei_me_cl_by_id(dev, flow->me_addr); | 488 | me_cl = mei_me_cl_by_id(dev, flow->me_addr); |
394 | if (id < 0) { | 489 | if (!me_cl) { |
395 | dev_err(&dev->pdev->dev, "no such me client %d\n", | 490 | dev_err(dev->dev, "no such me client %d\n", |
396 | flow->me_addr); | 491 | flow->me_addr); |
397 | return id; | 492 | return -ENOENT; |
398 | } | 493 | } |
399 | 494 | ||
400 | me_cl = &dev->me_clients[id]; | 495 | if (WARN_ON(me_cl->props.single_recv_buf == 0)) |
401 | if (me_cl->props.single_recv_buf) { | 496 | return -EINVAL; |
402 | me_cl->mei_flow_ctrl_creds++; | 497 | |
403 | dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", | 498 | me_cl->mei_flow_ctrl_creds++; |
404 | flow->me_addr); | 499 | dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n", |
405 | dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", | 500 | flow->me_addr, me_cl->mei_flow_ctrl_creds); |
406 | me_cl->mei_flow_ctrl_creds); | ||
407 | } else { | ||
408 | BUG(); /* error in flow control */ | ||
409 | } | ||
410 | 501 | ||
411 | return 0; | 502 | return 0; |
412 | } | 503 | } |
@@ -418,7 +509,7 @@ static int mei_hbm_add_single_flow_creds(struct mei_device *dev, | |||
418 | * @flow_control: flow control response bus message | 509 | * @flow_control: flow control response bus message |
419 | */ | 510 | */ |
420 | static void mei_hbm_cl_flow_control_res(struct mei_device *dev, | 511 | static void mei_hbm_cl_flow_control_res(struct mei_device *dev, |
421 | struct hbm_flow_control *flow_control) | 512 | struct hbm_flow_control *flow_control) |
422 | { | 513 | { |
423 | struct mei_cl *cl; | 514 | struct mei_cl *cl; |
424 | 515 | ||
@@ -428,16 +519,11 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev, | |||
428 | return; | 519 | return; |
429 | } | 520 | } |
430 | 521 | ||
431 | /* normal connection */ | 522 | cl = mei_hbm_cl_find_by_cmd(dev, flow_control); |
432 | list_for_each_entry(cl, &dev->file_list, link) { | 523 | if (cl) { |
433 | if (mei_hbm_cl_addr_equal(cl, flow_control)) { | 524 | cl->mei_flow_ctrl_creds++; |
434 | cl->mei_flow_ctrl_creds++; | 525 | cl_dbg(dev, cl, "flow control creds = %d.\n", |
435 | dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", | 526 | cl->mei_flow_ctrl_creds); |
436 | flow_control->host_addr, flow_control->me_addr); | ||
437 | dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n", | ||
438 | cl->mei_flow_ctrl_creds); | ||
439 | break; | ||
440 | } | ||
441 | } | 527 | } |
442 | } | 528 | } |
443 | 529 | ||
@@ -448,17 +534,13 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev, | |||
448 | * @dev: the device structure | 534 | * @dev: the device structure |
449 | * @cl: a client to disconnect from | 535 | * @cl: a client to disconnect from |
450 | * | 536 | * |
451 | * This function returns -EIO on write failure | 537 | * Return: -EIO on write failure |
452 | */ | 538 | */ |
453 | int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) | 539 | int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) |
454 | { | 540 | { |
455 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
456 | const size_t len = sizeof(struct hbm_client_connect_request); | 541 | const size_t len = sizeof(struct hbm_client_connect_request); |
457 | 542 | ||
458 | mei_hbm_hdr(mei_hdr, len); | 543 | return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len); |
459 | mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len); | ||
460 | |||
461 | return mei_write_message(dev, mei_hdr, dev->wr_msg.data); | ||
462 | } | 544 | } |
463 | 545 | ||
464 | /** | 546 | /** |
@@ -467,53 +549,34 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) | |||
467 | * @dev: the device structure | 549 | * @dev: the device structure |
468 | * @cl: a client to disconnect from | 550 | * @cl: a client to disconnect from |
469 | * | 551 | * |
470 | * This function returns -EIO on write failure | 552 | * Return: -EIO on write failure |
471 | */ | 553 | */ |
472 | int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) | 554 | int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) |
473 | { | 555 | { |
474 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
475 | const size_t len = sizeof(struct hbm_client_connect_response); | 556 | const size_t len = sizeof(struct hbm_client_connect_response); |
476 | 557 | ||
477 | mei_hbm_hdr(mei_hdr, len); | 558 | return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len); |
478 | mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len); | ||
479 | |||
480 | return mei_write_message(dev, mei_hdr, dev->wr_msg.data); | ||
481 | } | 559 | } |
482 | 560 | ||
483 | /** | 561 | /** |
484 | * mei_hbm_cl_disconnect_res - disconnect response from ME | 562 | * mei_hbm_cl_disconnect_res - update the client state according |
563 | * to the disconnect response | ||
485 | * | 564 | * |
486 | * @dev: the device structure | 565 | * @cl: mei host client |
487 | * @rs: disconnect response bus message | 566 | * @cmd: disconnect client response host bus message |
488 | */ | 567 | */ |
489 | static void mei_hbm_cl_disconnect_res(struct mei_device *dev, | 568 | static void mei_hbm_cl_disconnect_res(struct mei_cl *cl, |
490 | struct hbm_client_connect_response *rs) | 569 | struct mei_hbm_cl_cmd *cmd) |
491 | { | 570 | { |
492 | struct mei_cl *cl; | 571 | struct hbm_client_connect_response *rs = |
493 | struct mei_cl_cb *cb, *next; | 572 | (struct hbm_client_connect_response *)cmd; |
494 | 573 | ||
495 | dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", | 574 | dev_dbg(cl->dev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", |
496 | rs->me_addr, rs->host_addr, rs->status); | 575 | rs->me_addr, rs->host_addr, rs->status); |
497 | 576 | ||
498 | list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { | 577 | if (rs->status == MEI_CL_DISCONN_SUCCESS) |
499 | cl = cb->cl; | 578 | cl->state = MEI_FILE_DISCONNECTED; |
500 | 579 | cl->status = 0; | |
501 | /* this should not happen */ | ||
502 | if (WARN_ON(!cl)) { | ||
503 | list_del(&cb->list); | ||
504 | return; | ||
505 | } | ||
506 | |||
507 | if (mei_hbm_cl_addr_equal(cl, rs)) { | ||
508 | list_del(&cb->list); | ||
509 | if (rs->status == MEI_CL_DISCONN_SUCCESS) | ||
510 | cl->state = MEI_FILE_DISCONNECTED; | ||
511 | |||
512 | cl->status = 0; | ||
513 | cl->timer_count = 0; | ||
514 | break; | ||
515 | } | ||
516 | } | ||
517 | } | 580 | } |
518 | 581 | ||
519 | /** | 582 | /** |
@@ -522,38 +585,55 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, | |||
522 | * @dev: the device structure | 585 | * @dev: the device structure |
523 | * @cl: a client to connect to | 586 | * @cl: a client to connect to |
524 | * | 587 | * |
525 | * returns -EIO on write failure | 588 | * Return: -EIO on write failure |
526 | */ | 589 | */ |
527 | int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) | 590 | int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) |
528 | { | 591 | { |
529 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
530 | const size_t len = sizeof(struct hbm_client_connect_request); | 592 | const size_t len = sizeof(struct hbm_client_connect_request); |
531 | 593 | ||
532 | mei_hbm_hdr(mei_hdr, len); | 594 | return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len); |
533 | mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len); | 595 | } |
534 | 596 | ||
535 | return mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 597 | /** |
598 | * mei_hbm_cl_connect_res - update the client state according | ||
599 | * to the connection response | ||
600 | * | ||
601 | * @cl: mei host client | ||
602 | * @cmd: connect client response host bus message | ||
603 | */ | ||
604 | static void mei_hbm_cl_connect_res(struct mei_cl *cl, | ||
605 | struct mei_hbm_cl_cmd *cmd) | ||
606 | { | ||
607 | struct hbm_client_connect_response *rs = | ||
608 | (struct hbm_client_connect_response *)cmd; | ||
609 | |||
610 | dev_dbg(cl->dev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", | ||
611 | rs->me_addr, rs->host_addr, | ||
612 | mei_cl_conn_status_str(rs->status)); | ||
613 | |||
614 | if (rs->status == MEI_CL_CONN_SUCCESS) | ||
615 | cl->state = MEI_FILE_CONNECTED; | ||
616 | else | ||
617 | cl->state = MEI_FILE_DISCONNECTED; | ||
618 | cl->status = mei_cl_conn_status_to_errno(rs->status); | ||
536 | } | 619 | } |
537 | 620 | ||
538 | /** | 621 | /** |
539 | * mei_hbm_cl_connect_res - connect response from the ME | 622 | * mei_hbm_cl_res - process hbm response received on behalf |
623 | * an client | ||
540 | * | 624 | * |
541 | * @dev: the device structure | 625 | * @dev: the device structure |
542 | * @rs: connect response bus message | 626 | * @rs: hbm client message |
627 | * @fop_type: file operation type | ||
543 | */ | 628 | */ |
544 | static void mei_hbm_cl_connect_res(struct mei_device *dev, | 629 | static void mei_hbm_cl_res(struct mei_device *dev, |
545 | struct hbm_client_connect_response *rs) | 630 | struct mei_hbm_cl_cmd *rs, |
631 | enum mei_cb_file_ops fop_type) | ||
546 | { | 632 | { |
547 | |||
548 | struct mei_cl *cl; | 633 | struct mei_cl *cl; |
549 | struct mei_cl_cb *cb, *next; | 634 | struct mei_cl_cb *cb, *next; |
550 | 635 | ||
551 | dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", | ||
552 | rs->me_addr, rs->host_addr, | ||
553 | mei_cl_conn_status_str(rs->status)); | ||
554 | |||
555 | cl = NULL; | 636 | cl = NULL; |
556 | |||
557 | list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { | 637 | list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { |
558 | 638 | ||
559 | cl = cb->cl; | 639 | cl = cb->cl; |
@@ -563,7 +643,7 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, | |||
563 | continue; | 643 | continue; |
564 | } | 644 | } |
565 | 645 | ||
566 | if (cb->fop_type != MEI_FOP_CONNECT) | 646 | if (cb->fop_type != fop_type) |
567 | continue; | 647 | continue; |
568 | 648 | ||
569 | if (mei_hbm_cl_addr_equal(cl, rs)) { | 649 | if (mei_hbm_cl_addr_equal(cl, rs)) { |
@@ -575,12 +655,19 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, | |||
575 | if (!cl) | 655 | if (!cl) |
576 | return; | 656 | return; |
577 | 657 | ||
658 | switch (fop_type) { | ||
659 | case MEI_FOP_CONNECT: | ||
660 | mei_hbm_cl_connect_res(cl, rs); | ||
661 | break; | ||
662 | case MEI_FOP_DISCONNECT: | ||
663 | mei_hbm_cl_disconnect_res(cl, rs); | ||
664 | break; | ||
665 | default: | ||
666 | return; | ||
667 | } | ||
668 | |||
578 | cl->timer_count = 0; | 669 | cl->timer_count = 0; |
579 | if (rs->status == MEI_CL_CONN_SUCCESS) | 670 | wake_up(&cl->wait); |
580 | cl->state = MEI_FILE_CONNECTED; | ||
581 | else | ||
582 | cl->state = MEI_FILE_DISCONNECTED; | ||
583 | cl->status = mei_cl_conn_status_to_errno(rs->status); | ||
584 | } | 671 | } |
585 | 672 | ||
586 | 673 | ||
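With this refactor both connect and disconnect responses funnel through mei_hbm_cl_res(), which records the outcome in cl->state/cl->status and wakes cl->wait. The waiting side lives in client.c and is not shown here; a rough, assumption-laden sketch of such a waiter:

	/* Illustrative waiter only; the real client.c code is not in this
	 * excerpt. The predicate and the MEI_CL_CONNECT_TIMEOUT constant
	 * are assumptions, not taken from this patch.
	 */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_CONNECTED ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);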
@@ -591,7 +678,7 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, | |||
591 | * @dev: the device structure. | 678 | * @dev: the device structure. |
592 | * @disconnect_req: disconnect request bus message from the me | 679 | * @disconnect_req: disconnect request bus message from the me |
593 | * | 680 | * |
594 | * returns -ENOMEM on allocation failure | 681 | * Return: -ENOMEM on allocation failure |
595 | */ | 682 | */ |
596 | static int mei_hbm_fw_disconnect_req(struct mei_device *dev, | 683 | static int mei_hbm_fw_disconnect_req(struct mei_device *dev, |
597 | struct hbm_client_connect_request *disconnect_req) | 684 | struct hbm_client_connect_request *disconnect_req) |
@@ -599,34 +686,46 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, | |||
599 | struct mei_cl *cl; | 686 | struct mei_cl *cl; |
600 | struct mei_cl_cb *cb; | 687 | struct mei_cl_cb *cb; |
601 | 688 | ||
602 | list_for_each_entry(cl, &dev->file_list, link) { | 689 | cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); |
603 | if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { | 690 | if (cl) { |
604 | dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", | 691 | cl_dbg(dev, cl, "disconnect request received\n"); |
605 | disconnect_req->host_addr, | 692 | cl->state = MEI_FILE_DISCONNECTED; |
606 | disconnect_req->me_addr); | 693 | cl->timer_count = 0; |
607 | cl->state = MEI_FILE_DISCONNECTED; | ||
608 | cl->timer_count = 0; | ||
609 | |||
610 | cb = mei_io_cb_init(cl, NULL); | ||
611 | if (!cb) | ||
612 | return -ENOMEM; | ||
613 | cb->fop_type = MEI_FOP_DISCONNECT_RSP; | ||
614 | cl_dbg(dev, cl, "add disconnect response as first\n"); | ||
615 | list_add(&cb->list, &dev->ctrl_wr_list.list); | ||
616 | 694 | ||
617 | break; | 695 | cb = mei_io_cb_init(cl, NULL); |
618 | } | 696 | if (!cb) |
697 | return -ENOMEM; | ||
698 | cb->fop_type = MEI_FOP_DISCONNECT_RSP; | ||
699 | cl_dbg(dev, cl, "add disconnect response as first\n"); | ||
700 | list_add(&cb->list, &dev->ctrl_wr_list.list); | ||
619 | } | 701 | } |
620 | return 0; | 702 | return 0; |
621 | } | 703 | } |
622 | 704 | ||
705 | /** | ||
706 | * mei_hbm_config_features - check what hbm features and commands | ||
707 | * are supported by the fw | ||
708 | * | ||
709 | * @dev: the device structure | ||
710 | */ | ||
711 | static void mei_hbm_config_features(struct mei_device *dev) | ||
712 | { | ||
713 | /* Power Gating Isolation Support */ | ||
714 | dev->hbm_f_pg_supported = 0; | ||
715 | if (dev->version.major_version > HBM_MAJOR_VERSION_PGI) | ||
716 | dev->hbm_f_pg_supported = 1; | ||
717 | |||
718 | if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && | ||
719 | dev->version.minor_version >= HBM_MINOR_VERSION_PGI) | ||
720 | dev->hbm_f_pg_supported = 1; | ||
721 | } | ||
623 | 722 | ||
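mei_hbm_config_features() above latches protocol capabilities once the start response arrives (it is called from mei_hbm_dispatch() further down). The two assignments are equivalent to the single predicate below, shown only to spell out the version rule; the HBM_*_VERSION_PGI constants are defined elsewhere and their values are not visible in this diff:

	/* Equivalent one-expression form of the PGI support check above. */
	dev->hbm_f_pg_supported =
		dev->version.major_version > HBM_MAJOR_VERSION_PGI ||
		(dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
		 dev->version.minor_version >= HBM_MINOR_VERSION_PGI);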
624 | /** | 723 | /** |
625 | * mei_hbm_version_is_supported - checks whether the driver can | 724 | * mei_hbm_version_is_supported - checks whether the driver can |
626 | * support the hbm version of the device | 725 | * support the hbm version of the device |
627 | * | 726 | * |
628 | * @dev: the device structure | 727 | * @dev: the device structure |
629 | * returns true if driver can support hbm version of the device | 728 | * Return: true if driver can support hbm version of the device |
630 | */ | 729 | */ |
631 | bool mei_hbm_version_is_supported(struct mei_device *dev) | 730 | bool mei_hbm_version_is_supported(struct mei_device *dev) |
632 | { | 731 | { |
@@ -640,44 +739,44 @@ bool mei_hbm_version_is_supported(struct mei_device *dev) | |||
640 | * handle the read bus message cmd processing. | 739 | * handle the read bus message cmd processing. |
641 | * | 740 | * |
642 | * @dev: the device structure | 741 | * @dev: the device structure |
643 | * @mei_hdr: header of bus message | 742 | * @hdr: header of bus message |
644 | * | 743 | * |
645 | * returns 0 on success and < 0 on failure | 744 | * Return: 0 on success and < 0 on failure |
646 | */ | 745 | */ |
647 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | 746 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) |
648 | { | 747 | { |
649 | struct mei_bus_message *mei_msg; | 748 | struct mei_bus_message *mei_msg; |
650 | struct mei_me_client *me_client; | ||
651 | struct hbm_host_version_response *version_res; | 749 | struct hbm_host_version_response *version_res; |
652 | struct hbm_client_connect_response *connect_res; | ||
653 | struct hbm_client_connect_response *disconnect_res; | ||
654 | struct hbm_client_connect_request *disconnect_req; | ||
655 | struct hbm_flow_control *flow_control; | ||
656 | struct hbm_props_response *props_res; | 750 | struct hbm_props_response *props_res; |
657 | struct hbm_host_enum_response *enum_res; | 751 | struct hbm_host_enum_response *enum_res; |
658 | 752 | ||
753 | struct mei_hbm_cl_cmd *cl_cmd; | ||
754 | struct hbm_client_connect_request *disconnect_req; | ||
755 | struct hbm_flow_control *flow_control; | ||
756 | |||
659 | /* read the message to our buffer */ | 757 | /* read the message to our buffer */ |
660 | BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); | 758 | BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); |
661 | mei_read_slots(dev, dev->rd_msg_buf, hdr->length); | 759 | mei_read_slots(dev, dev->rd_msg_buf, hdr->length); |
662 | mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; | 760 | mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; |
761 | cl_cmd = (struct mei_hbm_cl_cmd *)mei_msg; | ||
663 | 762 | ||
664 | /* ignore spurious message and prevent reset nesting | 763 | /* ignore spurious message and prevent reset nesting |
665 | * hbm is put to idle during system reset | 764 | * hbm is put to idle during system reset |
666 | */ | 765 | */ |
667 | if (dev->hbm_state == MEI_HBM_IDLE) { | 766 | if (dev->hbm_state == MEI_HBM_IDLE) { |
668 | dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); | 767 | dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n"); |
669 | return 0; | 768 | return 0; |
670 | } | 769 | } |
671 | 770 | ||
672 | switch (mei_msg->hbm_cmd) { | 771 | switch (mei_msg->hbm_cmd) { |
673 | case HOST_START_RES_CMD: | 772 | case HOST_START_RES_CMD: |
674 | dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); | 773 | dev_dbg(dev->dev, "hbm: start: response message received.\n"); |
675 | 774 | ||
676 | dev->init_clients_timer = 0; | 775 | dev->init_clients_timer = 0; |
677 | 776 | ||
678 | version_res = (struct hbm_host_version_response *)mei_msg; | 777 | version_res = (struct hbm_host_version_response *)mei_msg; |
679 | 778 | ||
680 | dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", | 779 | dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", |
681 | HBM_MAJOR_VERSION, HBM_MINOR_VERSION, | 780 | HBM_MAJOR_VERSION, HBM_MINOR_VERSION, |
682 | version_res->me_max_version.major_version, | 781 | version_res->me_max_version.major_version, |
683 | version_res->me_max_version.minor_version); | 782 | version_res->me_max_version.minor_version); |
@@ -693,19 +792,21 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
693 | } | 792 | } |
694 | 793 | ||
695 | if (!mei_hbm_version_is_supported(dev)) { | 794 | if (!mei_hbm_version_is_supported(dev)) { |
696 | dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); | 795 | dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n"); |
697 | 796 | ||
698 | dev->hbm_state = MEI_HBM_STOPPED; | 797 | dev->hbm_state = MEI_HBM_STOPPED; |
699 | if (mei_hbm_stop_req(dev)) { | 798 | if (mei_hbm_stop_req(dev)) { |
700 | dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); | 799 | dev_err(dev->dev, "hbm: start: failed to send stop request\n"); |
701 | return -EIO; | 800 | return -EIO; |
702 | } | 801 | } |
703 | break; | 802 | break; |
704 | } | 803 | } |
705 | 804 | ||
805 | mei_hbm_config_features(dev); | ||
806 | |||
706 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || | 807 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
707 | dev->hbm_state != MEI_HBM_START) { | 808 | dev->hbm_state != MEI_HBM_STARTING) { |
708 | dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", | 809 | dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n", |
709 | dev->dev_state, dev->hbm_state); | 810 | dev->dev_state, dev->hbm_state); |
710 | return -EPROTO; | 811 | return -EPROTO; |
711 | } | 812 | } |
@@ -713,45 +814,39 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
713 | dev->hbm_state = MEI_HBM_STARTED; | 814 | dev->hbm_state = MEI_HBM_STARTED; |
714 | 815 | ||
715 | if (mei_hbm_enum_clients_req(dev)) { | 816 | if (mei_hbm_enum_clients_req(dev)) { |
716 | dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); | 817 | dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); |
717 | return -EIO; | 818 | return -EIO; |
718 | } | 819 | } |
719 | 820 | ||
720 | wake_up_interruptible(&dev->wait_recvd_msg); | 821 | wake_up(&dev->wait_hbm_start); |
721 | break; | 822 | break; |
722 | 823 | ||
723 | case CLIENT_CONNECT_RES_CMD: | 824 | case CLIENT_CONNECT_RES_CMD: |
724 | dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); | 825 | dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); |
725 | 826 | mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); | |
726 | connect_res = (struct hbm_client_connect_response *) mei_msg; | ||
727 | mei_hbm_cl_connect_res(dev, connect_res); | ||
728 | wake_up(&dev->wait_recvd_msg); | ||
729 | break; | 827 | break; |
730 | 828 | ||
731 | case CLIENT_DISCONNECT_RES_CMD: | 829 | case CLIENT_DISCONNECT_RES_CMD: |
732 | dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); | 830 | dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n"); |
733 | 831 | mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT); | |
734 | disconnect_res = (struct hbm_client_connect_response *) mei_msg; | ||
735 | mei_hbm_cl_disconnect_res(dev, disconnect_res); | ||
736 | wake_up(&dev->wait_recvd_msg); | ||
737 | break; | 832 | break; |
738 | 833 | ||
739 | case MEI_FLOW_CONTROL_CMD: | 834 | case MEI_FLOW_CONTROL_CMD: |
740 | dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); | 835 | dev_dbg(dev->dev, "hbm: client flow control response: message received.\n"); |
741 | 836 | ||
742 | flow_control = (struct hbm_flow_control *) mei_msg; | 837 | flow_control = (struct hbm_flow_control *) mei_msg; |
743 | mei_hbm_cl_flow_control_res(dev, flow_control); | 838 | mei_hbm_cl_flow_control_res(dev, flow_control); |
744 | break; | 839 | break; |
745 | 840 | ||
746 | case MEI_PG_ISOLATION_ENTRY_RES_CMD: | 841 | case MEI_PG_ISOLATION_ENTRY_RES_CMD: |
747 | dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); | 842 | dev_dbg(dev->dev, "power gate isolation entry response received\n"); |
748 | dev->pg_event = MEI_PG_EVENT_RECEIVED; | 843 | dev->pg_event = MEI_PG_EVENT_RECEIVED; |
749 | if (waitqueue_active(&dev->wait_pg)) | 844 | if (waitqueue_active(&dev->wait_pg)) |
750 | wake_up(&dev->wait_pg); | 845 | wake_up(&dev->wait_pg); |
751 | break; | 846 | break; |
752 | 847 | ||
753 | case MEI_PG_ISOLATION_EXIT_REQ_CMD: | 848 | case MEI_PG_ISOLATION_EXIT_REQ_CMD: |
754 | dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); | 849 | dev_dbg(dev->dev, "power gate isolation exit request received\n"); |
755 | dev->pg_event = MEI_PG_EVENT_RECEIVED; | 850 | dev->pg_event = MEI_PG_EVENT_RECEIVED; |
756 | if (waitqueue_active(&dev->wait_pg)) | 851 | if (waitqueue_active(&dev->wait_pg)) |
757 | wake_up(&dev->wait_pg); | 852 | wake_up(&dev->wait_pg); |
@@ -761,44 +856,33 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
761 | * this is HW initiated exit from PG. | 856 | * this is HW initiated exit from PG. |
762 | * Start runtime pm resume sequence to exit from PG. | 857 | * Start runtime pm resume sequence to exit from PG. |
763 | */ | 858 | */ |
764 | pm_request_resume(&dev->pdev->dev); | 859 | pm_request_resume(dev->dev); |
765 | break; | 860 | break; |
766 | 861 | ||
767 | case HOST_CLIENT_PROPERTIES_RES_CMD: | 862 | case HOST_CLIENT_PROPERTIES_RES_CMD: |
768 | dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); | 863 | dev_dbg(dev->dev, "hbm: properties response: message received.\n"); |
769 | 864 | ||
770 | dev->init_clients_timer = 0; | 865 | dev->init_clients_timer = 0; |
771 | 866 | ||
772 | if (dev->me_clients == NULL) { | 867 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
773 | dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); | 868 | dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { |
869 | dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", | ||
870 | dev->dev_state, dev->hbm_state); | ||
774 | return -EPROTO; | 871 | return -EPROTO; |
775 | } | 872 | } |
776 | 873 | ||
777 | props_res = (struct hbm_props_response *)mei_msg; | 874 | props_res = (struct hbm_props_response *)mei_msg; |
778 | me_client = &dev->me_clients[dev->me_client_presentation_num]; | ||
779 | 875 | ||
780 | if (props_res->status) { | 876 | if (props_res->status) { |
781 | dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", | 877 | dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", |
782 | props_res->status); | 878 | props_res->status, |
879 | mei_hbm_status_str(props_res->status)); | ||
783 | return -EPROTO; | 880 | return -EPROTO; |
784 | } | 881 | } |
785 | 882 | ||
786 | if (me_client->client_id != props_res->address) { | 883 | mei_hbm_me_cl_add(dev, props_res); |
787 | dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", | ||
788 | me_client->client_id, props_res->address); | ||
789 | return -EPROTO; | ||
790 | } | ||
791 | 884 | ||
792 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || | ||
793 | dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { | ||
794 | dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", | ||
795 | dev->dev_state, dev->hbm_state); | ||
796 | return -EPROTO; | ||
797 | } | ||
798 | |||
799 | me_client->props = props_res->client_properties; | ||
800 | dev->me_client_index++; | 885 | dev->me_client_index++; |
801 | dev->me_client_presentation_num++; | ||
802 | 886 | ||
803 | /* request property for the next client */ | 887 | /* request property for the next client */ |
804 | if (mei_hbm_prop_req(dev)) | 888 | if (mei_hbm_prop_req(dev)) |
@@ -807,7 +891,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
807 | break; | 891 | break; |
808 | 892 | ||
809 | case HOST_ENUM_RES_CMD: | 893 | case HOST_ENUM_RES_CMD: |
810 | dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); | 894 | dev_dbg(dev->dev, "hbm: enumeration response: message received\n"); |
811 | 895 | ||
812 | dev->init_clients_timer = 0; | 896 | dev->init_clients_timer = 0; |
813 | 897 | ||
@@ -815,20 +899,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
815 | BUILD_BUG_ON(sizeof(dev->me_clients_map) | 899 | BUILD_BUG_ON(sizeof(dev->me_clients_map) |
816 | < sizeof(enum_res->valid_addresses)); | 900 | < sizeof(enum_res->valid_addresses)); |
817 | memcpy(dev->me_clients_map, enum_res->valid_addresses, | 901 | memcpy(dev->me_clients_map, enum_res->valid_addresses, |
818 | sizeof(enum_res->valid_addresses)); | 902 | sizeof(enum_res->valid_addresses)); |
819 | 903 | ||
820 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || | 904 | if (dev->dev_state != MEI_DEV_INIT_CLIENTS || |
821 | dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { | 905 | dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { |
822 | dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", | 906 | dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", |
823 | dev->dev_state, dev->hbm_state); | 907 | dev->dev_state, dev->hbm_state); |
824 | return -EPROTO; | 908 | return -EPROTO; |
825 | } | 909 | } |
826 | 910 | ||
827 | if (mei_hbm_me_cl_allocate(dev)) { | ||
828 | dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); | ||
829 | return -ENOMEM; | ||
830 | } | ||
831 | |||
832 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; | 911 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; |
833 | 912 | ||
834 | /* first property request */ | 913 | /* first property request */ |
@@ -838,34 +917,34 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
838 | break; | 917 | break; |
839 | 918 | ||
840 | case HOST_STOP_RES_CMD: | 919 | case HOST_STOP_RES_CMD: |
841 | dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); | 920 | dev_dbg(dev->dev, "hbm: stop response: message received\n"); |
842 | 921 | ||
843 | dev->init_clients_timer = 0; | 922 | dev->init_clients_timer = 0; |
844 | 923 | ||
845 | if (dev->hbm_state != MEI_HBM_STOPPED) { | 924 | if (dev->hbm_state != MEI_HBM_STOPPED) { |
846 | dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", | 925 | dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", |
847 | dev->dev_state, dev->hbm_state); | 926 | dev->dev_state, dev->hbm_state); |
848 | return -EPROTO; | 927 | return -EPROTO; |
849 | } | 928 | } |
850 | 929 | ||
851 | dev->dev_state = MEI_DEV_POWER_DOWN; | 930 | dev->dev_state = MEI_DEV_POWER_DOWN; |
852 | dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); | 931 | dev_info(dev->dev, "hbm: stop response: resetting.\n"); |
853 | /* force the reset */ | 932 | /* force the reset */ |
854 | return -EPROTO; | 933 | return -EPROTO; |
855 | break; | 934 | break; |
856 | 935 | ||
857 | case CLIENT_DISCONNECT_REQ_CMD: | 936 | case CLIENT_DISCONNECT_REQ_CMD: |
858 | dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); | 937 | dev_dbg(dev->dev, "hbm: disconnect request: message received\n"); |
859 | 938 | ||
860 | disconnect_req = (struct hbm_client_connect_request *)mei_msg; | 939 | disconnect_req = (struct hbm_client_connect_request *)mei_msg; |
861 | mei_hbm_fw_disconnect_req(dev, disconnect_req); | 940 | mei_hbm_fw_disconnect_req(dev, disconnect_req); |
862 | break; | 941 | break; |
863 | 942 | ||
864 | case ME_STOP_REQ_CMD: | 943 | case ME_STOP_REQ_CMD: |
865 | dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); | 944 | dev_dbg(dev->dev, "hbm: stop request: message received\n"); |
866 | dev->hbm_state = MEI_HBM_STOPPED; | 945 | dev->hbm_state = MEI_HBM_STOPPED; |
867 | if (mei_hbm_stop_req(dev)) { | 946 | if (mei_hbm_stop_req(dev)) { |
868 | dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); | 947 | dev_err(dev->dev, "hbm: stop request: failed to send stop request\n"); |
869 | return -EIO; | 948 | return -EIO; |
870 | } | 949 | } |
871 | break; | 950 | break; |
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 683eb2835cec..b7cd3d857fd5 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h | |||
@@ -25,29 +25,24 @@ struct mei_cl; | |||
25 | * enum mei_hbm_state - host bus message protocol state | 25 | * enum mei_hbm_state - host bus message protocol state |
26 | * | 26 | * |
27 | * @MEI_HBM_IDLE : protocol not started | 27 | * @MEI_HBM_IDLE : protocol not started |
28 | * @MEI_HBM_START : start request message was sent | 28 | * @MEI_HBM_STARTING : start request message was sent |
29 | * @MEI_HBM_STARTED : start reply message was received | ||
29 | * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent | 30 | * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent |
30 | * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties | 31 | * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties |
32 | * @MEI_HBM_STOPPED : stopping exchange | ||
31 | */ | 33 | */ |
32 | enum mei_hbm_state { | 34 | enum mei_hbm_state { |
33 | MEI_HBM_IDLE = 0, | 35 | MEI_HBM_IDLE = 0, |
34 | MEI_HBM_START, | 36 | MEI_HBM_STARTING, |
35 | MEI_HBM_STARTED, | 37 | MEI_HBM_STARTED, |
36 | MEI_HBM_ENUM_CLIENTS, | 38 | MEI_HBM_ENUM_CLIENTS, |
37 | MEI_HBM_CLIENT_PROPERTIES, | 39 | MEI_HBM_CLIENT_PROPERTIES, |
38 | MEI_HBM_STOPPED, | 40 | MEI_HBM_STOPPED, |
39 | }; | 41 | }; |
40 | 42 | ||
41 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); | 43 | const char *mei_hbm_state_str(enum mei_hbm_state state); |
42 | 44 | ||
43 | static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) | 45 | int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); |
44 | { | ||
45 | hdr->host_addr = 0; | ||
46 | hdr->me_addr = 0; | ||
47 | hdr->length = length; | ||
48 | hdr->msg_complete = 1; | ||
49 | hdr->reserved = 0; | ||
50 | } | ||
51 | 46 | ||
52 | void mei_hbm_idle(struct mei_device *dev); | 47 | void mei_hbm_idle(struct mei_device *dev); |
53 | void mei_hbm_reset(struct mei_device *dev); | 48 | void mei_hbm_reset(struct mei_device *dev); |
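mei_hbm_hdr() is still called throughout the hbm.c hunks above, so dropping the static inline from hbm.h implies the definition now lives in hbm.c. Assuming it kept the body removed here, it would read:

	/* Presumed hbm.c counterpart of the static inline removed from hbm.h;
	 * carried over unchanged as far as this diff shows.
	 */
	static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
	{
		hdr->host_addr = 0;
		hdr->me_addr = 0;
		hdr->length = length;
		hdr->msg_complete = 1;
		hdr->reserved = 0;
	}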
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index a9a0d08f758e..4f2fd6fc1e23 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
@@ -28,10 +28,10 @@ | |||
28 | /** | 28 | /** |
29 | * mei_me_reg_read - Reads 32bit data from the mei device | 29 | * mei_me_reg_read - Reads 32bit data from the mei device |
30 | * | 30 | * |
31 | * @dev: the device structure | 31 | * @hw: the me hardware structure |
32 | * @offset: offset from which to read the data | 32 | * @offset: offset from which to read the data |
33 | * | 33 | * |
34 | * returns register value (u32) | 34 | * Return: register value (u32) |
35 | */ | 35 | */ |
36 | static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, | 36 | static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, |
37 | unsigned long offset) | 37 | unsigned long offset) |
@@ -43,7 +43,7 @@ static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, | |||
43 | /** | 43 | /** |
44 | * mei_me_reg_write - Writes 32bit data to the mei device | 44 | * mei_me_reg_write - Writes 32bit data to the mei device |
45 | * | 45 | * |
46 | * @dev: the device structure | 46 | * @hw: the me hardware structure |
47 | * @offset: offset from which to write the data | 47 | * @offset: offset from which to write the data |
48 | * @value: register value to write (u32) | 48 | * @value: register value to write (u32) |
49 | */ | 49 | */ |
@@ -59,7 +59,7 @@ static inline void mei_me_reg_write(const struct mei_me_hw *hw, | |||
59 | * | 59 | * |
60 | * @dev: the device structure | 60 | * @dev: the device structure |
61 | * | 61 | * |
62 | * returns ME_CB_RW register value (u32) | 62 | * Return: ME_CB_RW register value (u32) |
63 | */ | 63 | */ |
64 | static u32 mei_me_mecbrw_read(const struct mei_device *dev) | 64 | static u32 mei_me_mecbrw_read(const struct mei_device *dev) |
65 | { | 65 | { |
@@ -68,9 +68,9 @@ static u32 mei_me_mecbrw_read(const struct mei_device *dev) | |||
68 | /** | 68 | /** |
69 | * mei_me_mecsr_read - Reads 32bit data from the ME CSR | 69 | * mei_me_mecsr_read - Reads 32bit data from the ME CSR |
70 | * | 70 | * |
71 | * @dev: the device structure | 71 | * @hw: the me hardware structure |
72 | * | 72 | * |
73 | * returns ME_CSR_HA register value (u32) | 73 | * Return: ME_CSR_HA register value (u32) |
74 | */ | 74 | */ |
75 | static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) | 75 | static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) |
76 | { | 76 | { |
@@ -80,9 +80,9 @@ static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) | |||
80 | /** | 80 | /** |
81 | * mei_hcsr_read - Reads 32bit data from the host CSR | 81 | * mei_hcsr_read - Reads 32bit data from the host CSR |
82 | * | 82 | * |
83 | * @dev: the device structure | 83 | * @hw: the me hardware structure |
84 | * | 84 | * |
85 | * returns H_CSR register value (u32) | 85 | * Return: H_CSR register value (u32) |
86 | */ | 86 | */ |
87 | static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) | 87 | static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) |
88 | { | 88 | { |
@@ -93,7 +93,8 @@ static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) | |||
93 | * mei_hcsr_set - writes H_CSR register to the mei device, | 93 | * mei_hcsr_set - writes H_CSR register to the mei device, |
94 | * and ignores the H_IS bit for it is write-one-to-zero. | 94 | * and ignores the H_IS bit for it is write-one-to-zero. |
95 | * | 95 | * |
96 | * @dev: the device structure | 96 | * @hw: the me hardware structure |
97 | * @hcsr: new register value | ||
97 | */ | 98 | */ |
98 | static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) | 99 | static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) |
99 | { | 100 | { |
@@ -101,6 +102,36 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) | |||
101 | mei_me_reg_write(hw, H_CSR, hcsr); | 102 | mei_me_reg_write(hw, H_CSR, hcsr); |
102 | } | 103 | } |
103 | 104 | ||
105 | /** | ||
106 | * mei_me_fw_status - read fw status register from pci config space | ||
107 | * | ||
108 | * @dev: mei device | ||
109 | * @fw_status: fw status register values | ||
110 | * | ||
111 | * Return: 0 on success, error otherwise | ||
112 | */ | ||
113 | static int mei_me_fw_status(struct mei_device *dev, | ||
114 | struct mei_fw_status *fw_status) | ||
115 | { | ||
116 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
117 | struct mei_me_hw *hw = to_me_hw(dev); | ||
118 | const struct mei_fw_status *fw_src = &hw->cfg->fw_status; | ||
119 | int ret; | ||
120 | int i; | ||
121 | |||
122 | if (!fw_status) | ||
123 | return -EINVAL; | ||
124 | |||
125 | fw_status->count = fw_src->count; | ||
126 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | ||
127 | ret = pci_read_config_dword(pdev, | ||
128 | fw_src->status[i], &fw_status->status[i]); | ||
129 | if (ret) | ||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | return 0; | ||
134 | } | ||
104 | 135 | ||
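mei_me_fw_status() above fills a caller-supplied struct mei_fw_status from PCI config space. A hedged usage sketch follows; the calling site and log format are illustrative and not part of this patch:

	/* Illustrative caller only; assumes the mei_fw_status layout used
	 * above (count + status[] array read via pci_read_config_dword).
	 */
	static void mei_me_fw_status_log(struct mei_device *dev)
	{
		struct mei_fw_status fw_status;
		int i, ret;

		ret = mei_me_fw_status(dev, &fw_status);
		if (ret) {
			dev_err(dev->dev, "fw status read failed: %d\n", ret);
			return;
		}

		for (i = 0; i < fw_status.count; i++)
			dev_dbg(dev->dev, "fw status[%d] = 0x%08X\n",
				i, fw_status.status[i]);
	}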
105 | /** | 136 | /** |
106 | * mei_me_hw_config - configure hw dependent settings | 137 | * mei_me_hw_config - configure hw dependent settings |
@@ -121,17 +152,19 @@ static void mei_me_hw_config(struct mei_device *dev) | |||
121 | * mei_me_pg_state - translate internal pg state | 152 | * mei_me_pg_state - translate internal pg state |
122 | * to the mei power gating state | 153 | * to the mei power gating state |
123 | * | 154 | * |
124 | * @hw - me hardware | 155 | * @dev: mei device |
125 | * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise | 156 | * |
157 | * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise | ||
126 | */ | 158 | */ |
127 | static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) | 159 | static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) |
128 | { | 160 | { |
129 | struct mei_me_hw *hw = to_me_hw(dev); | 161 | struct mei_me_hw *hw = to_me_hw(dev); |
162 | |||
130 | return hw->pg_state; | 163 | return hw->pg_state; |
131 | } | 164 | } |
132 | 165 | ||
133 | /** | 166 | /** |
134 | * mei_clear_interrupts - clear and stop interrupts | 167 | * mei_me_intr_clear - clear and stop interrupts |
135 | * | 168 | * |
136 | * @dev: the device structure | 169 | * @dev: the device structure |
137 | */ | 170 | */ |
@@ -139,6 +172,7 @@ static void mei_me_intr_clear(struct mei_device *dev) | |||
139 | { | 172 | { |
140 | struct mei_me_hw *hw = to_me_hw(dev); | 173 | struct mei_me_hw *hw = to_me_hw(dev); |
141 | u32 hcsr = mei_hcsr_read(hw); | 174 | u32 hcsr = mei_hcsr_read(hw); |
175 | |||
142 | if ((hcsr & H_IS) == H_IS) | 176 | if ((hcsr & H_IS) == H_IS) |
143 | mei_me_reg_write(hw, H_CSR, hcsr); | 177 | mei_me_reg_write(hw, H_CSR, hcsr); |
144 | } | 178 | } |
@@ -151,12 +185,13 @@ static void mei_me_intr_enable(struct mei_device *dev) | |||
151 | { | 185 | { |
152 | struct mei_me_hw *hw = to_me_hw(dev); | 186 | struct mei_me_hw *hw = to_me_hw(dev); |
153 | u32 hcsr = mei_hcsr_read(hw); | 187 | u32 hcsr = mei_hcsr_read(hw); |
188 | |||
154 | hcsr |= H_IE; | 189 | hcsr |= H_IE; |
155 | mei_hcsr_set(hw, hcsr); | 190 | mei_hcsr_set(hw, hcsr); |
156 | } | 191 | } |
157 | 192 | ||
158 | /** | 193 | /** |
159 | * mei_disable_interrupts - disables mei device interrupts | 194 | * mei_me_intr_disable - disables mei device interrupts |
160 | * | 195 | * |
161 | * @dev: the device structure | 196 | * @dev: the device structure |
162 | */ | 197 | */ |
@@ -164,6 +199,7 @@ static void mei_me_intr_disable(struct mei_device *dev) | |||
164 | { | 199 | { |
165 | struct mei_me_hw *hw = to_me_hw(dev); | 200 | struct mei_me_hw *hw = to_me_hw(dev); |
166 | u32 hcsr = mei_hcsr_read(hw); | 201 | u32 hcsr = mei_hcsr_read(hw); |
202 | |||
167 | hcsr &= ~H_IE; | 203 | hcsr &= ~H_IE; |
168 | mei_hcsr_set(hw, hcsr); | 204 | mei_hcsr_set(hw, hcsr); |
169 | } | 205 | } |
@@ -190,6 +226,8 @@ static void mei_me_hw_reset_release(struct mei_device *dev) | |||
190 | * | 226 | * |
191 | * @dev: the device structure | 227 | * @dev: the device structure |
192 | * @intr_enable: if interrupt should be enabled after reset. | 228 | * @intr_enable: if interrupt should be enabled after reset. |
229 | * | ||
230 | * Return: always 0 | ||
193 | */ | 231 | */ |
194 | static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | 232 | static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) |
195 | { | 233 | { |
@@ -213,10 +251,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
213 | hcsr = mei_hcsr_read(hw); | 251 | hcsr = mei_hcsr_read(hw); |
214 | 252 | ||
215 | if ((hcsr & H_RST) == 0) | 253 | if ((hcsr & H_RST) == 0) |
216 | dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); | 254 | dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); |
217 | 255 | ||
218 | if ((hcsr & H_RDY) == H_RDY) | 256 | if ((hcsr & H_RDY) == H_RDY) |
219 | dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); | 257 | dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); |
220 | 258 | ||
221 | if (intr_enable == false) | 259 | if (intr_enable == false) |
222 | mei_me_hw_reset_release(dev); | 260 | mei_me_hw_reset_release(dev); |
@@ -227,26 +265,27 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
227 | /** | 265 | /** |
228 | * mei_me_host_set_ready - enable device | 266 | * mei_me_host_set_ready - enable device |
229 | * | 267 | * |
230 | * @dev - mei device | 268 | * @dev: mei device |
231 | * returns bool | ||
232 | */ | 269 | */ |
233 | |||
234 | static void mei_me_host_set_ready(struct mei_device *dev) | 270 | static void mei_me_host_set_ready(struct mei_device *dev) |
235 | { | 271 | { |
236 | struct mei_me_hw *hw = to_me_hw(dev); | 272 | struct mei_me_hw *hw = to_me_hw(dev); |
273 | |||
237 | hw->host_hw_state = mei_hcsr_read(hw); | 274 | hw->host_hw_state = mei_hcsr_read(hw); |
238 | hw->host_hw_state |= H_IE | H_IG | H_RDY; | 275 | hw->host_hw_state |= H_IE | H_IG | H_RDY; |
239 | mei_hcsr_set(hw, hw->host_hw_state); | 276 | mei_hcsr_set(hw, hw->host_hw_state); |
240 | } | 277 | } |
278 | |||
241 | /** | 279 | /** |
242 | * mei_me_host_is_ready - check whether the host has turned ready | 280 | * mei_me_host_is_ready - check whether the host has turned ready |
243 | * | 281 | * |
244 | * @dev - mei device | 282 | * @dev: mei device |
245 | * returns bool | 283 | * Return: bool |
246 | */ | 284 | */ |
247 | static bool mei_me_host_is_ready(struct mei_device *dev) | 285 | static bool mei_me_host_is_ready(struct mei_device *dev) |
248 | { | 286 | { |
249 | struct mei_me_hw *hw = to_me_hw(dev); | 287 | struct mei_me_hw *hw = to_me_hw(dev); |
288 | |||
250 | hw->host_hw_state = mei_hcsr_read(hw); | 289 | hw->host_hw_state = mei_hcsr_read(hw); |
251 | return (hw->host_hw_state & H_RDY) == H_RDY; | 290 | return (hw->host_hw_state & H_RDY) == H_RDY; |
252 | } | 291 | } |
@@ -254,43 +293,53 @@ static bool mei_me_host_is_ready(struct mei_device *dev) | |||
254 | /** | 293 | /** |
255 | * mei_me_hw_is_ready - check whether the me(hw) has turned ready | 294 | * mei_me_hw_is_ready - check whether the me(hw) has turned ready |
256 | * | 295 | * |
257 | * @dev - mei device | 296 | * @dev: mei device |
258 | * returns bool | 297 | * Return: true if the ME hardware is ready |
259 | */ | 298 | */ |
260 | static bool mei_me_hw_is_ready(struct mei_device *dev) | 299 | static bool mei_me_hw_is_ready(struct mei_device *dev) |
261 | { | 300 | { |
262 | struct mei_me_hw *hw = to_me_hw(dev); | 301 | struct mei_me_hw *hw = to_me_hw(dev); |
302 | |||
263 | hw->me_hw_state = mei_me_mecsr_read(hw); | 303 | hw->me_hw_state = mei_me_mecsr_read(hw); |
264 | return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA; | 304 | return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA; |
265 | } | 305 | } |
266 | 306 | ||
307 | /** | ||
308 | * mei_me_hw_ready_wait - wait until the me(hw) has turned ready | ||
309 | * or timeout is reached | ||
310 | * | ||
311 | * @dev: mei device | ||
312 | * Return: 0 on success, error otherwise | ||
313 | */ | ||
267 | static int mei_me_hw_ready_wait(struct mei_device *dev) | 314 | static int mei_me_hw_ready_wait(struct mei_device *dev) |
268 | { | 315 | { |
269 | int err; | ||
270 | |||
271 | mutex_unlock(&dev->device_lock); | 316 | mutex_unlock(&dev->device_lock); |
272 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, | 317 | wait_event_timeout(dev->wait_hw_ready, |
273 | dev->recvd_hw_ready, | 318 | dev->recvd_hw_ready, |
274 | mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); | 319 | mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); |
275 | mutex_lock(&dev->device_lock); | 320 | mutex_lock(&dev->device_lock); |
276 | if (!err && !dev->recvd_hw_ready) { | 321 | if (!dev->recvd_hw_ready) { |
277 | if (!err) | 322 | dev_err(dev->dev, "wait hw ready failed\n"); |
278 | err = -ETIME; | 323 | return -ETIME; |
279 | dev_err(&dev->pdev->dev, | ||
280 | "wait hw ready failed. status = %d\n", err); | ||
281 | return err; | ||
282 | } | 324 | } |
283 | 325 | ||
284 | dev->recvd_hw_ready = false; | 326 | dev->recvd_hw_ready = false; |
285 | return 0; | 327 | return 0; |
286 | } | 328 | } |
287 | 329 | ||
330 | /** | ||
331 | * mei_me_hw_start - hw start routine | ||
332 | * | ||
333 | * @dev: mei device | ||
334 | * Return: 0 on success, error otherwise | ||
335 | */ | ||
288 | static int mei_me_hw_start(struct mei_device *dev) | 336 | static int mei_me_hw_start(struct mei_device *dev) |
289 | { | 337 | { |
290 | int ret = mei_me_hw_ready_wait(dev); | 338 | int ret = mei_me_hw_ready_wait(dev); |
339 | |||
291 | if (ret) | 340 | if (ret) |
292 | return ret; | 341 | return ret; |
293 | dev_dbg(&dev->pdev->dev, "hw is ready\n"); | 342 | dev_dbg(dev->dev, "hw is ready\n"); |
294 | 343 | ||
295 | mei_me_host_set_ready(dev); | 344 | mei_me_host_set_ready(dev); |
296 | return ret; | 345 | return ret; |
@@ -302,7 +351,7 @@ static int mei_me_hw_start(struct mei_device *dev) | |||
302 | * | 351 | * |
303 | * @dev: the device structure | 352 | * @dev: the device structure |
304 | * | 353 | * |
305 | * returns number of filled slots | 354 | * Return: number of filled slots |
306 | */ | 355 | */ |
307 | static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) | 356 | static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) |
308 | { | 357 | { |
@@ -322,7 +371,7 @@ static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) | |||
322 | * | 371 | * |
323 | * @dev: the device structure | 372 | * @dev: the device structure |
324 | * | 373 | * |
325 | * returns true if empty, false - otherwise. | 374 | * Return: true if empty, false - otherwise. |
326 | */ | 375 | */ |
327 | static bool mei_me_hbuf_is_empty(struct mei_device *dev) | 376 | static bool mei_me_hbuf_is_empty(struct mei_device *dev) |
328 | { | 377 | { |
@@ -334,7 +383,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev) | |||
334 | * | 383 | * |
335 | * @dev: the device structure | 384 | * @dev: the device structure |
336 | * | 385 | * |
337 | * returns -EOVERFLOW if overflow, otherwise empty slots count | 386 | * Return: -EOVERFLOW if overflow, otherwise empty slots count |
338 | */ | 387 | */ |
339 | static int mei_me_hbuf_empty_slots(struct mei_device *dev) | 388 | static int mei_me_hbuf_empty_slots(struct mei_device *dev) |
340 | { | 389 | { |
@@ -350,6 +399,13 @@ static int mei_me_hbuf_empty_slots(struct mei_device *dev) | |||
350 | return empty_slots; | 399 | return empty_slots; |
351 | } | 400 | } |
352 | 401 | ||
402 | /** | ||
403 | * mei_me_hbuf_max_len - returns size of hw buffer. | ||
404 | * | ||
405 | * @dev: the device structure | ||
406 | * | ||
407 | * Return: size of hw buffer in bytes | ||
408 | */ | ||
353 | static size_t mei_me_hbuf_max_len(const struct mei_device *dev) | 409 | static size_t mei_me_hbuf_max_len(const struct mei_device *dev) |
354 | { | 410 | { |
355 | return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); | 411 | return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); |
@@ -363,7 +419,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev) | |||
363 | * @header: mei HECI header of message | 419 | * @header: mei HECI header of message |
364 | * @buf: message payload will be written | 420 | * @buf: message payload will be written |
365 | * | 421 | * |
366 | * This function returns -EIO if write has failed | 422 | * Return: -EIO if write has failed |
367 | */ | 423 | */ |
368 | static int mei_me_write_message(struct mei_device *dev, | 424 | static int mei_me_write_message(struct mei_device *dev, |
369 | struct mei_msg_hdr *header, | 425 | struct mei_msg_hdr *header, |
@@ -378,10 +434,10 @@ static int mei_me_write_message(struct mei_device *dev, | |||
378 | int i; | 434 | int i; |
379 | int empty_slots; | 435 | int empty_slots; |
380 | 436 | ||
381 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); | 437 | dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); |
382 | 438 | ||
383 | empty_slots = mei_hbuf_empty_slots(dev); | 439 | empty_slots = mei_hbuf_empty_slots(dev); |
384 | dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots); | 440 | dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); |
385 | 441 | ||
386 | dw_cnt = mei_data2slots(length); | 442 | dw_cnt = mei_data2slots(length); |
387 | if (empty_slots < 0 || dw_cnt > empty_slots) | 443 | if (empty_slots < 0 || dw_cnt > empty_slots) |
@@ -395,6 +451,7 @@ static int mei_me_write_message(struct mei_device *dev, | |||
395 | rem = length & 0x3; | 451 | rem = length & 0x3; |
396 | if (rem > 0) { | 452 | if (rem > 0) { |
397 | u32 reg = 0; | 453 | u32 reg = 0; |
454 | |||
398 | memcpy(®, &buf[length - rem], rem); | 455 | memcpy(®, &buf[length - rem], rem); |
399 | mei_me_reg_write(hw, H_CB_WW, reg); | 456 | mei_me_reg_write(hw, H_CB_WW, reg); |
400 | } | 457 | } |
@@ -412,7 +469,7 @@ static int mei_me_write_message(struct mei_device *dev, | |||
412 | * | 469 | * |
413 | * @dev: the device structure | 470 | * @dev: the device structure |
414 | * | 471 | * |
415 | * returns -EOVERFLOW if overflow, otherwise filled slots count | 472 | * Return: -EOVERFLOW if overflow, otherwise filled slots count |
416 | */ | 473 | */ |
417 | static int mei_me_count_full_read_slots(struct mei_device *dev) | 474 | static int mei_me_count_full_read_slots(struct mei_device *dev) |
418 | { | 475 | { |
@@ -430,7 +487,7 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) | |||
430 | if (filled_slots > buffer_depth) | 487 | if (filled_slots > buffer_depth) |
431 | return -EOVERFLOW; | 488 | return -EOVERFLOW; |
432 | 489 | ||
433 | dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots); | 490 | dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); |
434 | return (int)filled_slots; | 491 | return (int)filled_slots; |
435 | } | 492 | } |
436 | 493 | ||
@@ -440,6 +497,8 @@ static int mei_me_count_full_read_slots(struct mei_device *dev) | |||
440 | * @dev: the device structure | 497 | * @dev: the device structure |
441 | * @buffer: message buffer will be written | 498 | * @buffer: message buffer will be written |
442 | * @buffer_length: message size will be read | 499 | * @buffer_length: message size will be read |
500 | * | ||
501 | * Return: always 0 | ||
443 | */ | 502 | */ |
444 | static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, | 503 | static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, |
445 | unsigned long buffer_length) | 504 | unsigned long buffer_length) |
@@ -453,6 +512,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, | |||
453 | 512 | ||
454 | if (buffer_length > 0) { | 513 | if (buffer_length > 0) { |
455 | u32 reg = mei_me_mecbrw_read(dev); | 514 | u32 reg = mei_me_mecbrw_read(dev); |
515 | |||
456 | memcpy(reg_buf, ®, buffer_length); | 516 | memcpy(reg_buf, ®, buffer_length); |
457 | } | 517 | } |
458 | 518 | ||
@@ -462,7 +522,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, | |||
462 | } | 522 | } |
463 | 523 | ||
464 | /** | 524 | /** |
465 | * mei_me_pg_enter - write pg enter register to mei device. | 525 | * mei_me_pg_enter - write pg enter register |
466 | * | 526 | * |
467 | * @dev: the device structure | 527 | * @dev: the device structure |
468 | */ | 528 | */ |
@@ -470,12 +530,13 @@ static void mei_me_pg_enter(struct mei_device *dev) | |||
470 | { | 530 | { |
471 | struct mei_me_hw *hw = to_me_hw(dev); | 531 | struct mei_me_hw *hw = to_me_hw(dev); |
472 | u32 reg = mei_me_reg_read(hw, H_HPG_CSR); | 532 | u32 reg = mei_me_reg_read(hw, H_HPG_CSR); |
533 | |||
473 | reg |= H_HPG_CSR_PGI; | 534 | reg |= H_HPG_CSR_PGI; |
474 | mei_me_reg_write(hw, H_HPG_CSR, reg); | 535 | mei_me_reg_write(hw, H_HPG_CSR, reg); |
475 | } | 536 | } |
476 | 537 | ||
477 | /** | 538 | /** |
478 | * mei_me_pg_enter - write pg enter register to mei device. | 539 | * mei_me_pg_exit - write pg exit register |
479 | * | 540 | * |
480 | * @dev: the device structure | 541 | * @dev: the device structure |
481 | */ | 542 | */ |
@@ -495,7 +556,7 @@ static void mei_me_pg_exit(struct mei_device *dev) | |||
495 | * | 556 | * |
496 | * @dev: the device structure | 557 | * @dev: the device structure |
497 | * | 558 | * |
498 | * returns 0 on success an error code otherwise | 559 | * Return: 0 on success, an error code otherwise |
499 | */ | 560 | */ |
500 | int mei_me_pg_set_sync(struct mei_device *dev) | 561 | int mei_me_pg_set_sync(struct mei_device *dev) |
501 | { | 562 | { |
@@ -532,7 +593,7 @@ int mei_me_pg_set_sync(struct mei_device *dev) | |||
532 | * | 593 | * |
533 | * @dev: the device structure | 594 | * @dev: the device structure |
534 | * | 595 | * |
535 | * returns 0 on success an error code otherwise | 596 | * Return: 0 on success, an error code otherwise |
536 | */ | 597 | */ |
537 | int mei_me_pg_unset_sync(struct mei_device *dev) | 598 | int mei_me_pg_unset_sync(struct mei_device *dev) |
538 | { | 599 | { |
@@ -569,7 +630,7 @@ reply: | |||
569 | * | 630 | * |
570 | * @dev: the device structure | 631 | * @dev: the device structure |
571 | * | 632 | * |
572 | * returns: true is pg supported, false otherwise | 633 | * Return: true if pg is supported, false otherwise |
573 | */ | 634 | */ |
574 | static bool mei_me_pg_is_enabled(struct mei_device *dev) | 635 | static bool mei_me_pg_is_enabled(struct mei_device *dev) |
575 | { | 636 | { |
@@ -579,17 +640,13 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev) | |||
579 | if ((reg & ME_PGIC_HRA) == 0) | 640 | if ((reg & ME_PGIC_HRA) == 0) |
580 | goto notsupported; | 641 | goto notsupported; |
581 | 642 | ||
582 | if (dev->version.major_version < HBM_MAJOR_VERSION_PGI) | 643 | if (!dev->hbm_f_pg_supported) |
583 | goto notsupported; | ||
584 | |||
585 | if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && | ||
586 | dev->version.minor_version < HBM_MINOR_VERSION_PGI) | ||
587 | goto notsupported; | 644 | goto notsupported; |
588 | 645 | ||
589 | return true; | 646 | return true; |
590 | 647 | ||
591 | notsupported: | 648 | notsupported: |
592 | dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", | 649 | dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", |
593 | !!(reg & ME_PGIC_HRA), | 650 | !!(reg & ME_PGIC_HRA), |
594 | dev->version.major_version, | 651 | dev->version.major_version, |
595 | dev->version.minor_version, | 652 | dev->version.minor_version, |
@@ -605,7 +662,7 @@ notsupported: | |||
605 | * @irq: The irq number | 662 | * @irq: The irq number |
606 | * @dev_id: pointer to the device structure | 663 | * @dev_id: pointer to the device structure |
607 | * | 664 | * |
608 | * returns irqreturn_t | 665 | * Return: irqreturn_t |
609 | */ | 666 | */ |
610 | 667 | ||
611 | irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) | 668 | irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) |
@@ -630,7 +687,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) | |||
630 | * @irq: The irq number | 687 | * @irq: The irq number |
631 | * @dev_id: pointer to the device structure | 688 | * @dev_id: pointer to the device structure |
632 | * | 689 | * |
633 | * returns irqreturn_t | 690 | * Return: irqreturn_t |
634 | * | 691 | * |
635 | */ | 692 | */ |
636 | irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | 693 | irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) |
@@ -640,19 +697,19 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
640 | s32 slots; | 697 | s32 slots; |
641 | int rets = 0; | 698 | int rets = 0; |
642 | 699 | ||
643 | dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); | 700 | dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); |
644 | /* initialize our complete list */ | 701 | /* initialize our complete list */ |
645 | mutex_lock(&dev->device_lock); | 702 | mutex_lock(&dev->device_lock); |
646 | mei_io_list_init(&complete_list); | 703 | mei_io_list_init(&complete_list); |
647 | 704 | ||
648 | /* Ack the interrupt here | 705 | /* Ack the interrupt here |
649 | * In case of MSI we don't go through the quick handler */ | 706 | * In case of MSI we don't go through the quick handler */ |
650 | if (pci_dev_msi_enabled(dev->pdev)) | 707 | if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) |
651 | mei_clear_interrupts(dev); | 708 | mei_clear_interrupts(dev); |
652 | 709 | ||
653 | /* check if ME wants a reset */ | 710 | /* check if ME wants a reset */ |
654 | if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { | 711 | if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { |
655 | dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); | 712 | dev_warn(dev->dev, "FW not ready: resetting.\n"); |
656 | schedule_work(&dev->reset_work); | 713 | schedule_work(&dev->reset_work); |
657 | goto end; | 714 | goto end; |
658 | } | 715 | } |
@@ -661,19 +718,19 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
661 | if (!mei_host_is_ready(dev)) { | 718 | if (!mei_host_is_ready(dev)) { |
662 | if (mei_hw_is_ready(dev)) { | 719 | if (mei_hw_is_ready(dev)) { |
663 | mei_me_hw_reset_release(dev); | 720 | mei_me_hw_reset_release(dev); |
664 | dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); | 721 | dev_dbg(dev->dev, "we need to start the dev.\n"); |
665 | 722 | ||
666 | dev->recvd_hw_ready = true; | 723 | dev->recvd_hw_ready = true; |
667 | wake_up_interruptible(&dev->wait_hw_ready); | 724 | wake_up(&dev->wait_hw_ready); |
668 | } else { | 725 | } else { |
669 | dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); | 726 | dev_dbg(dev->dev, "Spurious Interrupt\n"); |
670 | } | 727 | } |
671 | goto end; | 728 | goto end; |
672 | } | 729 | } |
673 | /* check slots available for reading */ | 730 | /* check slots available for reading */ |
674 | slots = mei_count_full_read_slots(dev); | 731 | slots = mei_count_full_read_slots(dev); |
675 | while (slots > 0) { | 732 | while (slots > 0) { |
676 | dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); | 733 | dev_dbg(dev->dev, "slots to read = %08x\n", slots); |
677 | rets = mei_irq_read_handler(dev, &complete_list, &slots); | 734 | rets = mei_irq_read_handler(dev, &complete_list, &slots); |
678 | /* There is a race between ME write and interrupt delivery: | 735 | /* There is a race between ME write and interrupt delivery: |
679 | * Not all data is always available immediately after the | 736 | * Not all data is always available immediately after the |
@@ -683,7 +740,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
683 | break; | 740 | break; |
684 | 741 | ||
685 | if (rets && dev->dev_state != MEI_DEV_RESETTING) { | 742 | if (rets && dev->dev_state != MEI_DEV_RESETTING) { |
686 | dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n", | 743 | dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", |
687 | rets); | 744 | rets); |
688 | schedule_work(&dev->reset_work); | 745 | schedule_work(&dev->reset_work); |
689 | goto end; | 746 | goto end; |
@@ -705,13 +762,14 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
705 | mei_irq_compl_handler(dev, &complete_list); | 762 | mei_irq_compl_handler(dev, &complete_list); |
706 | 763 | ||
707 | end: | 764 | end: |
708 | dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); | 765 | dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); |
709 | mutex_unlock(&dev->device_lock); | 766 | mutex_unlock(&dev->device_lock); |
710 | return IRQ_HANDLED; | 767 | return IRQ_HANDLED; |
711 | } | 768 | } |
712 | 769 | ||
713 | static const struct mei_hw_ops mei_me_hw_ops = { | 770 | static const struct mei_hw_ops mei_me_hw_ops = { |
714 | 771 | ||
772 | .fw_status = mei_me_fw_status, | ||
715 | .pg_state = mei_me_pg_state, | 773 | .pg_state = mei_me_pg_state, |
716 | 774 | ||
717 | .host_is_ready = mei_me_host_is_ready, | 775 | .host_is_ready = mei_me_host_is_ready, |
@@ -741,6 +799,7 @@ static const struct mei_hw_ops mei_me_hw_ops = { | |||
741 | static bool mei_me_fw_type_nm(struct pci_dev *pdev) | 799 | static bool mei_me_fw_type_nm(struct pci_dev *pdev) |
742 | { | 800 | { |
743 | u32 reg; | 801 | u32 reg; |
802 | |||
744 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); | 803 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); |
745 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ | 804 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ |
746 | return (reg & 0x600) == 0x200; | 805 | return (reg & 0x600) == 0x200; |
@@ -809,23 +868,22 @@ const struct mei_cfg mei_me_lpt_cfg = { | |||
809 | * @pdev: The pci device structure | 868 | * @pdev: The pci device structure |
810 | * @cfg: per device generation config | 869 | * @cfg: per device generation config |
811 | * | 870 | * |
812 | * returns The mei_device_device pointer on success, NULL on failure. | 871 | * Return: The mei_device pointer on success, NULL on failure. |
813 | */ | 872 | */ |
814 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev, | 873 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev, |
815 | const struct mei_cfg *cfg) | 874 | const struct mei_cfg *cfg) |
816 | { | 875 | { |
817 | struct mei_device *dev; | 876 | struct mei_device *dev; |
877 | struct mei_me_hw *hw; | ||
818 | 878 | ||
819 | dev = kzalloc(sizeof(struct mei_device) + | 879 | dev = kzalloc(sizeof(struct mei_device) + |
820 | sizeof(struct mei_me_hw), GFP_KERNEL); | 880 | sizeof(struct mei_me_hw), GFP_KERNEL); |
821 | if (!dev) | 881 | if (!dev) |
822 | return NULL; | 882 | return NULL; |
883 | hw = to_me_hw(dev); | ||
823 | 884 | ||
824 | mei_device_init(dev, cfg); | 885 | mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); |
825 | 886 | hw->cfg = cfg; | |
826 | dev->ops = &mei_me_hw_ops; | ||
827 | |||
828 | dev->pdev = pdev; | ||
829 | return dev; | 887 | return dev; |
830 | } | 888 | } |
831 | 889 | ||
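The reworked mei_me_dev_init() above allocates struct mei_device and the ME-specific struct mei_me_hw in a single kzalloc() and reaches the hardware part through to_me_hw(), so the per-generation config lands in hw->cfg instead of the device carrying a pci_dev pointer. A minimal standalone sketch of that single-allocation pattern, using simplified struct names and a hypothetical to_me_hw() helper rather than the kernel's exact definitions:

#include <stdio.h>
#include <stdlib.h>

struct generic_dev {                /* stands in for struct mei_device */
	long state;                 /* keeps hw[] pointer-aligned in this sketch */
	char hw[];                  /* hw-specific data follows in the same allocation */
};

struct me_hw {                      /* stands in for struct mei_me_hw */
	const void *cfg;            /* per device generation config, as in the diff */
};

/* hypothetical stand-in for to_me_hw(): hw data sits right after the device */
static struct me_hw *to_me_hw(struct generic_dev *dev)
{
	return (struct me_hw *)dev->hw;
}

static struct generic_dev *dev_init(const void *cfg)
{
	struct generic_dev *dev;
	struct me_hw *hw;

	/* one zeroed allocation covers the generic and the hw-specific part */
	dev = calloc(1, sizeof(*dev) + sizeof(*hw));
	if (!dev)
		return NULL;
	hw = to_me_hw(dev);
	hw->cfg = cfg;
	return dev;
}

int main(void)
{
	static const int dummy_cfg = 42;
	struct generic_dev *dev = dev_init(&dummy_cfg);

	printf("cfg stored: %s\n", dev && to_me_hw(dev)->cfg ? "yes" : "no");
	free(dev);
	return 0;
}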
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 12b0f4bbe1f1..e6a59a62573a 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h | |||
@@ -19,14 +19,44 @@ | |||
19 | #ifndef _MEI_INTERFACE_H_ | 19 | #ifndef _MEI_INTERFACE_H_ |
20 | #define _MEI_INTERFACE_H_ | 20 | #define _MEI_INTERFACE_H_ |
21 | 21 | ||
22 | #include <linux/mei.h> | ||
23 | #include <linux/irqreturn.h> | 22 | #include <linux/irqreturn.h> |
23 | #include <linux/pci.h> | ||
24 | #include <linux/mei.h> | ||
25 | |||
24 | #include "mei_dev.h" | 26 | #include "mei_dev.h" |
25 | #include "client.h" | 27 | #include "client.h" |
26 | 28 | ||
29 | /* | ||
30 | * mei_cfg - mei device configuration | ||
31 | * | ||
32 | * @fw_status: FW status | ||
33 | * @quirk_probe: device exclusion quirk | ||
34 | */ | ||
35 | struct mei_cfg { | ||
36 | const struct mei_fw_status fw_status; | ||
37 | bool (*quirk_probe)(struct pci_dev *pdev); | ||
38 | }; | ||
39 | |||
40 | |||
41 | #define MEI_PCI_DEVICE(dev, cfg) \ | ||
42 | .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ | ||
43 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ | ||
44 | .driver_data = (kernel_ulong_t)&(cfg) | ||
45 | |||
46 | |||
27 | #define MEI_ME_RPM_TIMEOUT 500 /* ms */ | 47 | #define MEI_ME_RPM_TIMEOUT 500 /* ms */ |
28 | 48 | ||
49 | /** | ||
50 | * struct mei_me_hw - me hw specific data | ||
51 | * | ||
52 | * @cfg: per device generation config and ops | ||
53 | * @mem_addr: io memory address | ||
54 | * @host_hw_state: cached host state | ||
55 | * @me_hw_state: cached me (fw) state | ||
56 | * @pg_state: power gating state | ||
57 | */ | ||
29 | struct mei_me_hw { | 58 | struct mei_me_hw { |
59 | const struct mei_cfg *cfg; | ||
30 | void __iomem *mem_addr; | 60 | void __iomem *mem_addr; |
31 | /* | 61 | /* |
32 | * hw states of host and fw(ME) | 62 | * hw states of host and fw(ME) |
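The MEI_PCI_DEVICE() macro introduced in hw-me.h above packs the Intel vendor id, a device id, and a pointer to the per-generation struct mei_cfg into one pci_device_id initializer, with the config reachable later through driver_data. A hedged sketch of how such a macro is typically consumed in an id table and probe path; the table name, probe function, driver struct, and 0x1234 device id are illustrative only, not the driver's real PCI glue:

#include <linux/module.h>
#include <linux/pci.h>
#include "hw-me.h"	/* MEI_PCI_DEVICE, struct mei_cfg (declarations assumed here) */

/* illustrative table: each entry ties a PCI device id to a struct mei_cfg */
static const struct pci_device_id example_mei_ids[] = {
	{MEI_PCI_DEVICE(0x1234, mei_me_lpt_cfg)},	/* fictitious device id */
	{0, }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_mei_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* driver_data carries the config selected by the id table match */
	const struct mei_cfg *cfg = (const struct mei_cfg *)ent->driver_data;
	struct mei_device *dev;

	dev = mei_me_dev_init(pdev, cfg);	/* see the hw-me.c hunk above */
	return dev ? 0 : -ENOMEM;
}

static struct pci_driver example_mei_driver = {
	.name = "example_mei",
	.id_table = example_mei_ids,
	.probe = example_probe,		/* no .remove: sketch only, not a working driver */
};
module_pci_driver(example_mei_driver);
MODULE_LICENSE("GPL");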
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index f1cd166094f2..c5e1902e493f 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c | |||
@@ -28,11 +28,12 @@ | |||
28 | #include "hbm.h" | 28 | #include "hbm.h" |
29 | 29 | ||
30 | /** | 30 | /** |
31 | * mei_txe_reg_read - Reads 32bit data from the device | 31 | * mei_txe_reg_read - Reads 32bit data from the txe device |
32 | * | 32 | * |
33 | * @base_addr: registers base address | 33 | * @base_addr: registers base address |
34 | * @offset: register offset | 34 | * @offset: register offset |
35 | * | 35 | * |
36 | * Return: register value | ||
36 | */ | 37 | */ |
37 | static inline u32 mei_txe_reg_read(void __iomem *base_addr, | 38 | static inline u32 mei_txe_reg_read(void __iomem *base_addr, |
38 | unsigned long offset) | 39 | unsigned long offset) |
@@ -41,7 +42,7 @@ static inline u32 mei_txe_reg_read(void __iomem *base_addr, | |||
41 | } | 42 | } |
42 | 43 | ||
43 | /** | 44 | /** |
44 | * mei_txe_reg_write - Writes 32bit data to the device | 45 | * mei_txe_reg_write - Writes 32bit data to the txe device |
45 | * | 46 | * |
46 | * @base_addr: registers base address | 47 | * @base_addr: registers base address |
47 | * @offset: register offset | 48 | * @offset: register offset |
@@ -56,10 +57,12 @@ static inline void mei_txe_reg_write(void __iomem *base_addr, | |||
56 | /** | 57 | /** |
57 | * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR | 58 | * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR |
58 | * | 59 | * |
59 | * @dev: the device structure | 60 | * @hw: the txe hardware structure |
60 | * @offset: register offset | 61 | * @offset: register offset |
61 | * | 62 | * |
62 | * Doesn't check for aliveness while reading 32bit data from the SeC BAR | 63 | * Doesn't check for aliveness while reading 32bit data from the SeC BAR |
64 | * | ||
65 | * Return: register value | ||
63 | */ | 66 | */ |
64 | static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, | 67 | static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, |
65 | unsigned long offset) | 68 | unsigned long offset) |
@@ -70,10 +73,12 @@ static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, | |||
70 | /** | 73 | /** |
71 | * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR | 74 | * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR |
72 | * | 75 | * |
73 | * @dev: the device structure | 76 | * @hw: the txe hardware structure |
74 | * @offset: register offset | 77 | * @offset: register offset |
75 | * | 78 | * |
76 | * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set | 79 | * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set |
80 | * | ||
81 | * Return: register value | ||
77 | */ | 82 | */ |
78 | static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, | 83 | static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, |
79 | unsigned long offset) | 84 | unsigned long offset) |
@@ -85,7 +90,7 @@ static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, | |||
85 | * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR | 90 | * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR |
86 | * doesn't check for aliveness | 91 | * doesn't check for aliveness |
87 | * | 92 | * |
88 | * @dev: the device structure | 93 | * @hw: the txe hardware structure |
89 | * @offset: register offset | 94 | * @offset: register offset |
90 | * @value: value to write | 95 | * @value: value to write |
91 | * | 96 | * |
@@ -100,7 +105,7 @@ static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw, | |||
100 | /** | 105 | /** |
101 | * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR | 106 | * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR |
102 | * | 107 | * |
103 | * @dev: the device structure | 108 | * @hw: the txe hardware structure |
104 | * @offset: register offset | 109 | * @offset: register offset |
105 | * @value: value to write | 110 | * @value: value to write |
106 | * | 111 | * |
@@ -115,9 +120,10 @@ static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw, | |||
115 | /** | 120 | /** |
116 | * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR | 121 | * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR |
117 | * | 122 | * |
118 | * @hw: the device structure | 123 | * @hw: the txe hardware structure |
119 | * @offset: offset from which to read the data | 124 | * @offset: offset from which to read the data |
120 | * | 125 | * |
126 | * Return: the register value read. | ||
121 | */ | 127 | */ |
122 | static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, | 128 | static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, |
123 | unsigned long offset) | 129 | unsigned long offset) |
@@ -128,7 +134,7 @@ static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, | |||
128 | /** | 134 | /** |
129 | * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR | 135 | * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR |
130 | * | 136 | * |
131 | * @hw: the device structure | 137 | * @hw: the txe hardware structure |
132 | * @offset: offset from which to write the data | 138 | * @offset: offset from which to write the data |
133 | * @value: the byte to write | 139 | * @value: the byte to write |
134 | */ | 140 | */ |
@@ -147,7 +153,10 @@ static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw, | |||
147 | * Request for aliveness change and returns true if the change is | 153 | * Request for aliveness change and returns true if the change is |
148 | * really needed and false if aliveness is already | 154 | * really needed and false if aliveness is already |
149 | * in the requested state | 155 | * in the requested state |
150 | * Requires device lock to be held | 156 | * |
157 | * Locking: called under "dev->device_lock" lock | ||
158 | * | ||
159 | * Return: true if request was sent | ||
151 | */ | 160 | */ |
152 | static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) | 161 | static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) |
153 | { | 162 | { |
@@ -155,7 +164,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) | |||
155 | struct mei_txe_hw *hw = to_txe_hw(dev); | 164 | struct mei_txe_hw *hw = to_txe_hw(dev); |
156 | bool do_req = hw->aliveness != req; | 165 | bool do_req = hw->aliveness != req; |
157 | 166 | ||
158 | dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n", | 167 | dev_dbg(dev->dev, "Aliveness current=%d request=%d\n", |
159 | hw->aliveness, req); | 168 | hw->aliveness, req); |
160 | if (do_req) { | 169 | if (do_req) { |
161 | dev->pg_event = MEI_PG_EVENT_WAIT; | 170 | dev->pg_event = MEI_PG_EVENT_WAIT; |
@@ -172,26 +181,31 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) | |||
172 | * | 181 | * |
173 | * Extract HICR_HOST_ALIVENESS_RESP_ACK bit from | 182 | * Extract HICR_HOST_ALIVENESS_RESP_ACK bit from |
174 | * HICR_HOST_ALIVENESS_REQ register value | 183 | * HICR_HOST_ALIVENESS_REQ register value |
184 | * | ||
185 | * Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value | ||
175 | */ | 186 | */ |
176 | static u32 mei_txe_aliveness_req_get(struct mei_device *dev) | 187 | static u32 mei_txe_aliveness_req_get(struct mei_device *dev) |
177 | { | 188 | { |
178 | struct mei_txe_hw *hw = to_txe_hw(dev); | 189 | struct mei_txe_hw *hw = to_txe_hw(dev); |
179 | u32 reg; | 190 | u32 reg; |
191 | |||
180 | reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG); | 192 | reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG); |
181 | return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED; | 193 | return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED; |
182 | } | 194 | } |
183 | 195 | ||
184 | /** | 196 | /** |
185 | * mei_txe_aliveness_get - get aliveness response register value | 197 | * mei_txe_aliveness_get - get aliveness response register value |
198 | * | ||
186 | * @dev: the device structure | 199 | * @dev: the device structure |
187 | * | 200 | * |
188 | * Extract HICR_HOST_ALIVENESS_RESP_ACK bit | 201 | * Return: HICR_HOST_ALIVENESS_RESP_ACK bit from HICR_HOST_ALIVENESS_RESP |
189 | * from HICR_HOST_ALIVENESS_RESP register value | 202 | * register |
190 | */ | 203 | */ |
191 | static u32 mei_txe_aliveness_get(struct mei_device *dev) | 204 | static u32 mei_txe_aliveness_get(struct mei_device *dev) |
192 | { | 205 | { |
193 | struct mei_txe_hw *hw = to_txe_hw(dev); | 206 | struct mei_txe_hw *hw = to_txe_hw(dev); |
194 | u32 reg; | 207 | u32 reg; |
208 | |||
195 | reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG); | 209 | reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG); |
196 | return reg & HICR_HOST_ALIVENESS_RESP_ACK; | 210 | return reg & HICR_HOST_ALIVENESS_RESP_ACK; |
197 | } | 211 | } |
@@ -203,7 +217,8 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev) | |||
203 | * @expected: expected aliveness value | 217 | * @expected: expected aliveness value |
204 | * | 218 | * |
205 | * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set | 219 | * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set |
206 | * returns > 0 if the expected value was received, -ETIME otherwise | 220 | * |
221 | * Return: > 0 if the expected value was received, -ETIME otherwise | ||
207 | */ | 222 | */ |
208 | static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | 223 | static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) |
209 | { | 224 | { |
@@ -214,7 +229,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | |||
214 | hw->aliveness = mei_txe_aliveness_get(dev); | 229 | hw->aliveness = mei_txe_aliveness_get(dev); |
215 | if (hw->aliveness == expected) { | 230 | if (hw->aliveness == expected) { |
216 | dev->pg_event = MEI_PG_EVENT_IDLE; | 231 | dev->pg_event = MEI_PG_EVENT_IDLE; |
217 | dev_dbg(&dev->pdev->dev, | 232 | dev_dbg(dev->dev, |
218 | "aliveness settled after %d msecs\n", t); | 233 | "aliveness settled after %d msecs\n", t); |
219 | return t; | 234 | return t; |
220 | } | 235 | } |
@@ -225,7 +240,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | |||
225 | } while (t < SEC_ALIVENESS_WAIT_TIMEOUT); | 240 | } while (t < SEC_ALIVENESS_WAIT_TIMEOUT); |
226 | 241 | ||
227 | dev->pg_event = MEI_PG_EVENT_IDLE; | 242 | dev->pg_event = MEI_PG_EVENT_IDLE; |
228 | dev_err(&dev->pdev->dev, "aliveness timed out\n"); | 243 | dev_err(dev->dev, "aliveness timed out\n"); |
229 | return -ETIME; | 244 | return -ETIME; |
230 | } | 245 | } |
231 | 246 | ||
@@ -236,7 +251,8 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | |||
236 | * @expected: expected aliveness value | 251 | * @expected: expected aliveness value |
237 | * | 252 | * |
238 | * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set | 253 | * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set |
239 | * returns returns 0 on success and < 0 otherwise | 254 | * |
255 | * Return: 0 on success and < 0 otherwise | ||
240 | */ | 256 | */ |
241 | static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) | 257 | static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) |
242 | { | 258 | { |
@@ -259,10 +275,10 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) | |||
259 | ret = hw->aliveness == expected ? 0 : -ETIME; | 275 | ret = hw->aliveness == expected ? 0 : -ETIME; |
260 | 276 | ||
261 | if (ret) | 277 | if (ret) |
262 | dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", | 278 | dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", |
263 | err, hw->aliveness, dev->pg_event); | 279 | err, hw->aliveness, dev->pg_event); |
264 | else | 280 | else |
265 | dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", | 281 | dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", |
266 | jiffies_to_msecs(timeout - err), | 282 | jiffies_to_msecs(timeout - err), |
267 | hw->aliveness, dev->pg_event); | 283 | hw->aliveness, dev->pg_event); |
268 | 284 | ||
@@ -274,8 +290,9 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) | |||
274 | * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete | 290 | * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete |
275 | * | 291 | * |
276 | * @dev: the device structure | 292 | * @dev: the device structure |
293 | * @req: requested aliveness value | ||
277 | * | 294 | * |
278 | * returns returns 0 on success and < 0 otherwise | 295 | * Return: 0 on success and < 0 otherwise |
279 | */ | 296 | */ |
280 | int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) | 297 | int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) |
281 | { | 298 | { |
@@ -289,7 +306,7 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) | |||
289 | * | 306 | * |
290 | * @dev: the device structure | 307 | * @dev: the device structure |
291 | * | 308 | * |
292 | * returns: true is pg supported, false otherwise | 309 | * Return: true if pg is supported, false otherwise |
293 | */ | 310 | */ |
294 | static bool mei_txe_pg_is_enabled(struct mei_device *dev) | 311 | static bool mei_txe_pg_is_enabled(struct mei_device *dev) |
295 | { | 312 | { |
@@ -302,11 +319,12 @@ static bool mei_txe_pg_is_enabled(struct mei_device *dev) | |||
302 | * | 319 | * |
303 | * @dev: the device structure | 320 | * @dev: the device structure |
304 | * | 321 | * |
305 | * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise | 322 | * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise |
306 | */ | 323 | */ |
307 | static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev) | 324 | static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev) |
308 | { | 325 | { |
309 | struct mei_txe_hw *hw = to_txe_hw(dev); | 326 | struct mei_txe_hw *hw = to_txe_hw(dev); |
327 | |||
310 | return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON; | 328 | return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON; |
311 | } | 329 | } |
312 | 330 | ||
@@ -326,9 +344,10 @@ static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev) | |||
326 | } | 344 | } |
327 | 345 | ||
328 | /** | 346 | /** |
329 | * mei_txe_input_doorbell_set | 347 | * mei_txe_input_doorbell_set - sets bit 0 in |
330 | * - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL. | 348 | * SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL. |
331 | * @dev: the device structure | 349 | * |
350 | * @hw: the txe hardware structure | ||
332 | */ | 351 | */ |
333 | static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) | 352 | static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) |
334 | { | 353 | { |
@@ -340,7 +359,7 @@ static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) | |||
340 | /** | 359 | /** |
341 | * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1 | 360 | * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1 |
342 | * | 361 | * |
343 | * @dev: the device structure | 362 | * @hw: the txe hardware structure |
344 | */ | 363 | */ |
345 | static void mei_txe_output_ready_set(struct mei_txe_hw *hw) | 364 | static void mei_txe_output_ready_set(struct mei_txe_hw *hw) |
346 | { | 365 | { |
@@ -353,11 +372,14 @@ static void mei_txe_output_ready_set(struct mei_txe_hw *hw) | |||
353 | * mei_txe_is_input_ready - check if TXE is ready for receiving data | 372 | * mei_txe_is_input_ready - check if TXE is ready for receiving data |
354 | * | 373 | * |
355 | * @dev: the device structure | 374 | * @dev: the device structure |
375 | * | ||
376 | * Return: true if INPUT STATUS READY bit is set | ||
356 | */ | 377 | */ |
357 | static bool mei_txe_is_input_ready(struct mei_device *dev) | 378 | static bool mei_txe_is_input_ready(struct mei_device *dev) |
358 | { | 379 | { |
359 | struct mei_txe_hw *hw = to_txe_hw(dev); | 380 | struct mei_txe_hw *hw = to_txe_hw(dev); |
360 | u32 status; | 381 | u32 status; |
382 | |||
361 | status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG); | 383 | status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG); |
362 | return !!(SEC_IPC_INPUT_STATUS_RDY & status); | 384 | return !!(SEC_IPC_INPUT_STATUS_RDY & status); |
363 | } | 385 | } |
@@ -370,6 +392,7 @@ static bool mei_txe_is_input_ready(struct mei_device *dev) | |||
370 | static inline void mei_txe_intr_clear(struct mei_device *dev) | 392 | static inline void mei_txe_intr_clear(struct mei_device *dev) |
371 | { | 393 | { |
372 | struct mei_txe_hw *hw = to_txe_hw(dev); | 394 | struct mei_txe_hw *hw = to_txe_hw(dev); |
395 | |||
373 | mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, | 396 | mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, |
374 | SEC_IPC_HOST_INT_STATUS_PENDING); | 397 | SEC_IPC_HOST_INT_STATUS_PENDING); |
375 | mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK); | 398 | mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK); |
@@ -384,6 +407,7 @@ static inline void mei_txe_intr_clear(struct mei_device *dev) | |||
384 | static void mei_txe_intr_disable(struct mei_device *dev) | 407 | static void mei_txe_intr_disable(struct mei_device *dev) |
385 | { | 408 | { |
386 | struct mei_txe_hw *hw = to_txe_hw(dev); | 409 | struct mei_txe_hw *hw = to_txe_hw(dev); |
410 | |||
387 | mei_txe_br_reg_write(hw, HHIER_REG, 0); | 411 | mei_txe_br_reg_write(hw, HHIER_REG, 0); |
388 | mei_txe_br_reg_write(hw, HIER_REG, 0); | 412 | mei_txe_br_reg_write(hw, HIER_REG, 0); |
389 | } | 413 | } |
@@ -395,6 +419,7 @@ static void mei_txe_intr_disable(struct mei_device *dev) | |||
395 | static void mei_txe_intr_enable(struct mei_device *dev) | 419 | static void mei_txe_intr_enable(struct mei_device *dev) |
396 | { | 420 | { |
397 | struct mei_txe_hw *hw = to_txe_hw(dev); | 421 | struct mei_txe_hw *hw = to_txe_hw(dev); |
422 | |||
398 | mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK); | 423 | mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK); |
399 | mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK); | 424 | mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK); |
400 | } | 425 | } |
@@ -407,6 +432,8 @@ static void mei_txe_intr_enable(struct mei_device *dev) | |||
407 | * | 432 | * |
408 | * Checks if there are pending interrupts | 433 | * Checks if there are pending interrupts |
409 | * only Aliveness, Readiness, Input ready, and Output doorbell are relevant | 434 | * only Aliveness, Readiness, Input ready, and Output doorbell are relevant |
435 | * | ||
436 | * Return: true if there are pending interrupts | ||
410 | */ | 437 | */ |
411 | static bool mei_txe_pending_interrupts(struct mei_device *dev) | 438 | static bool mei_txe_pending_interrupts(struct mei_device *dev) |
412 | { | 439 | { |
@@ -418,7 +445,7 @@ static bool mei_txe_pending_interrupts(struct mei_device *dev) | |||
418 | TXE_INTR_OUT_DB)); | 445 | TXE_INTR_OUT_DB)); |
419 | 446 | ||
420 | if (ret) { | 447 | if (ret) { |
421 | dev_dbg(&dev->pdev->dev, | 448 | dev_dbg(dev->dev, |
422 | "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", | 449 | "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", |
423 | !!(hw->intr_cause & TXE_INTR_IN_READY), | 450 | !!(hw->intr_cause & TXE_INTR_IN_READY), |
424 | !!(hw->intr_cause & TXE_INTR_READINESS), | 451 | !!(hw->intr_cause & TXE_INTR_READINESS), |
@@ -440,6 +467,7 @@ static void mei_txe_input_payload_write(struct mei_device *dev, | |||
440 | unsigned long idx, u32 value) | 467 | unsigned long idx, u32 value) |
441 | { | 468 | { |
442 | struct mei_txe_hw *hw = to_txe_hw(dev); | 469 | struct mei_txe_hw *hw = to_txe_hw(dev); |
470 | |||
443 | mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG + | 471 | mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG + |
444 | (idx * sizeof(u32)), value); | 472 | (idx * sizeof(u32)), value); |
445 | } | 473 | } |
@@ -451,12 +479,13 @@ static void mei_txe_input_payload_write(struct mei_device *dev, | |||
451 | * @dev: the device structure | 479 | * @dev: the device structure |
452 | * @idx: index in the device buffer | 480 | * @idx: index in the device buffer |
453 | * | 481 | * |
454 | * returns register value at index | 482 | * Return: register value at index |
455 | */ | 483 | */ |
456 | static u32 mei_txe_out_data_read(const struct mei_device *dev, | 484 | static u32 mei_txe_out_data_read(const struct mei_device *dev, |
457 | unsigned long idx) | 485 | unsigned long idx) |
458 | { | 486 | { |
459 | struct mei_txe_hw *hw = to_txe_hw(dev); | 487 | struct mei_txe_hw *hw = to_txe_hw(dev); |
488 | |||
460 | return mei_txe_br_reg_read(hw, | 489 | return mei_txe_br_reg_read(hw, |
461 | BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32))); | 490 | BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32))); |
462 | } | 491 | } |
@@ -464,26 +493,28 @@ static u32 mei_txe_out_data_read(const struct mei_device *dev, | |||
464 | /* Readiness */ | 493 | /* Readiness */ |
465 | 494 | ||
466 | /** | 495 | /** |
467 | * mei_txe_readiness_set_host_rdy | 496 | * mei_txe_readiness_set_host_rdy - set host readiness bit |
468 | * | 497 | * |
469 | * @dev: the device structure | 498 | * @dev: the device structure |
470 | */ | 499 | */ |
471 | static void mei_txe_readiness_set_host_rdy(struct mei_device *dev) | 500 | static void mei_txe_readiness_set_host_rdy(struct mei_device *dev) |
472 | { | 501 | { |
473 | struct mei_txe_hw *hw = to_txe_hw(dev); | 502 | struct mei_txe_hw *hw = to_txe_hw(dev); |
503 | |||
474 | mei_txe_br_reg_write(hw, | 504 | mei_txe_br_reg_write(hw, |
475 | SICR_HOST_IPC_READINESS_REQ_REG, | 505 | SICR_HOST_IPC_READINESS_REQ_REG, |
476 | SICR_HOST_IPC_READINESS_HOST_RDY); | 506 | SICR_HOST_IPC_READINESS_HOST_RDY); |
477 | } | 507 | } |
478 | 508 | ||
479 | /** | 509 | /** |
480 | * mei_txe_readiness_clear | 510 | * mei_txe_readiness_clear - clear host readiness bit |
481 | * | 511 | * |
482 | * @dev: the device structure | 512 | * @dev: the device structure |
483 | */ | 513 | */ |
484 | static void mei_txe_readiness_clear(struct mei_device *dev) | 514 | static void mei_txe_readiness_clear(struct mei_device *dev) |
485 | { | 515 | { |
486 | struct mei_txe_hw *hw = to_txe_hw(dev); | 516 | struct mei_txe_hw *hw = to_txe_hw(dev); |
517 | |||
487 | mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG, | 518 | mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG, |
488 | SICR_HOST_IPC_READINESS_RDY_CLR); | 519 | SICR_HOST_IPC_READINESS_RDY_CLR); |
489 | } | 520 | } |
@@ -492,10 +523,13 @@ static void mei_txe_readiness_clear(struct mei_device *dev) | |||
492 | * the HICR_SEC_IPC_READINESS register value | 523 | * the HICR_SEC_IPC_READINESS register value |
493 | * | 524 | * |
494 | * @dev: the device structure | 525 | * @dev: the device structure |
526 | * | ||
527 | * Return: the HICR_SEC_IPC_READINESS register value | ||
495 | */ | 528 | */ |
496 | static u32 mei_txe_readiness_get(struct mei_device *dev) | 529 | static u32 mei_txe_readiness_get(struct mei_device *dev) |
497 | { | 530 | { |
498 | struct mei_txe_hw *hw = to_txe_hw(dev); | 531 | struct mei_txe_hw *hw = to_txe_hw(dev); |
532 | |||
499 | return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); | 533 | return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); |
500 | } | 534 | } |
501 | 535 | ||
@@ -504,7 +538,9 @@ static u32 mei_txe_readiness_get(struct mei_device *dev) | |||
504 | * mei_txe_readiness_is_sec_rdy - check readiness | 538 | * mei_txe_readiness_is_sec_rdy - check readiness |
505 | * for HICR_SEC_IPC_READINESS_SEC_RDY | 539 | * for HICR_SEC_IPC_READINESS_SEC_RDY |
506 | * | 540 | * |
507 | * @readiness - cached readiness state | 541 | * @readiness: cached readiness state |
542 | * | ||
543 | * Return: true if readiness bit is set | ||
508 | */ | 544 | */ |
509 | static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) | 545 | static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) |
510 | { | 546 | { |
@@ -515,10 +551,13 @@ static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) | |||
515 | * mei_txe_hw_is_ready - check if the hw is ready | 551 | * mei_txe_hw_is_ready - check if the hw is ready |
516 | * | 552 | * |
517 | * @dev: the device structure | 553 | * @dev: the device structure |
554 | * | ||
555 | * Return: true if sec is ready | ||
518 | */ | 556 | */ |
519 | static bool mei_txe_hw_is_ready(struct mei_device *dev) | 557 | static bool mei_txe_hw_is_ready(struct mei_device *dev) |
520 | { | 558 | { |
521 | u32 readiness = mei_txe_readiness_get(dev); | 559 | u32 readiness = mei_txe_readiness_get(dev); |
560 | |||
522 | return mei_txe_readiness_is_sec_rdy(readiness); | 561 | return mei_txe_readiness_is_sec_rdy(readiness); |
523 | } | 562 | } |
524 | 563 | ||
@@ -526,11 +565,14 @@ static bool mei_txe_hw_is_ready(struct mei_device *dev) | |||
526 | * mei_txe_host_is_ready - check if the host is ready | 565 | * mei_txe_host_is_ready - check if the host is ready |
527 | * | 566 | * |
528 | * @dev: the device structure | 567 | * @dev: the device structure |
568 | * | ||
569 | * Return: true if host is ready | ||
529 | */ | 570 | */ |
530 | static inline bool mei_txe_host_is_ready(struct mei_device *dev) | 571 | static inline bool mei_txe_host_is_ready(struct mei_device *dev) |
531 | { | 572 | { |
532 | struct mei_txe_hw *hw = to_txe_hw(dev); | 573 | struct mei_txe_hw *hw = to_txe_hw(dev); |
533 | u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); | 574 | u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); |
575 | |||
534 | return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY); | 576 | return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY); |
535 | } | 577 | } |
536 | 578 | ||
@@ -539,7 +581,7 @@ static inline bool mei_txe_host_is_ready(struct mei_device *dev) | |||
539 | * | 581 | * |
540 | * @dev: the device structure | 582 | * @dev: the device structure |
541 | * | 583 | * |
542 | * returns 0 on success and -ETIME on timeout | 584 | * Return: 0 on success and -ETIME on timeout |
543 | */ | 585 | */ |
544 | static int mei_txe_readiness_wait(struct mei_device *dev) | 586 | static int mei_txe_readiness_wait(struct mei_device *dev) |
545 | { | 587 | { |
@@ -551,7 +593,7 @@ static int mei_txe_readiness_wait(struct mei_device *dev) | |||
551 | msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); | 593 | msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); |
552 | mutex_lock(&dev->device_lock); | 594 | mutex_lock(&dev->device_lock); |
553 | if (!dev->recvd_hw_ready) { | 595 | if (!dev->recvd_hw_ready) { |
554 | dev_err(&dev->pdev->dev, "wait for readiness failed\n"); | 596 | dev_err(dev->dev, "wait for readiness failed\n"); |
555 | return -ETIME; | 597 | return -ETIME; |
556 | } | 598 | } |
557 | 599 | ||
@@ -559,6 +601,42 @@ static int mei_txe_readiness_wait(struct mei_device *dev) | |||
559 | return 0; | 601 | return 0; |
560 | } | 602 | } |
561 | 603 | ||
604 | static const struct mei_fw_status mei_txe_fw_sts = { | ||
605 | .count = 2, | ||
606 | .status[0] = PCI_CFG_TXE_FW_STS0, | ||
607 | .status[1] = PCI_CFG_TXE_FW_STS1 | ||
608 | }; | ||
609 | |||
610 | /** | ||
611 | * mei_txe_fw_status - read fw status register from pci config space | ||
612 | * | ||
613 | * @dev: mei device | ||
614 | * @fw_status: fw status register values | ||
615 | * | ||
616 | * Return: 0 on success, error otherwise | ||
617 | */ | ||
618 | static int mei_txe_fw_status(struct mei_device *dev, | ||
619 | struct mei_fw_status *fw_status) | ||
620 | { | ||
621 | const struct mei_fw_status *fw_src = &mei_txe_fw_sts; | ||
622 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
623 | int ret; | ||
624 | int i; | ||
625 | |||
626 | if (!fw_status) | ||
627 | return -EINVAL; | ||
628 | |||
629 | fw_status->count = fw_src->count; | ||
630 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | ||
631 | ret = pci_read_config_dword(pdev, | ||
632 | fw_src->status[i], &fw_status->status[i]); | ||
633 | if (ret) | ||
634 | return ret; | ||
635 | } | ||
636 | |||
637 | return 0; | ||
638 | } | ||
639 | |||
562 | /** | 640 | /** |
563 | * mei_txe_hw_config - configure hardware at the start of the devices | 641 | * mei_txe_hw_config - configure hardware at the start of the devices |
564 | * | 642 | * |
@@ -571,13 +649,14 @@ static void mei_txe_hw_config(struct mei_device *dev) | |||
571 | { | 649 | { |
572 | 650 | ||
573 | struct mei_txe_hw *hw = to_txe_hw(dev); | 651 | struct mei_txe_hw *hw = to_txe_hw(dev); |
652 | |||
574 | /* Doesn't change in runtime */ | 653 | /* Doesn't change in runtime */ |
575 | dev->hbuf_depth = PAYLOAD_SIZE / 4; | 654 | dev->hbuf_depth = PAYLOAD_SIZE / 4; |
576 | 655 | ||
577 | hw->aliveness = mei_txe_aliveness_get(dev); | 656 | hw->aliveness = mei_txe_aliveness_get(dev); |
578 | hw->readiness = mei_txe_readiness_get(dev); | 657 | hw->readiness = mei_txe_readiness_get(dev); |
579 | 658 | ||
580 | dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", | 659 | dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", |
581 | hw->aliveness, hw->readiness); | 660 | hw->aliveness, hw->readiness); |
582 | } | 661 | } |
583 | 662 | ||
@@ -588,7 +667,8 @@ static void mei_txe_hw_config(struct mei_device *dev) | |||
588 | * @dev: the device structure | 667 | * @dev: the device structure |
589 | * @header: header of message | 668 | * @header: header of message |
590 | * @buf: message buffer will be written | 669 | * @buf: message buffer will be written |
591 | * returns 1 if success, 0 - otherwise. | 670 | * |
671 | * Return: 0 on success, <0 otherwise. | ||
592 | */ | 672 | */ |
593 | 673 | ||
594 | static int mei_txe_write(struct mei_device *dev, | 674 | static int mei_txe_write(struct mei_device *dev, |
@@ -607,7 +687,7 @@ static int mei_txe_write(struct mei_device *dev, | |||
607 | 687 | ||
608 | length = header->length; | 688 | length = header->length; |
609 | 689 | ||
610 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); | 690 | dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); |
611 | 691 | ||
612 | dw_cnt = mei_data2slots(length); | 692 | dw_cnt = mei_data2slots(length); |
613 | if (dw_cnt > slots) | 693 | if (dw_cnt > slots) |
@@ -621,8 +701,9 @@ static int mei_txe_write(struct mei_device *dev, | |||
621 | 701 | ||
622 | if (!mei_txe_is_input_ready(dev)) { | 702 | if (!mei_txe_is_input_ready(dev)) { |
623 | struct mei_fw_status fw_status; | 703 | struct mei_fw_status fw_status; |
704 | |||
624 | mei_fw_status(dev, &fw_status); | 705 | mei_fw_status(dev, &fw_status); |
625 | dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n", | 706 | dev_err(dev->dev, "Input is not ready " FW_STS_FMT "\n", |
626 | FW_STS_PRM(fw_status)); | 707 | FW_STS_PRM(fw_status)); |
627 | return -EAGAIN; | 708 | return -EAGAIN; |
628 | } | 709 | } |
@@ -635,6 +716,7 @@ static int mei_txe_write(struct mei_device *dev, | |||
635 | rem = length & 0x3; | 716 | rem = length & 0x3; |
636 | if (rem > 0) { | 717 | if (rem > 0) { |
637 | u32 reg = 0; | 718 | u32 reg = 0; |
719 | |||
638 | memcpy(®, &buf[length - rem], rem); | 720 | memcpy(®, &buf[length - rem], rem); |
639 | mei_txe_input_payload_write(dev, i + 1, reg); | 721 | mei_txe_input_payload_write(dev, i + 1, reg); |
640 | } | 722 | } |
@@ -653,7 +735,7 @@ static int mei_txe_write(struct mei_device *dev, | |||
653 | * | 735 | * |
654 | * @dev: the device structure | 736 | * @dev: the device structure |
655 | * | 737 | * |
656 | * returns the PAYLOAD_SIZE - 4 | 738 | * Return: the PAYLOAD_SIZE - 4 |
657 | */ | 739 | */ |
658 | static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) | 740 | static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) |
659 | { | 741 | { |
@@ -665,11 +747,12 @@ static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) | |||
665 | * | 747 | * |
666 | * @dev: the device structure | 748 | * @dev: the device structure |
667 | * | 749 | * |
668 | * returns always hbuf_depth | 750 | * Return: always hbuf_depth |
669 | */ | 751 | */ |
670 | static int mei_txe_hbuf_empty_slots(struct mei_device *dev) | 752 | static int mei_txe_hbuf_empty_slots(struct mei_device *dev) |
671 | { | 753 | { |
672 | struct mei_txe_hw *hw = to_txe_hw(dev); | 754 | struct mei_txe_hw *hw = to_txe_hw(dev); |
755 | |||
673 | return hw->slots; | 756 | return hw->slots; |
674 | } | 757 | } |
675 | 758 | ||
@@ -678,7 +761,7 @@ static int mei_txe_hbuf_empty_slots(struct mei_device *dev) | |||
678 | * | 761 | * |
679 | * @dev: the device structure | 762 | * @dev: the device structure |
680 | * | 763 | * |
681 | * returns always buffer size in dwords count | 764 | * Return: always buffer size in dwords count |
682 | */ | 765 | */ |
683 | static int mei_txe_count_full_read_slots(struct mei_device *dev) | 766 | static int mei_txe_count_full_read_slots(struct mei_device *dev) |
684 | { | 767 | { |
@@ -691,7 +774,7 @@ static int mei_txe_count_full_read_slots(struct mei_device *dev) | |||
691 | * | 774 | * |
692 | * @dev: the device structure | 775 | * @dev: the device structure |
693 | * | 776 | * |
694 | * returns mei message header | 777 | * Return: mei message header |
695 | */ | 778 | */ |
696 | 779 | ||
697 | static u32 mei_txe_read_hdr(const struct mei_device *dev) | 780 | static u32 mei_txe_read_hdr(const struct mei_device *dev) |
@@ -705,33 +788,35 @@ static u32 mei_txe_read_hdr(const struct mei_device *dev) | |||
705 | * @buf: message buffer will be written | 788 | * @buf: message buffer will be written |
706 | * @len: message size will be read | 789 | * @len: message size will be read |
707 | * | 790 | * |
708 | * returns -EINVAL on error wrong argument and 0 on success | 791 | * Return: -EINVAL on wrong argument and 0 on success |
709 | */ | 792 | */ |
710 | static int mei_txe_read(struct mei_device *dev, | 793 | static int mei_txe_read(struct mei_device *dev, |
711 | unsigned char *buf, unsigned long len) | 794 | unsigned char *buf, unsigned long len) |
712 | { | 795 | { |
713 | 796 | ||
714 | struct mei_txe_hw *hw = to_txe_hw(dev); | 797 | struct mei_txe_hw *hw = to_txe_hw(dev); |
798 | u32 *reg_buf, reg; | ||
799 | u32 rem; | ||
715 | u32 i; | 800 | u32 i; |
716 | u32 *reg_buf = (u32 *)buf; | ||
717 | u32 rem = len & 0x3; | ||
718 | 801 | ||
719 | if (WARN_ON(!buf || !len)) | 802 | if (WARN_ON(!buf || !len)) |
720 | return -EINVAL; | 803 | return -EINVAL; |
721 | 804 | ||
722 | dev_dbg(&dev->pdev->dev, | 805 | reg_buf = (u32 *)buf; |
723 | "buffer-length = %lu buf[0]0x%08X\n", | 806 | rem = len & 0x3; |
807 | |||
808 | dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n", | ||
724 | len, mei_txe_out_data_read(dev, 0)); | 809 | len, mei_txe_out_data_read(dev, 0)); |
725 | 810 | ||
726 | for (i = 0; i < len / 4; i++) { | 811 | for (i = 0; i < len / 4; i++) { |
727 | /* skip header: index starts from 1 */ | 812 | /* skip header: index starts from 1 */ |
728 | u32 reg = mei_txe_out_data_read(dev, i + 1); | 813 | reg = mei_txe_out_data_read(dev, i + 1); |
729 | dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg); | 814 | dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg); |
730 | *reg_buf++ = reg; | 815 | *reg_buf++ = reg; |
731 | } | 816 | } |
732 | 817 | ||
733 | if (rem) { | 818 | if (rem) { |
734 | u32 reg = mei_txe_out_data_read(dev, i + 1); | 819 | reg = mei_txe_out_data_read(dev, i + 1); |
735 | memcpy(reg_buf, ®, rem); | 820 | memcpy(reg_buf, ®, rem); |
736 | } | 821 | } |
737 | 822 | ||
@@ -745,7 +830,7 @@ static int mei_txe_read(struct mei_device *dev, | |||
745 | * @dev: the device structure | 830 | * @dev: the device structure |
746 | * @intr_enable: if interrupt should be enabled after reset. | 831 | * @intr_enable: if interrupt should be enabled after reset. |
747 | * | 832 | * |
748 | * returns 0 on success and < 0 in case of error | 833 | * Return: 0 on success and < 0 in case of error |
749 | */ | 834 | */ |
750 | static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) | 835 | static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) |
751 | { | 836 | { |
@@ -771,8 +856,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) | |||
771 | */ | 856 | */ |
772 | if (aliveness_req != hw->aliveness) | 857 | if (aliveness_req != hw->aliveness) |
773 | if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { | 858 | if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { |
774 | dev_err(&dev->pdev->dev, | 859 | dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n"); |
775 | "wait for aliveness settle failed ... bailing out\n"); | ||
776 | return -EIO; | 860 | return -EIO; |
777 | } | 861 | } |
778 | 862 | ||
@@ -782,14 +866,13 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) | |||
782 | if (aliveness_req) { | 866 | if (aliveness_req) { |
783 | mei_txe_aliveness_set(dev, 0); | 867 | mei_txe_aliveness_set(dev, 0); |
784 | if (mei_txe_aliveness_poll(dev, 0) < 0) { | 868 | if (mei_txe_aliveness_poll(dev, 0) < 0) { |
785 | dev_err(&dev->pdev->dev, | 869 | dev_err(dev->dev, "wait for aliveness failed ... bailing out\n"); |
786 | "wait for aliveness failed ... bailing out\n"); | ||
787 | return -EIO; | 870 | return -EIO; |
788 | } | 871 | } |
789 | } | 872 | } |
790 | 873 | ||
791 | /* | 874 | /* |
792 | * Set rediness RDY_CLR bit | 875 | * Set readiness RDY_CLR bit |
793 | */ | 876 | */ |
794 | mei_txe_readiness_clear(dev); | 877 | mei_txe_readiness_clear(dev); |
795 | 878 | ||
@@ -801,7 +884,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) | |||
801 | * | 884 | * |
802 | * @dev: the device structure | 885 | * @dev: the device structure |
803 | * | 886 | * |
804 | * returns 0 on success and < 0 in case of error | 887 | * Return: 0 on success, an error code otherwise |
805 | */ | 888 | */ |
806 | static int mei_txe_hw_start(struct mei_device *dev) | 889 | static int mei_txe_hw_start(struct mei_device *dev) |
807 | { | 890 | { |
@@ -815,7 +898,7 @@ static int mei_txe_hw_start(struct mei_device *dev) | |||
815 | 898 | ||
816 | ret = mei_txe_readiness_wait(dev); | 899 | ret = mei_txe_readiness_wait(dev); |
817 | if (ret < 0) { | 900 | if (ret < 0) { |
818 | dev_err(&dev->pdev->dev, "wating for readiness failed\n"); | 901 | dev_err(dev->dev, "waiting for readiness failed\n"); |
819 | return ret; | 902 | return ret; |
820 | } | 903 | } |
821 | 904 | ||
@@ -831,7 +914,7 @@ static int mei_txe_hw_start(struct mei_device *dev) | |||
831 | 914 | ||
832 | ret = mei_txe_aliveness_set_sync(dev, 1); | 915 | ret = mei_txe_aliveness_set_sync(dev, 1); |
833 | if (ret < 0) { | 916 | if (ret < 0) { |
834 | dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n"); | 917 | dev_err(dev->dev, "wait for aliveness failed ... bailing out\n"); |
835 | return ret; | 918 | return ret; |
836 | } | 919 | } |
837 | 920 | ||
@@ -857,6 +940,8 @@ static int mei_txe_hw_start(struct mei_device *dev) | |||
857 | * | 940 | * |
858 | * @dev: the device structure | 941 | * @dev: the device structure |
859 | * @do_ack: acknowledge interrupts | 942 | * @do_ack: acknowledge interrupts |
943 | * | ||
944 | * Return: true if found interrupts to process. | ||
860 | */ | 945 | */ |
861 | static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) | 946 | static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) |
862 | { | 947 | { |
@@ -912,7 +997,8 @@ out: | |||
912 | * @irq: The irq number | 997 | * @irq: The irq number |
913 | * @dev_id: pointer to the device structure | 998 | * @dev_id: pointer to the device structure |
914 | * | 999 | * |
915 | * returns irqreturn_t | 1000 | * Return: IRQ_WAKE_THREAD if interrupt is designed for the device |
1001 | * IRQ_NONE otherwise | ||
916 | */ | 1002 | */ |
917 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) | 1003 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) |
918 | { | 1004 | { |
@@ -930,8 +1016,7 @@ irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) | |||
930 | * @irq: The irq number | 1016 | * @irq: The irq number |
931 | * @dev_id: pointer to the device structure | 1017 | * @dev_id: pointer to the device structure |
932 | * | 1018 | * |
933 | * returns irqreturn_t | 1019 | * Return: IRQ_HANDLED |
934 | * | ||
935 | */ | 1020 | */ |
936 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | 1021 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) |
937 | { | 1022 | { |
@@ -941,7 +1026,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
941 | s32 slots; | 1026 | s32 slots; |
942 | int rets = 0; | 1027 | int rets = 0; |
943 | 1028 | ||
944 | dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", | 1029 | dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", |
945 | mei_txe_br_reg_read(hw, HHISR_REG), | 1030 | mei_txe_br_reg_read(hw, HHISR_REG), |
946 | mei_txe_br_reg_read(hw, HISR_REG), | 1031 | mei_txe_br_reg_read(hw, HISR_REG), |
947 | mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); | 1032 | mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); |
@@ -951,7 +1036,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
951 | mutex_lock(&dev->device_lock); | 1036 | mutex_lock(&dev->device_lock); |
952 | mei_io_list_init(&complete_list); | 1037 | mei_io_list_init(&complete_list); |
953 | 1038 | ||
954 | if (pci_dev_msi_enabled(dev->pdev)) | 1039 | if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) |
955 | mei_txe_check_and_ack_intrs(dev, true); | 1040 | mei_txe_check_and_ack_intrs(dev, true); |
956 | 1041 | ||
957 | /* show irq events */ | 1042 | /* show irq events */ |
@@ -965,17 +1050,17 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
965 | * or TXE driver resetting the HECI interface. | 1050 | * or TXE driver resetting the HECI interface. |
966 | */ | 1051 | */ |
967 | if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { | 1052 | if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { |
968 | dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n"); | 1053 | dev_dbg(dev->dev, "Readiness Interrupt was received...\n"); |
969 | 1054 | ||
970 | /* Check if SeC is going through reset */ | 1055 | /* Check if SeC is going through reset */ |
971 | if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { | 1056 | if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { |
972 | dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); | 1057 | dev_dbg(dev->dev, "we need to start the dev.\n"); |
973 | dev->recvd_hw_ready = true; | 1058 | dev->recvd_hw_ready = true; |
974 | } else { | 1059 | } else { |
975 | dev->recvd_hw_ready = false; | 1060 | dev->recvd_hw_ready = false; |
976 | if (dev->dev_state != MEI_DEV_RESETTING) { | 1061 | if (dev->dev_state != MEI_DEV_RESETTING) { |
977 | 1062 | ||
978 | dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); | 1063 | dev_warn(dev->dev, "FW not ready: resetting.\n"); |
979 | schedule_work(&dev->reset_work); | 1064 | schedule_work(&dev->reset_work); |
980 | goto end; | 1065 | goto end; |
981 | 1066 | ||
@@ -992,7 +1077,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
992 | 1077 | ||
993 | if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { | 1078 | if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { |
994 | /* Clear the interrupt cause */ | 1079 | /* Clear the interrupt cause */ |
995 | dev_dbg(&dev->pdev->dev, | 1080 | dev_dbg(dev->dev, |
996 | "Aliveness Interrupt: Status: %d\n", hw->aliveness); | 1081 | "Aliveness Interrupt: Status: %d\n", hw->aliveness); |
997 | dev->pg_event = MEI_PG_EVENT_RECEIVED; | 1082 | dev->pg_event = MEI_PG_EVENT_RECEIVED; |
998 | if (waitqueue_active(&hw->wait_aliveness_resp)) | 1083 | if (waitqueue_active(&hw->wait_aliveness_resp)) |
@@ -1008,7 +1093,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
1008 | /* Read from TXE */ | 1093 | /* Read from TXE */ |
1009 | rets = mei_irq_read_handler(dev, &complete_list, &slots); | 1094 | rets = mei_irq_read_handler(dev, &complete_list, &slots); |
1010 | if (rets && dev->dev_state != MEI_DEV_RESETTING) { | 1095 | if (rets && dev->dev_state != MEI_DEV_RESETTING) { |
1011 | dev_err(&dev->pdev->dev, | 1096 | dev_err(dev->dev, |
1012 | "mei_irq_read_handler ret = %d.\n", rets); | 1097 | "mei_irq_read_handler ret = %d.\n", rets); |
1013 | 1098 | ||
1014 | schedule_work(&dev->reset_work); | 1099 | schedule_work(&dev->reset_work); |
@@ -1026,7 +1111,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
1026 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | 1111 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); |
1027 | rets = mei_irq_write_handler(dev, &complete_list); | 1112 | rets = mei_irq_write_handler(dev, &complete_list); |
1028 | if (rets && rets != -EMSGSIZE) | 1113 | if (rets && rets != -EMSGSIZE) |
1029 | dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n", | 1114 | dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n", |
1030 | rets); | 1115 | rets); |
1031 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | 1116 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); |
1032 | } | 1117 | } |
@@ -1034,7 +1119,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
1034 | mei_irq_compl_handler(dev, &complete_list); | 1119 | mei_irq_compl_handler(dev, &complete_list); |
1035 | 1120 | ||
1036 | end: | 1121 | end: |
1037 | dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); | 1122 | dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); |
1038 | 1123 | ||
1039 | mutex_unlock(&dev->device_lock); | 1124 | mutex_unlock(&dev->device_lock); |
1040 | 1125 | ||
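The kernel-doc updates above pin down the contract between the two handlers: the quick handler returns IRQ_WAKE_THREAD only when the interrupt really belongs to this device (IRQ_NONE otherwise), and the thread handler always finishes with IRQ_HANDLED after draining the causes. The registration of this pair is not part of these hunks; the following is only a sketch, assuming the standard threaded-IRQ wiring a probe path would use for handlers with these semantics.

```c
/*
 * Illustrative sketch only -- the real registration code is outside this
 * diff.  Assumes <linux/interrupt.h>, <linux/pci.h> and the mei_txe_*
 * handlers declared in hw-txe.h.
 */
static int example_txe_request_irq(struct pci_dev *pdev, struct mei_device *dev)
{
	/*
	 * With MSI the interrupt line is exclusively ours, so no quick
	 * handler is needed to filter shared-line noise; the thread handler
	 * acks the causes itself (see the pci_dev_msi_enabled() branch above).
	 */
	if (pci_dev_msi_enabled(pdev))
		return request_threaded_irq(pdev->irq, NULL,
					    mei_txe_irq_thread_handler,
					    IRQF_ONESHOT, KBUILD_MODNAME, dev);

	/*
	 * Shared legacy interrupt: the quick handler checks and acks the
	 * hardware cause and wakes the thread only when needed.
	 */
	return request_threaded_irq(pdev->irq,
				    mei_txe_irq_quick_handler,
				    mei_txe_irq_thread_handler,
				    IRQF_SHARED, KBUILD_MODNAME, dev);
}
```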
@@ -1046,6 +1131,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = { | |||
1046 | 1131 | ||
1047 | .host_is_ready = mei_txe_host_is_ready, | 1132 | .host_is_ready = mei_txe_host_is_ready, |
1048 | 1133 | ||
1134 | .fw_status = mei_txe_fw_status, | ||
1049 | .pg_state = mei_txe_pg_state, | 1135 | .pg_state = mei_txe_pg_state, |
1050 | 1136 | ||
1051 | .hw_is_ready = mei_txe_hw_is_ready, | 1137 | .hw_is_ready = mei_txe_hw_is_ready, |
@@ -1072,27 +1158,14 @@ static const struct mei_hw_ops mei_txe_hw_ops = { | |||
1072 | 1158 | ||
1073 | }; | 1159 | }; |
1074 | 1160 | ||
1075 | #define MEI_CFG_TXE_FW_STS \ | ||
1076 | .fw_status.count = 2, \ | ||
1077 | .fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \ | ||
1078 | .fw_status.status[1] = PCI_CFG_TXE_FW_STS1 | ||
1079 | |||
1080 | const struct mei_cfg mei_txe_cfg = { | ||
1081 | MEI_CFG_TXE_FW_STS, | ||
1082 | }; | ||
1083 | |||
1084 | |||
1085 | /** | 1161 | /** |
1086 | * mei_txe_dev_init - allocates and initializes txe hardware specific structure | 1162 | * mei_txe_dev_init - allocates and initializes txe hardware specific structure |
1087 | * | 1163 | * |
1088 | * @pdev - pci device | 1164 | * @pdev: pci device |
1089 | * @cfg - per device generation config | ||
1090 | * | ||
1091 | * returns struct mei_device * on success or NULL; | ||
1092 | * | 1165 | * |
1166 | * Return: struct mei_device * on success or NULL | ||
1093 | */ | 1167 | */ |
1094 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, | 1168 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) |
1095 | const struct mei_cfg *cfg) | ||
1096 | { | 1169 | { |
1097 | struct mei_device *dev; | 1170 | struct mei_device *dev; |
1098 | struct mei_txe_hw *hw; | 1171 | struct mei_txe_hw *hw; |
@@ -1102,15 +1175,12 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, | |||
1102 | if (!dev) | 1175 | if (!dev) |
1103 | return NULL; | 1176 | return NULL; |
1104 | 1177 | ||
1105 | mei_device_init(dev, cfg); | 1178 | mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops); |
1106 | 1179 | ||
1107 | hw = to_txe_hw(dev); | 1180 | hw = to_txe_hw(dev); |
1108 | 1181 | ||
1109 | init_waitqueue_head(&hw->wait_aliveness_resp); | 1182 | init_waitqueue_head(&hw->wait_aliveness_resp); |
1110 | 1183 | ||
1111 | dev->ops = &mei_txe_hw_ops; | ||
1112 | |||
1113 | dev->pdev = pdev; | ||
1114 | return dev; | 1184 | return dev; |
1115 | } | 1185 | } |
1116 | 1186 | ||
@@ -1120,6 +1190,8 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, | |||
1120 | * @dev: the device structure | 1190 | * @dev: the device structure |
1121 | * @addr: physical address start of the range | 1191 | * @addr: physical address start of the range |
1122 | * @range: physical range size | 1192 | * @range: physical range size |
1193 | * | ||
1194 | * Return: 0 on success an error code otherwise | ||
1123 | */ | 1195 | */ |
1124 | int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) | 1196 | int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) |
1125 | { | 1197 | { |
@@ -1151,7 +1223,7 @@ int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) | |||
1151 | mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); | 1223 | mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); |
1152 | mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); | 1224 | mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); |
1153 | mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); | 1225 | mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); |
1154 | dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", | 1226 | dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", |
1155 | range, lo32, ctrl); | 1227 | range, lo32, ctrl); |
1156 | 1228 | ||
1157 | return 0; | 1229 | return 0; |
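With MEI_CFG_TXE_FW_STS and mei_txe_cfg removed, the firmware status registers are no longer described by a per-generation mei_cfg table; the new .fw_status entry in mei_txe_hw_ops is expected to read them directly. The body of mei_txe_fw_status() is not part of these hunks, so the following is only a sketch of what such a handler plausibly looks like, reusing the PCI_CFG_TXE_FW_STS0/1 offsets from the deleted macro and the to_pci_dev(dev->dev) pattern already visible above.

```c
/*
 * Sketch of a .fw_status hw op for TXE hardware (the real
 * mei_txe_fw_status() is outside this diff).  Assumes the driver's
 * mei_dev.h/hw-txe.h definitions and <linux/pci.h>.
 */
static int example_txe_fw_status(struct mei_device *dev,
				 struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	/* the two status words the removed MEI_CFG_TXE_FW_STS table listed */
	static const int fw_sts_off[] = {
		PCI_CFG_TXE_FW_STS0,
		PCI_CFG_TXE_FW_STS1,
	};
	int i, ret;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = ARRAY_SIZE(fw_sts_off);
	for (i = 0; i < fw_status->count; i++) {
		/* read from the PCI config space of the parent device */
		ret = pci_read_config_dword(pdev, fw_sts_off[i],
					    &fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}
```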
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h index e244af79167f..ce3ed0b88b0c 100644 --- a/drivers/misc/mei/hw-txe.h +++ b/drivers/misc/mei/hw-txe.h | |||
@@ -40,6 +40,7 @@ | |||
40 | * @mem_addr: SeC and BRIDGE bars | 40 | * @mem_addr: SeC and BRIDGE bars |
41 | * @aliveness: aliveness (power gating) state of the hardware | 41 | * @aliveness: aliveness (power gating) state of the hardware |
42 | * @readiness: readiness state of the hardware | 42 | * @readiness: readiness state of the hardware |
43 | * @slots: number of empty slots | ||
43 | * @wait_aliveness_resp: aliveness wait queue | 44 | * @wait_aliveness_resp: aliveness wait queue |
44 | * @intr_cause: translated interrupt cause | 45 | * @intr_cause: translated interrupt cause |
45 | */ | 46 | */ |
@@ -61,10 +62,7 @@ static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) | |||
61 | return container_of((void *)hw, struct mei_device, hw); | 62 | return container_of((void *)hw, struct mei_device, hw); |
62 | } | 63 | } |
63 | 64 | ||
64 | extern const struct mei_cfg mei_txe_cfg; | 65 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev); |
65 | |||
66 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, | ||
67 | const struct mei_cfg *cfg); | ||
68 | 66 | ||
69 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); | 67 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); |
70 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); | 68 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); |
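Dropping mei_txe_cfg from the header also simplifies the call site: whoever probes the device now only hands over the pci_dev. Purely as an illustration (the probe path itself is not in this diff):

```c
/* Hypothetical caller of the simplified prototype exported above. */
static struct mei_device *example_probe_alloc(struct pci_dev *pdev)
{
	/* per-generation differences now live in the hw ops, not in a cfg */
	return mei_txe_dev_init(pdev);
}
```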
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index dd448e58cc87..16fef6dc4dd7 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h | |||
@@ -97,23 +97,52 @@ enum mei_stop_reason_types { | |||
97 | SYSTEM_S5_ENTRY = 0x08 | 97 | SYSTEM_S5_ENTRY = 0x08 |
98 | }; | 98 | }; |
99 | 99 | ||
100 | |||
101 | /** | ||
102 | * enum mei_hbm_status - mei host bus messages return values | ||
103 | * | ||
104 | * @MEI_HBMS_SUCCESS : status success | ||
105 | * @MEI_HBMS_CLIENT_NOT_FOUND : client not found | ||
106 | * @MEI_HBMS_ALREADY_EXISTS : connection already established | ||
107 | * @MEI_HBMS_REJECTED : connection is rejected | ||
108 | * @MEI_HBMS_INVALID_PARAMETER : invalid parameter | ||
109 | * @MEI_HBMS_NOT_ALLOWED : operation not allowed | ||
110 | * @MEI_HBMS_ALREADY_STARTED : system is already started | ||
111 | * @MEI_HBMS_NOT_STARTED : system not started | ||
112 | * | ||
113 | * @MEI_HBMS_MAX : sentinel | ||
114 | */ | ||
115 | enum mei_hbm_status { | ||
116 | MEI_HBMS_SUCCESS = 0, | ||
117 | MEI_HBMS_CLIENT_NOT_FOUND = 1, | ||
118 | MEI_HBMS_ALREADY_EXISTS = 2, | ||
119 | MEI_HBMS_REJECTED = 3, | ||
120 | MEI_HBMS_INVALID_PARAMETER = 4, | ||
121 | MEI_HBMS_NOT_ALLOWED = 5, | ||
122 | MEI_HBMS_ALREADY_STARTED = 6, | ||
123 | MEI_HBMS_NOT_STARTED = 7, | ||
124 | |||
125 | MEI_HBMS_MAX | ||
126 | }; | ||
127 | |||
128 | |||
100 | /* | 129 | /* |
101 | * Client Connect Status | 130 | * Client Connect Status |
102 | * used by hbm_client_connect_response.status | 131 | * used by hbm_client_connect_response.status |
103 | */ | 132 | */ |
104 | enum mei_cl_connect_status { | 133 | enum mei_cl_connect_status { |
105 | MEI_CL_CONN_SUCCESS = 0x00, | 134 | MEI_CL_CONN_SUCCESS = MEI_HBMS_SUCCESS, |
106 | MEI_CL_CONN_NOT_FOUND = 0x01, | 135 | MEI_CL_CONN_NOT_FOUND = MEI_HBMS_CLIENT_NOT_FOUND, |
107 | MEI_CL_CONN_ALREADY_STARTED = 0x02, | 136 | MEI_CL_CONN_ALREADY_STARTED = MEI_HBMS_ALREADY_EXISTS, |
108 | MEI_CL_CONN_OUT_OF_RESOURCES = 0x03, | 137 | MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED, |
109 | MEI_CL_CONN_MESSAGE_SMALL = 0x04 | 138 | MEI_CL_CONN_MESSAGE_SMALL = MEI_HBMS_INVALID_PARAMETER, |
110 | }; | 139 | }; |
111 | 140 | ||
112 | /* | 141 | /* |
113 | * Client Disconnect Status | 142 | * Client Disconnect Status |
114 | */ | 143 | */ |
115 | enum mei_cl_disconnect_status { | 144 | enum mei_cl_disconnect_status { |
116 | MEI_CL_DISCONN_SUCCESS = 0x00 | 145 | MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS |
117 | }; | 146 | }; |
118 | 147 | ||
119 | /* | 148 | /* |
@@ -138,10 +167,10 @@ struct mei_bus_message { | |||
138 | * struct hbm_cl_cmd - client specific host bus command | 167 | * struct hbm_cl_cmd - client specific host bus command |
139 | * CONNECT, DISCONNECT, and FlOW CONTROL | 168 | * CONNECT, DISCONNECT, and FlOW CONTROL |
140 | * | 169 | * |
141 | * @hbm_cmd - bus message command header | 170 | * @hbm_cmd: bus message command header |
142 | * @me_addr - address of the client in ME | 171 | * @me_addr: address of the client in ME |
143 | * @host_addr - address of the client in the driver | 172 | * @host_addr: address of the client in the driver |
144 | * @data | 173 | * @data: generic data |
145 | */ | 174 | */ |
146 | struct mei_hbm_cl_cmd { | 175 | struct mei_hbm_cl_cmd { |
147 | u8 hbm_cmd; | 176 | u8 hbm_cmd; |
@@ -206,14 +235,13 @@ struct mei_client_properties { | |||
206 | 235 | ||
207 | struct hbm_props_request { | 236 | struct hbm_props_request { |
208 | u8 hbm_cmd; | 237 | u8 hbm_cmd; |
209 | u8 address; | 238 | u8 me_addr; |
210 | u8 reserved[2]; | 239 | u8 reserved[2]; |
211 | } __packed; | 240 | } __packed; |
212 | 241 | ||
213 | |||
214 | struct hbm_props_response { | 242 | struct hbm_props_response { |
215 | u8 hbm_cmd; | 243 | u8 hbm_cmd; |
216 | u8 address; | 244 | u8 me_addr; |
217 | u8 status; | 245 | u8 status; |
218 | u8 reserved[1]; | 246 | u8 reserved[1]; |
219 | struct mei_client_properties client_properties; | 247 | struct mei_client_properties client_properties; |
@@ -222,8 +250,8 @@ struct hbm_props_response { | |||
222 | /** | 250 | /** |
223 | * struct hbm_power_gate - power gate request/response | 251 | * struct hbm_power_gate - power gate request/response |
224 | * | 252 | * |
225 | * @hbm_cmd - bus message command header | 253 | * @hbm_cmd: bus message command header |
226 | * @reserved[3] | 254 | * @reserved: reserved |
227 | */ | 255 | */ |
228 | struct hbm_power_gate { | 256 | struct hbm_power_gate { |
229 | u8 hbm_cmd; | 257 | u8 hbm_cmd; |
@@ -233,10 +261,10 @@ struct hbm_power_gate { | |||
233 | /** | 261 | /** |
234 | * struct hbm_client_connect_request - connect/disconnect request | 262 | * struct hbm_client_connect_request - connect/disconnect request |
235 | * | 263 | * |
236 | * @hbm_cmd - bus message command header | 264 | * @hbm_cmd: bus message command header |
237 | * @me_addr - address of the client in ME | 265 | * @me_addr: address of the client in ME |
238 | * @host_addr - address of the client in the driver | 266 | * @host_addr: address of the client in the driver |
239 | * @reserved | 267 | * @reserved: reserved |
240 | */ | 268 | */ |
241 | struct hbm_client_connect_request { | 269 | struct hbm_client_connect_request { |
242 | u8 hbm_cmd; | 270 | u8 hbm_cmd; |
@@ -248,10 +276,10 @@ struct hbm_client_connect_request { | |||
248 | /** | 276 | /** |
249 | * struct hbm_client_connect_response - connect/disconnect response | 277 | * struct hbm_client_connect_response - connect/disconnect response |
250 | * | 278 | * |
251 | * @hbm_cmd - bus message command header | 279 | * @hbm_cmd: bus message command header |
252 | * @me_addr - address of the client in ME | 280 | * @me_addr: address of the client in ME |
253 | * @host_addr - address of the client in the driver | 281 | * @host_addr: address of the client in the driver |
254 | * @status - status of the request | 282 | * @status: status of the request |
255 | */ | 283 | */ |
256 | struct hbm_client_connect_response { | 284 | struct hbm_client_connect_response { |
257 | u8 hbm_cmd; | 285 | u8 hbm_cmd; |
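Mapping the client connect/disconnect status values onto the new enum mei_hbm_status means every HBM response now reports status out of a single namespace, which lets callers translate results uniformly. The driver keeps that translation elsewhere; the sketch below is only a hypothetical helper showing the kind of mapping the unified enum enables.

```c
/*
 * Hypothetical helper, not part of this diff: translate a unified HBM
 * status into a negative errno for the caller of a connect request.
 */
static int example_hbm_status_to_errno(enum mei_hbm_status status)
{
	switch (status) {
	case MEI_HBMS_SUCCESS:
		return 0;
	case MEI_HBMS_CLIENT_NOT_FOUND:
		return -ENOTTY;
	case MEI_HBMS_ALREADY_EXISTS:
	case MEI_HBMS_ALREADY_STARTED:
		return -EBUSY;
	case MEI_HBMS_REJECTED:
	case MEI_HBMS_NOT_ALLOWED:
		return -EPERM;
	case MEI_HBMS_INVALID_PARAMETER:
		return -EINVAL;
	default:
		return -EPROTO;
	}
}
```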
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 006929222481..7901d076c127 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -15,7 +15,6 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/export.h> | 17 | #include <linux/export.h> |
18 | #include <linux/pci.h> | ||
19 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
20 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
21 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
@@ -43,13 +42,23 @@ const char *mei_dev_state_str(int state) | |||
43 | #undef MEI_DEV_STATE | 42 | #undef MEI_DEV_STATE |
44 | } | 43 | } |
45 | 44 | ||
45 | const char *mei_pg_state_str(enum mei_pg_state state) | ||
46 | { | ||
47 | #define MEI_PG_STATE(state) case MEI_PG_##state: return #state | ||
48 | switch (state) { | ||
49 | MEI_PG_STATE(OFF); | ||
50 | MEI_PG_STATE(ON); | ||
51 | default: | ||
52 | return "unknown"; | ||
53 | } | ||
54 | #undef MEI_PG_STATE | ||
55 | } | ||
56 | |||
46 | 57 | ||
47 | /** | 58 | /** |
48 | * mei_cancel_work. Cancel mei background jobs | 59 | * mei_cancel_work - Cancel mei background jobs |
49 | * | 60 | * |
50 | * @dev: the device structure | 61 | * @dev: the device structure |
51 | * | ||
52 | * returns 0 on success or < 0 if the reset hasn't succeeded | ||
53 | */ | 62 | */ |
54 | void mei_cancel_work(struct mei_device *dev) | 63 | void mei_cancel_work(struct mei_device *dev) |
55 | { | 64 | { |
@@ -64,6 +73,8 @@ EXPORT_SYMBOL_GPL(mei_cancel_work); | |||
64 | * mei_reset - resets host and fw. | 73 | * mei_reset - resets host and fw. |
65 | * | 74 | * |
66 | * @dev: the device structure | 75 | * @dev: the device structure |
76 | * | ||
77 | * Return: 0 on success or < 0 if the reset hasn't succeeded | ||
67 | */ | 78 | */ |
68 | int mei_reset(struct mei_device *dev) | 79 | int mei_reset(struct mei_device *dev) |
69 | { | 80 | { |
@@ -76,8 +87,9 @@ int mei_reset(struct mei_device *dev) | |||
76 | state != MEI_DEV_POWER_DOWN && | 87 | state != MEI_DEV_POWER_DOWN && |
77 | state != MEI_DEV_POWER_UP) { | 88 | state != MEI_DEV_POWER_UP) { |
78 | struct mei_fw_status fw_status; | 89 | struct mei_fw_status fw_status; |
90 | |||
79 | mei_fw_status(dev, &fw_status); | 91 | mei_fw_status(dev, &fw_status); |
80 | dev_warn(&dev->pdev->dev, | 92 | dev_warn(dev->dev, |
81 | "unexpected reset: dev_state = %s " FW_STS_FMT "\n", | 93 | "unexpected reset: dev_state = %s " FW_STS_FMT "\n", |
82 | mei_dev_state_str(state), FW_STS_PRM(fw_status)); | 94 | mei_dev_state_str(state), FW_STS_PRM(fw_status)); |
83 | } | 95 | } |
@@ -95,7 +107,7 @@ int mei_reset(struct mei_device *dev) | |||
95 | 107 | ||
96 | dev->reset_count++; | 108 | dev->reset_count++; |
97 | if (dev->reset_count > MEI_MAX_CONSEC_RESET) { | 109 | if (dev->reset_count > MEI_MAX_CONSEC_RESET) { |
98 | dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); | 110 | dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); |
99 | dev->dev_state = MEI_DEV_DISABLED; | 111 | dev->dev_state = MEI_DEV_DISABLED; |
100 | return -ENODEV; | 112 | return -ENODEV; |
101 | } | 113 | } |
@@ -116,7 +128,7 @@ int mei_reset(struct mei_device *dev) | |||
116 | mei_cl_all_wakeup(dev); | 128 | mei_cl_all_wakeup(dev); |
117 | 129 | ||
118 | /* remove entry if already in list */ | 130 | /* remove entry if already in list */ |
119 | dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); | 131 | dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n"); |
120 | mei_cl_unlink(&dev->wd_cl); | 132 | mei_cl_unlink(&dev->wd_cl); |
121 | mei_cl_unlink(&dev->iamthif_cl); | 133 | mei_cl_unlink(&dev->iamthif_cl); |
122 | mei_amthif_reset_params(dev); | 134 | mei_amthif_reset_params(dev); |
@@ -128,28 +140,28 @@ int mei_reset(struct mei_device *dev) | |||
128 | dev->wd_pending = false; | 140 | dev->wd_pending = false; |
129 | 141 | ||
130 | if (ret) { | 142 | if (ret) { |
131 | dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); | 143 | dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); |
132 | return ret; | 144 | return ret; |
133 | } | 145 | } |
134 | 146 | ||
135 | if (state == MEI_DEV_POWER_DOWN) { | 147 | if (state == MEI_DEV_POWER_DOWN) { |
136 | dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); | 148 | dev_dbg(dev->dev, "powering down: end of reset\n"); |
137 | dev->dev_state = MEI_DEV_DISABLED; | 149 | dev->dev_state = MEI_DEV_DISABLED; |
138 | return 0; | 150 | return 0; |
139 | } | 151 | } |
140 | 152 | ||
141 | ret = mei_hw_start(dev); | 153 | ret = mei_hw_start(dev); |
142 | if (ret) { | 154 | if (ret) { |
143 | dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); | 155 | dev_err(dev->dev, "hw_start failed ret = %d\n", ret); |
144 | return ret; | 156 | return ret; |
145 | } | 157 | } |
146 | 158 | ||
147 | dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); | 159 | dev_dbg(dev->dev, "link is established start sending messages.\n"); |
148 | 160 | ||
149 | dev->dev_state = MEI_DEV_INIT_CLIENTS; | 161 | dev->dev_state = MEI_DEV_INIT_CLIENTS; |
150 | ret = mei_hbm_start_req(dev); | 162 | ret = mei_hbm_start_req(dev); |
151 | if (ret) { | 163 | if (ret) { |
152 | dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); | 164 | dev_err(dev->dev, "hbm_start failed ret = %d\n", ret); |
153 | dev->dev_state = MEI_DEV_RESETTING; | 165 | dev->dev_state = MEI_DEV_RESETTING; |
154 | return ret; | 166 | return ret; |
155 | } | 167 | } |
@@ -163,11 +175,12 @@ EXPORT_SYMBOL_GPL(mei_reset); | |||
163 | * | 175 | * |
164 | * @dev: the device structure | 176 | * @dev: the device structure |
165 | * | 177 | * |
166 | * returns 0 on success, <0 on failure. | 178 | * Return: 0 on success, <0 on failure. |
167 | */ | 179 | */ |
168 | int mei_start(struct mei_device *dev) | 180 | int mei_start(struct mei_device *dev) |
169 | { | 181 | { |
170 | int ret; | 182 | int ret; |
183 | |||
171 | mutex_lock(&dev->device_lock); | 184 | mutex_lock(&dev->device_lock); |
172 | 185 | ||
173 | /* acknowledge interrupt and stop interrupts */ | 186 | /* acknowledge interrupt and stop interrupts */ |
@@ -175,7 +188,7 @@ int mei_start(struct mei_device *dev) | |||
175 | 188 | ||
176 | mei_hw_config(dev); | 189 | mei_hw_config(dev); |
177 | 190 | ||
178 | dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); | 191 | dev_dbg(dev->dev, "reset in start the mei device.\n"); |
179 | 192 | ||
180 | dev->reset_count = 0; | 193 | dev->reset_count = 0; |
181 | do { | 194 | do { |
@@ -183,43 +196,43 @@ int mei_start(struct mei_device *dev) | |||
183 | ret = mei_reset(dev); | 196 | ret = mei_reset(dev); |
184 | 197 | ||
185 | if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { | 198 | if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { |
186 | dev_err(&dev->pdev->dev, "reset failed ret = %d", ret); | 199 | dev_err(dev->dev, "reset failed ret = %d", ret); |
187 | goto err; | 200 | goto err; |
188 | } | 201 | } |
189 | } while (ret); | 202 | } while (ret); |
190 | 203 | ||
191 | /* we cannot start the device w/o hbm start message completed */ | 204 | /* we cannot start the device w/o hbm start message completed */ |
192 | if (dev->dev_state == MEI_DEV_DISABLED) { | 205 | if (dev->dev_state == MEI_DEV_DISABLED) { |
193 | dev_err(&dev->pdev->dev, "reset failed"); | 206 | dev_err(dev->dev, "reset failed"); |
194 | goto err; | 207 | goto err; |
195 | } | 208 | } |
196 | 209 | ||
197 | if (mei_hbm_start_wait(dev)) { | 210 | if (mei_hbm_start_wait(dev)) { |
198 | dev_err(&dev->pdev->dev, "HBM haven't started"); | 211 | dev_err(dev->dev, "HBM haven't started"); |
199 | goto err; | 212 | goto err; |
200 | } | 213 | } |
201 | 214 | ||
202 | if (!mei_host_is_ready(dev)) { | 215 | if (!mei_host_is_ready(dev)) { |
203 | dev_err(&dev->pdev->dev, "host is not ready.\n"); | 216 | dev_err(dev->dev, "host is not ready.\n"); |
204 | goto err; | 217 | goto err; |
205 | } | 218 | } |
206 | 219 | ||
207 | if (!mei_hw_is_ready(dev)) { | 220 | if (!mei_hw_is_ready(dev)) { |
208 | dev_err(&dev->pdev->dev, "ME is not ready.\n"); | 221 | dev_err(dev->dev, "ME is not ready.\n"); |
209 | goto err; | 222 | goto err; |
210 | } | 223 | } |
211 | 224 | ||
212 | if (!mei_hbm_version_is_supported(dev)) { | 225 | if (!mei_hbm_version_is_supported(dev)) { |
213 | dev_dbg(&dev->pdev->dev, "MEI start failed.\n"); | 226 | dev_dbg(dev->dev, "MEI start failed.\n"); |
214 | goto err; | 227 | goto err; |
215 | } | 228 | } |
216 | 229 | ||
217 | dev_dbg(&dev->pdev->dev, "link layer has been established.\n"); | 230 | dev_dbg(dev->dev, "link layer has been established.\n"); |
218 | 231 | ||
219 | mutex_unlock(&dev->device_lock); | 232 | mutex_unlock(&dev->device_lock); |
220 | return 0; | 233 | return 0; |
221 | err: | 234 | err: |
222 | dev_err(&dev->pdev->dev, "link layer initialization failed.\n"); | 235 | dev_err(dev->dev, "link layer initialization failed.\n"); |
223 | dev->dev_state = MEI_DEV_DISABLED; | 236 | dev->dev_state = MEI_DEV_DISABLED; |
224 | mutex_unlock(&dev->device_lock); | 237 | mutex_unlock(&dev->device_lock); |
225 | return -ENODEV; | 238 | return -ENODEV; |
@@ -231,7 +244,7 @@ EXPORT_SYMBOL_GPL(mei_start); | |||
231 | * | 244 | * |
232 | * @dev: the device structure | 245 | * @dev: the device structure |
233 | * | 246 | * |
234 | * returns 0 on success or -ENODEV if the restart hasn't succeeded | 247 | * Return: 0 on success or -ENODEV if the restart hasn't succeeded |
235 | */ | 248 | */ |
236 | int mei_restart(struct mei_device *dev) | 249 | int mei_restart(struct mei_device *dev) |
237 | { | 250 | { |
@@ -249,7 +262,7 @@ int mei_restart(struct mei_device *dev) | |||
249 | mutex_unlock(&dev->device_lock); | 262 | mutex_unlock(&dev->device_lock); |
250 | 263 | ||
251 | if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { | 264 | if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { |
252 | dev_err(&dev->pdev->dev, "device disabled = %d\n", err); | 265 | dev_err(dev->dev, "device disabled = %d\n", err); |
253 | return -ENODEV; | 266 | return -ENODEV; |
254 | } | 267 | } |
255 | 268 | ||
@@ -275,7 +288,7 @@ static void mei_reset_work(struct work_struct *work) | |||
275 | mutex_unlock(&dev->device_lock); | 288 | mutex_unlock(&dev->device_lock); |
276 | 289 | ||
277 | if (dev->dev_state == MEI_DEV_DISABLED) { | 290 | if (dev->dev_state == MEI_DEV_DISABLED) { |
278 | dev_err(&dev->pdev->dev, "device disabled = %d\n", ret); | 291 | dev_err(dev->dev, "device disabled = %d\n", ret); |
279 | return; | 292 | return; |
280 | } | 293 | } |
281 | 294 | ||
@@ -286,7 +299,7 @@ static void mei_reset_work(struct work_struct *work) | |||
286 | 299 | ||
287 | void mei_stop(struct mei_device *dev) | 300 | void mei_stop(struct mei_device *dev) |
288 | { | 301 | { |
289 | dev_dbg(&dev->pdev->dev, "stopping the device.\n"); | 302 | dev_dbg(dev->dev, "stopping the device.\n"); |
290 | 303 | ||
291 | mei_cancel_work(dev); | 304 | mei_cancel_work(dev); |
292 | 305 | ||
@@ -312,7 +325,7 @@ EXPORT_SYMBOL_GPL(mei_stop); | |||
312 | * | 325 | * |
313 | * @dev: the device structure | 326 | * @dev: the device structure |
314 | * | 327 | * |
315 | * returns true of there is no pending write | 328 | * Return: true of there is no pending write |
316 | */ | 329 | */ |
317 | bool mei_write_is_idle(struct mei_device *dev) | 330 | bool mei_write_is_idle(struct mei_device *dev) |
318 | { | 331 | { |
@@ -320,7 +333,7 @@ bool mei_write_is_idle(struct mei_device *dev) | |||
320 | list_empty(&dev->ctrl_wr_list.list) && | 333 | list_empty(&dev->ctrl_wr_list.list) && |
321 | list_empty(&dev->write_list.list)); | 334 | list_empty(&dev->write_list.list)); |
322 | 335 | ||
323 | dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", | 336 | dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", |
324 | idle, | 337 | idle, |
325 | mei_dev_state_str(dev->dev_state), | 338 | mei_dev_state_str(dev->dev_state), |
326 | list_empty(&dev->ctrl_wr_list.list), | 339 | list_empty(&dev->ctrl_wr_list.list), |
@@ -330,36 +343,25 @@ bool mei_write_is_idle(struct mei_device *dev) | |||
330 | } | 343 | } |
331 | EXPORT_SYMBOL_GPL(mei_write_is_idle); | 344 | EXPORT_SYMBOL_GPL(mei_write_is_idle); |
332 | 345 | ||
333 | int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) | 346 | /** |
334 | { | 347 | * mei_device_init -- initialize mei_device structure |
335 | int i; | 348 | * |
336 | const struct mei_fw_status *fw_src = &dev->cfg->fw_status; | 349 | * @dev: the mei device |
337 | 350 | * @device: the device structure | |
338 | if (!fw_status) | 351 | * @hw_ops: hw operations |
339 | return -EINVAL; | 352 | */ |
340 | 353 | void mei_device_init(struct mei_device *dev, | |
341 | fw_status->count = fw_src->count; | 354 | struct device *device, |
342 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | 355 | const struct mei_hw_ops *hw_ops) |
343 | int ret; | ||
344 | ret = pci_read_config_dword(dev->pdev, | ||
345 | fw_src->status[i], &fw_status->status[i]); | ||
346 | if (ret) | ||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | EXPORT_SYMBOL_GPL(mei_fw_status); | ||
353 | |||
354 | void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) | ||
355 | { | 356 | { |
356 | /* setup our list array */ | 357 | /* setup our list array */ |
357 | INIT_LIST_HEAD(&dev->file_list); | 358 | INIT_LIST_HEAD(&dev->file_list); |
358 | INIT_LIST_HEAD(&dev->device_list); | 359 | INIT_LIST_HEAD(&dev->device_list); |
360 | INIT_LIST_HEAD(&dev->me_clients); | ||
359 | mutex_init(&dev->device_lock); | 361 | mutex_init(&dev->device_lock); |
360 | init_waitqueue_head(&dev->wait_hw_ready); | 362 | init_waitqueue_head(&dev->wait_hw_ready); |
361 | init_waitqueue_head(&dev->wait_pg); | 363 | init_waitqueue_head(&dev->wait_pg); |
362 | init_waitqueue_head(&dev->wait_recvd_msg); | 364 | init_waitqueue_head(&dev->wait_hbm_start); |
363 | init_waitqueue_head(&dev->wait_stop_wd); | 365 | init_waitqueue_head(&dev->wait_stop_wd); |
364 | dev->dev_state = MEI_DEV_INITIALIZING; | 366 | dev->dev_state = MEI_DEV_INITIALIZING; |
365 | dev->reset_count = 0; | 367 | dev->reset_count = 0; |
@@ -389,7 +391,8 @@ void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) | |||
389 | bitmap_set(dev->host_clients_map, 0, 1); | 391 | bitmap_set(dev->host_clients_map, 0, 1); |
390 | 392 | ||
391 | dev->pg_event = MEI_PG_EVENT_IDLE; | 393 | dev->pg_event = MEI_PG_EVENT_IDLE; |
392 | dev->cfg = cfg; | 394 | dev->ops = hw_ops; |
395 | dev->dev = device; | ||
393 | } | 396 | } |
394 | EXPORT_SYMBOL_GPL(mei_device_init); | 397 | EXPORT_SYMBOL_GPL(mei_device_init); |
395 | 398 | ||
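mei_device_init() now receives the parent struct device and the hw ops table and simply stores them, and the PCI-specific mei_fw_status() implementation is gone from init.c altogether. The natural consequence (the header side is not shown in this diff) is that the generic accessor becomes a thin dispatcher over dev->ops, along the lines of the sketch below; the exact wrapper is an assumption.

```c
/*
 * Presumed shape of the generic accessor once the PCI-specific reader is
 * removed from init.c: dispatch to the per-hardware .fw_status op that
 * backends such as mei_txe_hw_ops now provide.  Not taken from this diff.
 */
static inline int example_mei_fw_status(struct mei_device *dev,
					struct mei_fw_status *fw_status)
{
	return dev->ops->fw_status(dev, fw_status);
}
```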
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 4e3cba6da3f5..20c6c511f438 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
@@ -16,11 +16,11 @@ | |||
16 | 16 | ||
17 | 17 | ||
18 | #include <linux/export.h> | 18 | #include <linux/export.h> |
19 | #include <linux/pci.h> | ||
20 | #include <linux/kthread.h> | 19 | #include <linux/kthread.h> |
21 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
22 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
23 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
23 | #include <linux/slab.h> | ||
24 | 24 | ||
25 | #include <linux/mei.h> | 25 | #include <linux/mei.h> |
26 | 26 | ||
@@ -33,8 +33,8 @@ | |||
33 | * mei_irq_compl_handler - dispatch complete handlers | 33 | * mei_irq_compl_handler - dispatch complete handlers |
34 | * for the completed callbacks | 34 | * for the completed callbacks |
35 | * | 35 | * |
36 | * @dev - mei device | 36 | * @dev: mei device |
37 | * @compl_list - list of completed cbs | 37 | * @compl_list: list of completed cbs |
38 | */ | 38 | */ |
39 | void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) | 39 | void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) |
40 | { | 40 | { |
@@ -47,7 +47,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) | |||
47 | if (!cl) | 47 | if (!cl) |
48 | continue; | 48 | continue; |
49 | 49 | ||
50 | dev_dbg(&dev->pdev->dev, "completing call back.\n"); | 50 | dev_dbg(dev->dev, "completing call back.\n"); |
51 | if (cl == &dev->iamthif_cl) | 51 | if (cl == &dev->iamthif_cl) |
52 | mei_amthif_complete(dev, cb); | 52 | mei_amthif_complete(dev, cb); |
53 | else | 53 | else |
@@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(mei_irq_compl_handler); | |||
62 | * @cl: host client | 62 | * @cl: host client |
63 | * @mei_hdr: header of mei client message | 63 | * @mei_hdr: header of mei client message |
64 | * | 64 | * |
65 | * returns true if matches, false otherwise | 65 | * Return: true if matches, false otherwise |
66 | */ | 66 | */ |
67 | static inline int mei_cl_hbm_equal(struct mei_cl *cl, | 67 | static inline int mei_cl_hbm_equal(struct mei_cl *cl, |
68 | struct mei_msg_hdr *mei_hdr) | 68 | struct mei_msg_hdr *mei_hdr) |
@@ -72,12 +72,12 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, | |||
72 | } | 72 | } |
73 | /** | 73 | /** |
74 | * mei_cl_is_reading - checks if the client | 74 | * mei_cl_is_reading - checks if the client |
75 | is the one to read this message | 75 | * is the one to read this message |
76 | * | 76 | * |
77 | * @cl: mei client | 77 | * @cl: mei client |
78 | * @mei_hdr: header of mei message | 78 | * @mei_hdr: header of mei message |
79 | * | 79 | * |
80 | * returns true on match and false otherwise | 80 | * Return: true on match and false otherwise |
81 | */ | 81 | */ |
82 | static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) | 82 | static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) |
83 | { | 83 | { |
@@ -87,13 +87,13 @@ static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) | |||
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * mei_irq_read_client_message - process client message | 90 | * mei_cl_irq_read_msg - process client message |
91 | * | 91 | * |
92 | * @dev: the device structure | 92 | * @dev: the device structure |
93 | * @mei_hdr: header of mei client message | 93 | * @mei_hdr: header of mei client message |
94 | * @complete_list: An instance of our list structure | 94 | * @complete_list: An instance of our list structure |
95 | * | 95 | * |
96 | * returns 0 on success, <0 on failure. | 96 | * Return: 0 on success, <0 on failure. |
97 | */ | 97 | */ |
98 | static int mei_cl_irq_read_msg(struct mei_device *dev, | 98 | static int mei_cl_irq_read_msg(struct mei_device *dev, |
99 | struct mei_msg_hdr *mei_hdr, | 99 | struct mei_msg_hdr *mei_hdr, |
@@ -126,7 +126,6 @@ static int mei_cl_irq_read_msg(struct mei_device *dev, | |||
126 | GFP_KERNEL); | 126 | GFP_KERNEL); |
127 | 127 | ||
128 | if (!buffer) { | 128 | if (!buffer) { |
129 | cl_err(dev, cl, "allocation failed.\n"); | ||
130 | list_del(&cb->list); | 129 | list_del(&cb->list); |
131 | return -ENOMEM; | 130 | return -ENOMEM; |
132 | } | 131 | } |
@@ -149,10 +148,10 @@ static int mei_cl_irq_read_msg(struct mei_device *dev, | |||
149 | break; | 148 | break; |
150 | } | 149 | } |
151 | 150 | ||
152 | dev_dbg(&dev->pdev->dev, "message read\n"); | 151 | dev_dbg(dev->dev, "message read\n"); |
153 | if (!buffer) { | 152 | if (!buffer) { |
154 | mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); | 153 | mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); |
155 | dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n", | 154 | dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", |
156 | MEI_HDR_PRM(mei_hdr)); | 155 | MEI_HDR_PRM(mei_hdr)); |
157 | } | 156 | } |
158 | 157 | ||
@@ -166,7 +165,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev, | |||
166 | * @cb: callback block. | 165 | * @cb: callback block. |
167 | * @cmpl_list: complete list. | 166 | * @cmpl_list: complete list. |
168 | * | 167 | * |
169 | * returns 0, OK; otherwise, error. | 168 | * Return: 0, OK; otherwise, error. |
170 | */ | 169 | */ |
171 | static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, | 170 | static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, |
172 | struct mei_cl_cb *cmpl_list) | 171 | struct mei_cl_cb *cmpl_list) |
@@ -195,16 +194,16 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
195 | 194 | ||
196 | 195 | ||
197 | /** | 196 | /** |
198 | * mei_cl_irq_close - processes close related operation from | 197 | * mei_cl_irq_disconnect - processes close related operation from |
199 | * interrupt thread context - send disconnect request | 198 | * interrupt thread context - send disconnect request |
200 | * | 199 | * |
201 | * @cl: client | 200 | * @cl: client |
202 | * @cb: callback block. | 201 | * @cb: callback block. |
203 | * @cmpl_list: complete list. | 202 | * @cmpl_list: complete list. |
204 | * | 203 | * |
205 | * returns 0, OK; otherwise, error. | 204 | * Return: 0, OK; otherwise, error. |
206 | */ | 205 | */ |
207 | static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, | 206 | static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, |
208 | struct mei_cl_cb *cmpl_list) | 207 | struct mei_cl_cb *cmpl_list) |
209 | { | 208 | { |
210 | struct mei_device *dev = cl->dev; | 209 | struct mei_device *dev = cl->dev; |
@@ -235,14 +234,14 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
235 | 234 | ||
236 | 235 | ||
237 | /** | 236 | /** |
238 | * mei_cl_irq_close - processes client read related operation from the | 237 | * mei_cl_irq_read - processes client read related operation from the |
239 | * interrupt thread context - request for flow control credits | 238 | * interrupt thread context - request for flow control credits |
240 | * | 239 | * |
241 | * @cl: client | 240 | * @cl: client |
242 | * @cb: callback block. | 241 | * @cb: callback block. |
243 | * @cmpl_list: complete list. | 242 | * @cmpl_list: complete list. |
244 | * | 243 | * |
245 | * returns 0, OK; otherwise, error. | 244 | * Return: 0, OK; otherwise, error. |
246 | */ | 245 | */ |
247 | static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, | 246 | static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, |
248 | struct mei_cl_cb *cmpl_list) | 247 | struct mei_cl_cb *cmpl_list) |
@@ -279,7 +278,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
279 | * @cb: callback block. | 278 | * @cb: callback block. |
280 | * @cmpl_list: complete list. | 279 | * @cmpl_list: complete list. |
281 | * | 280 | * |
282 | * returns 0, OK; otherwise, error. | 281 | * Return: 0, OK; otherwise, error. |
283 | */ | 282 | */ |
284 | static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | 283 | static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, |
285 | struct mei_cl_cb *cmpl_list) | 284 | struct mei_cl_cb *cmpl_list) |
@@ -322,7 +321,7 @@ static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
322 | * @cmpl_list: An instance of our list structure | 321 | * @cmpl_list: An instance of our list structure |
323 | * @slots: slots to read. | 322 | * @slots: slots to read. |
324 | * | 323 | * |
325 | * returns 0 on success, <0 on failure. | 324 | * Return: 0 on success, <0 on failure. |
326 | */ | 325 | */ |
327 | int mei_irq_read_handler(struct mei_device *dev, | 326 | int mei_irq_read_handler(struct mei_device *dev, |
328 | struct mei_cl_cb *cmpl_list, s32 *slots) | 327 | struct mei_cl_cb *cmpl_list, s32 *slots) |
@@ -334,20 +333,20 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
334 | if (!dev->rd_msg_hdr) { | 333 | if (!dev->rd_msg_hdr) { |
335 | dev->rd_msg_hdr = mei_read_hdr(dev); | 334 | dev->rd_msg_hdr = mei_read_hdr(dev); |
336 | (*slots)--; | 335 | (*slots)--; |
337 | dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); | 336 | dev_dbg(dev->dev, "slots =%08x.\n", *slots); |
338 | } | 337 | } |
339 | mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; | 338 | mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; |
340 | dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); | 339 | dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); |
341 | 340 | ||
342 | if (mei_hdr->reserved || !dev->rd_msg_hdr) { | 341 | if (mei_hdr->reserved || !dev->rd_msg_hdr) { |
343 | dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", | 342 | dev_err(dev->dev, "corrupted message header 0x%08X\n", |
344 | dev->rd_msg_hdr); | 343 | dev->rd_msg_hdr); |
345 | ret = -EBADMSG; | 344 | ret = -EBADMSG; |
346 | goto end; | 345 | goto end; |
347 | } | 346 | } |
348 | 347 | ||
349 | if (mei_slots2data(*slots) < mei_hdr->length) { | 348 | if (mei_slots2data(*slots) < mei_hdr->length) { |
350 | dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", | 349 | dev_err(dev->dev, "less data available than length=%08x.\n", |
351 | *slots); | 350 | *slots); |
352 | /* we can't read the message */ | 351 | /* we can't read the message */ |
353 | ret = -ENODATA; | 352 | ret = -ENODATA; |
@@ -358,7 +357,7 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
358 | if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { | 357 | if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { |
359 | ret = mei_hbm_dispatch(dev, mei_hdr); | 358 | ret = mei_hbm_dispatch(dev, mei_hdr); |
360 | if (ret) { | 359 | if (ret) { |
361 | dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", | 360 | dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", |
362 | ret); | 361 | ret); |
363 | goto end; | 362 | goto end; |
364 | } | 363 | } |
@@ -375,7 +374,7 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
375 | 374 | ||
376 | /* if no recipient cl was found we assume corrupted header */ | 375 | /* if no recipient cl was found we assume corrupted header */ |
377 | if (&cl->link == &dev->file_list) { | 376 | if (&cl->link == &dev->file_list) { |
378 | dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", | 377 | dev_err(dev->dev, "no destination client found 0x%08X\n", |
379 | dev->rd_msg_hdr); | 378 | dev->rd_msg_hdr); |
380 | ret = -EBADMSG; | 379 | ret = -EBADMSG; |
381 | goto end; | 380 | goto end; |
@@ -387,14 +386,14 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
387 | 386 | ||
388 | ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); | 387 | ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); |
389 | if (ret) { | 388 | if (ret) { |
390 | dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", | 389 | dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n", |
391 | ret); | 390 | ret); |
392 | goto end; | 391 | goto end; |
393 | } | 392 | } |
394 | } else { | 393 | } else { |
395 | ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); | 394 | ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); |
396 | if (ret) { | 395 | if (ret) { |
397 | dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", | 396 | dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n", |
398 | ret); | 397 | ret); |
399 | goto end; | 398 | goto end; |
400 | } | 399 | } |
@@ -407,7 +406,7 @@ reset_slots: | |||
407 | 406 | ||
408 | if (*slots == -EOVERFLOW) { | 407 | if (*slots == -EOVERFLOW) { |
409 | /* overflow - reset */ | 408 | /* overflow - reset */ |
410 | dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n"); | 409 | dev_err(dev->dev, "resetting due to slots overflow.\n"); |
411 | /* set the event since message has been read */ | 410 | /* set the event since message has been read */ |
412 | ret = -ERANGE; | 411 | ret = -ERANGE; |
413 | goto end; | 412 | goto end; |
@@ -425,7 +424,7 @@ EXPORT_SYMBOL_GPL(mei_irq_read_handler); | |||
425 | * @dev: the device structure | 424 | * @dev: the device structure |
426 | * @cmpl_list: An instance of our list structure | 425 | * @cmpl_list: An instance of our list structure |
427 | * | 426 | * |
428 | * returns 0 on success, <0 on failure. | 427 | * Return: 0 on success, <0 on failure. |
429 | */ | 428 | */ |
430 | int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | 429 | int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) |
431 | { | 430 | { |
@@ -445,7 +444,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
445 | return -EMSGSIZE; | 444 | return -EMSGSIZE; |
446 | 445 | ||
447 | /* complete all waiting for write CB */ | 446 | /* complete all waiting for write CB */ |
448 | dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n"); | 447 | dev_dbg(dev->dev, "complete all waiting for write cb.\n"); |
449 | 448 | ||
450 | list = &dev->write_waiting_list; | 449 | list = &dev->write_waiting_list; |
451 | list_for_each_entry_safe(cb, next, &list->list, list) { | 450 | list_for_each_entry_safe(cb, next, &list->list, list) { |
@@ -487,7 +486,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
487 | } | 486 | } |
488 | 487 | ||
489 | /* complete control write list CB */ | 488 | /* complete control write list CB */ |
490 | dev_dbg(&dev->pdev->dev, "complete control write list cb.\n"); | 489 | dev_dbg(dev->dev, "complete control write list cb.\n"); |
491 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { | 490 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { |
492 | cl = cb->cl; | 491 | cl = cb->cl; |
493 | if (!cl) { | 492 | if (!cl) { |
@@ -495,9 +494,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
495 | return -ENODEV; | 494 | return -ENODEV; |
496 | } | 495 | } |
497 | switch (cb->fop_type) { | 496 | switch (cb->fop_type) { |
498 | case MEI_FOP_CLOSE: | 497 | case MEI_FOP_DISCONNECT: |
499 | /* send disconnect message */ | 498 | /* send disconnect message */ |
500 | ret = mei_cl_irq_close(cl, cb, cmpl_list); | 499 | ret = mei_cl_irq_disconnect(cl, cb, cmpl_list); |
501 | if (ret) | 500 | if (ret) |
502 | return ret; | 501 | return ret; |
503 | 502 | ||
@@ -528,7 +527,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
528 | 527 | ||
529 | } | 528 | } |
530 | /* complete write list CB */ | 529 | /* complete write list CB */ |
531 | dev_dbg(&dev->pdev->dev, "complete write list cb.\n"); | 530 | dev_dbg(dev->dev, "complete write list cb.\n"); |
532 | list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { | 531 | list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { |
533 | cl = cb->cl; | 532 | cl = cb->cl; |
534 | if (cl == NULL) | 533 | if (cl == NULL) |
@@ -556,8 +555,6 @@ void mei_timer(struct work_struct *work) | |||
556 | { | 555 | { |
557 | unsigned long timeout; | 556 | unsigned long timeout; |
558 | struct mei_cl *cl; | 557 | struct mei_cl *cl; |
559 | struct mei_cl_cb *cb_pos = NULL; | ||
560 | struct mei_cl_cb *cb_next = NULL; | ||
561 | 558 | ||
562 | struct mei_device *dev = container_of(work, | 559 | struct mei_device *dev = container_of(work, |
563 | struct mei_device, timer_work.work); | 560 | struct mei_device, timer_work.work); |
@@ -571,7 +568,7 @@ void mei_timer(struct work_struct *work) | |||
571 | 568 | ||
572 | if (dev->init_clients_timer) { | 569 | if (dev->init_clients_timer) { |
573 | if (--dev->init_clients_timer == 0) { | 570 | if (--dev->init_clients_timer == 0) { |
574 | dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", | 571 | dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n", |
575 | dev->hbm_state); | 572 | dev->hbm_state); |
576 | mei_reset(dev); | 573 | mei_reset(dev); |
577 | goto out; | 574 | goto out; |
@@ -586,7 +583,7 @@ void mei_timer(struct work_struct *work) | |||
586 | list_for_each_entry(cl, &dev->file_list, link) { | 583 | list_for_each_entry(cl, &dev->file_list, link) { |
587 | if (cl->timer_count) { | 584 | if (cl->timer_count) { |
588 | if (--cl->timer_count == 0) { | 585 | if (--cl->timer_count == 0) { |
589 | dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); | 586 | dev_err(dev->dev, "timer: connect/disconnect timeout.\n"); |
590 | mei_reset(dev); | 587 | mei_reset(dev); |
591 | goto out; | 588 | goto out; |
592 | } | 589 | } |
@@ -598,7 +595,7 @@ void mei_timer(struct work_struct *work) | |||
598 | 595 | ||
599 | if (dev->iamthif_stall_timer) { | 596 | if (dev->iamthif_stall_timer) { |
600 | if (--dev->iamthif_stall_timer == 0) { | 597 | if (--dev->iamthif_stall_timer == 0) { |
601 | dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); | 598 | dev_err(dev->dev, "timer: amthif hanged.\n"); |
602 | mei_reset(dev); | 599 | mei_reset(dev); |
603 | dev->iamthif_msg_buf_size = 0; | 600 | dev->iamthif_msg_buf_size = 0; |
604 | dev->iamthif_msg_buf_index = 0; | 601 | dev->iamthif_msg_buf_index = 0; |
@@ -620,27 +617,20 @@ void mei_timer(struct work_struct *work) | |||
620 | timeout = dev->iamthif_timer + | 617 | timeout = dev->iamthif_timer + |
621 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | 618 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); |
622 | 619 | ||
623 | dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", | 620 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", |
624 | dev->iamthif_timer); | 621 | dev->iamthif_timer); |
625 | dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout); | 622 | dev_dbg(dev->dev, "timeout = %ld\n", timeout); |
626 | dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies); | 623 | dev_dbg(dev->dev, "jiffies = %ld\n", jiffies); |
627 | if (time_after(jiffies, timeout)) { | 624 | if (time_after(jiffies, timeout)) { |
628 | /* | 625 | /* |
629 | * User didn't read the AMTHI data on time (15sec) | 626 | * User didn't read the AMTHI data on time (15sec) |
630 | * freeing AMTHI for other requests | 627 | * freeing AMTHI for other requests |
631 | */ | 628 | */ |
632 | 629 | ||
633 | dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n"); | 630 | dev_dbg(dev->dev, "freeing AMTHI for other requests\n"); |
634 | 631 | ||
635 | list_for_each_entry_safe(cb_pos, cb_next, | 632 | mei_io_list_flush(&dev->amthif_rd_complete_list, |
636 | &dev->amthif_rd_complete_list.list, list) { | 633 | &dev->iamthif_cl); |
637 | |||
638 | cl = cb_pos->file_object->private_data; | ||
639 | |||
640 | /* Finding the AMTHI entry. */ | ||
641 | if (cl == &dev->iamthif_cl) | ||
642 | list_del(&cb_pos->list); | ||
643 | } | ||
644 | mei_io_cb_free(dev->iamthif_current_cb); | 634 | mei_io_cb_free(dev->iamthif_current_cb); |
645 | dev->iamthif_current_cb = NULL; | 635 | dev->iamthif_current_cb = NULL; |
646 | 636 | ||
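The timer path no longer walks amthif_rd_complete_list by hand to drop the AMTHI client's callbacks; it delegates to mei_io_list_flush(). That helper is defined elsewhere in the driver, so the following is only a sketch of the behaviour the call above relies on: remove from the list every callback owned by the given client.

```c
/*
 * Sketch of the list-flush behaviour relied on above; the real
 * mei_io_list_flush() lives elsewhere in the driver and may differ in
 * detail (locking, freeing policy).
 */
static void example_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &list->list, list) {
		/* drop only the callbacks belonging to the given client */
		if (cb->cl == cl)
			list_del_init(&cb->list);
	}
}
```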
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 401a3d526cd0..beedc91f03a6 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -17,12 +17,12 @@ | |||
17 | #include <linux/moduleparam.h> | 17 | #include <linux/moduleparam.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
21 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/fcntl.h> | 24 | #include <linux/fcntl.h> |
24 | #include <linux/aio.h> | 25 | #include <linux/aio.h> |
25 | #include <linux/pci.h> | ||
26 | #include <linux/poll.h> | 26 | #include <linux/poll.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/ioctl.h> | 28 | #include <linux/ioctl.h> |
@@ -44,7 +44,7 @@ | |||
44 | * @inode: pointer to inode structure | 44 | * @inode: pointer to inode structure |
45 | * @file: pointer to file structure | 45 | * @file: pointer to file structure |
46 | * | 46 | * |
47 | * returns 0 on success, <0 on error | 47 | * Return: 0 on success, <0 on error |
48 | */ | 48 | */ |
49 | static int mei_open(struct inode *inode, struct file *file) | 49 | static int mei_open(struct inode *inode, struct file *file) |
50 | { | 50 | { |
@@ -63,7 +63,7 @@ static int mei_open(struct inode *inode, struct file *file) | |||
63 | 63 | ||
64 | err = -ENODEV; | 64 | err = -ENODEV; |
65 | if (dev->dev_state != MEI_DEV_ENABLED) { | 65 | if (dev->dev_state != MEI_DEV_ENABLED) { |
66 | dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", | 66 | dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", |
67 | mei_dev_state_str(dev->dev_state)); | 67 | mei_dev_state_str(dev->dev_state)); |
68 | goto err_unlock; | 68 | goto err_unlock; |
69 | } | 69 | } |
@@ -96,7 +96,7 @@ err_unlock: | |||
96 | * @inode: pointer to inode structure | 96 | * @inode: pointer to inode structure |
97 | * @file: pointer to file structure | 97 | * @file: pointer to file structure |
98 | * | 98 | * |
99 | * returns 0 on success, <0 on error | 99 | * Return: 0 on success, <0 on error |
100 | */ | 100 | */ |
101 | static int mei_release(struct inode *inode, struct file *file) | 101 | static int mei_release(struct inode *inode, struct file *file) |
102 | { | 102 | { |
@@ -157,7 +157,7 @@ out: | |||
157 | * @length: buffer length | 157 | * @length: buffer length |
158 | * @offset: data offset in buffer | 158 | * @offset: data offset in buffer |
159 | * | 159 | * |
160 | * returns >=0 data length on success , <0 on error | 160 | * Return: >=0 data length on success , <0 on error |
161 | */ | 161 | */ |
162 | static ssize_t mei_read(struct file *file, char __user *ubuf, | 162 | static ssize_t mei_read(struct file *file, char __user *ubuf, |
163 | size_t length, loff_t *offset) | 163 | size_t length, loff_t *offset) |
@@ -211,7 +211,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
211 | 211 | ||
212 | err = mei_cl_read_start(cl, length); | 212 | err = mei_cl_read_start(cl, length); |
213 | if (err && err != -EBUSY) { | 213 | if (err && err != -EBUSY) { |
214 | dev_dbg(&dev->pdev->dev, | 214 | dev_dbg(dev->dev, |
215 | "mei start read failure with status = %d\n", err); | 215 | "mei start read failure with status = %d\n", err); |
216 | rets = err; | 216 | rets = err; |
217 | goto out; | 217 | goto out; |
@@ -254,7 +254,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
254 | } | 254 | } |
255 | /* now copy the data to user space */ | 255 | /* now copy the data to user space */ |
256 | copy_buffer: | 256 | copy_buffer: |
257 | dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx= %ld\n", | 257 | dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n", |
258 | cb->response_buffer.size, cb->buf_idx); | 258 | cb->response_buffer.size, cb->buf_idx); |
259 | if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { | 259 | if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { |
260 | rets = -EMSGSIZE; | 260 | rets = -EMSGSIZE; |
@@ -266,7 +266,7 @@ copy_buffer: | |||
266 | length = min_t(size_t, length, cb->buf_idx - *offset); | 266 | length = min_t(size_t, length, cb->buf_idx - *offset); |
267 | 267 | ||
268 | if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { | 268 | if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { |
269 | dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); | 269 | dev_dbg(dev->dev, "failed to copy data to userland\n"); |
270 | rets = -EFAULT; | 270 | rets = -EFAULT; |
271 | goto free; | 271 | goto free; |
272 | } | 272 | } |
@@ -285,7 +285,7 @@ free: | |||
285 | cl->reading_state = MEI_IDLE; | 285 | cl->reading_state = MEI_IDLE; |
286 | cl->read_cb = NULL; | 286 | cl->read_cb = NULL; |
287 | out: | 287 | out: |
288 | dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets); | 288 | dev_dbg(dev->dev, "end mei read rets= %d\n", rets); |
289 | mutex_unlock(&dev->device_lock); | 289 | mutex_unlock(&dev->device_lock); |
290 | return rets; | 290 | return rets; |
291 | } | 291 | } |
@@ -297,17 +297,17 @@ out: | |||
297 | * @length: buffer length | 297 | * @length: buffer length |
298 | * @offset: data offset in buffer | 298 | * @offset: data offset in buffer |
299 | * | 299 | * |
300 | * returns >=0 data length on success , <0 on error | 300 | * Return: >=0 data length on success , <0 on error |
301 | */ | 301 | */ |
302 | static ssize_t mei_write(struct file *file, const char __user *ubuf, | 302 | static ssize_t mei_write(struct file *file, const char __user *ubuf, |
303 | size_t length, loff_t *offset) | 303 | size_t length, loff_t *offset) |
304 | { | 304 | { |
305 | struct mei_cl *cl = file->private_data; | 305 | struct mei_cl *cl = file->private_data; |
306 | struct mei_me_client *me_cl; | ||
306 | struct mei_cl_cb *write_cb = NULL; | 307 | struct mei_cl_cb *write_cb = NULL; |
307 | struct mei_device *dev; | 308 | struct mei_device *dev; |
308 | unsigned long timeout = 0; | 309 | unsigned long timeout = 0; |
309 | int rets; | 310 | int rets; |
310 | int id; | ||
311 | 311 | ||
312 | if (WARN_ON(!cl || !cl->dev)) | 312 | if (WARN_ON(!cl || !cl->dev)) |
313 | return -ENODEV; | 313 | return -ENODEV; |
@@ -321,8 +321,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
321 | goto out; | 321 | goto out; |
322 | } | 322 | } |
323 | 323 | ||
324 | id = mei_me_cl_by_id(dev, cl->me_client_id); | 324 | me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); |
325 | if (id < 0) { | 325 | if (!me_cl) { |
326 | rets = -ENOTTY; | 326 | rets = -ENOTTY; |
327 | goto out; | 327 | goto out; |
328 | } | 328 | } |
@@ -332,13 +332,13 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
332 | goto out; | 332 | goto out; |
333 | } | 333 | } |
334 | 334 | ||
335 | if (length > dev->me_clients[id].props.max_msg_length) { | 335 | if (length > me_cl->props.max_msg_length) { |
336 | rets = -EFBIG; | 336 | rets = -EFBIG; |
337 | goto out; | 337 | goto out; |
338 | } | 338 | } |
339 | 339 | ||
340 | if (cl->state != MEI_FILE_CONNECTED) { | 340 | if (cl->state != MEI_FILE_CONNECTED) { |
341 | dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d", | 341 | dev_err(dev->dev, "host client = %d, is not connected to ME client = %d", |
342 | cl->host_client_id, cl->me_client_id); | 342 | cl->host_client_id, cl->me_client_id); |
343 | rets = -ENODEV; | 343 | rets = -ENODEV; |
344 | goto out; | 344 | goto out; |
@@ -377,7 +377,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
377 | 377 | ||
378 | write_cb = mei_io_cb_init(cl, file); | 378 | write_cb = mei_io_cb_init(cl, file); |
379 | if (!write_cb) { | 379 | if (!write_cb) { |
380 | dev_err(&dev->pdev->dev, "write cb allocation failed\n"); | ||
381 | rets = -ENOMEM; | 380 | rets = -ENOMEM; |
382 | goto out; | 381 | goto out; |
383 | } | 382 | } |
@@ -387,7 +386,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
387 | 386 | ||
388 | rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); | 387 | rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); |
389 | if (rets) { | 388 | if (rets) { |
390 | dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); | 389 | dev_dbg(dev->dev, "failed to copy data from userland\n"); |
391 | rets = -EFAULT; | 390 | rets = -EFAULT; |
392 | goto out; | 391 | goto out; |
393 | } | 392 | } |
@@ -396,7 +395,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
396 | rets = mei_amthif_write(dev, write_cb); | 395 | rets = mei_amthif_write(dev, write_cb); |
397 | 396 | ||
398 | if (rets) { | 397 | if (rets) { |
399 | dev_err(&dev->pdev->dev, | 398 | dev_err(dev->dev, |
400 | "amthif write failed with status = %d\n", rets); | 399 | "amthif write failed with status = %d\n", rets); |
401 | goto out; | 400 | goto out; |
402 | } | 401 | } |
@@ -415,27 +414,23 @@ out: | |||
415 | /** | 414 | /** |
416 | * mei_ioctl_connect_client - the connect to fw client IOCTL function | 415 | * mei_ioctl_connect_client - the connect to fw client IOCTL function |
417 | * | 416 | * |
418 | * @dev: the device structure | ||
419 | * @data: IOCTL connect data, input and output parameters | ||
420 | * @file: private data of the file object | 417 | * @file: private data of the file object |
418 | * @data: IOCTL connect data, input and output parameters | ||
421 | * | 419 | * |
422 | * Locking: called under "dev->device_lock" lock | 420 | * Locking: called under "dev->device_lock" lock |
423 | * | 421 | * |
424 | * returns 0 on success, <0 on failure. | 422 | * Return: 0 on success, <0 on failure. |
425 | */ | 423 | */ |
426 | static int mei_ioctl_connect_client(struct file *file, | 424 | static int mei_ioctl_connect_client(struct file *file, |
427 | struct mei_connect_client_data *data) | 425 | struct mei_connect_client_data *data) |
428 | { | 426 | { |
429 | struct mei_device *dev; | 427 | struct mei_device *dev; |
430 | struct mei_client *client; | 428 | struct mei_client *client; |
429 | struct mei_me_client *me_cl; | ||
431 | struct mei_cl *cl; | 430 | struct mei_cl *cl; |
432 | int i; | ||
433 | int rets; | 431 | int rets; |
434 | 432 | ||
435 | cl = file->private_data; | 433 | cl = file->private_data; |
436 | if (WARN_ON(!cl || !cl->dev)) | ||
437 | return -ENODEV; | ||
438 | |||
439 | dev = cl->dev; | 434 | dev = cl->dev; |
440 | 435 | ||
441 | if (dev->dev_state != MEI_DEV_ENABLED) { | 436 | if (dev->dev_state != MEI_DEV_ENABLED) { |
@@ -450,28 +445,29 @@ static int mei_ioctl_connect_client(struct file *file, | |||
450 | } | 445 | } |
451 | 446 | ||
452 | /* find ME client we're trying to connect to */ | 447 | /* find ME client we're trying to connect to */ |
453 | i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); | 448 | me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); |
454 | if (i < 0 || dev->me_clients[i].props.fixed_address) { | 449 | if (!me_cl || me_cl->props.fixed_address) { |
455 | dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", | 450 | dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", |
456 | &data->in_client_uuid); | 451 | &data->in_client_uuid); |
457 | rets = -ENOTTY; | 452 | rets = -ENOTTY; |
458 | goto end; | 453 | goto end; |
459 | } | 454 | } |
460 | 455 | ||
461 | cl->me_client_id = dev->me_clients[i].client_id; | 456 | cl->me_client_id = me_cl->client_id; |
457 | cl->cl_uuid = me_cl->props.protocol_name; | ||
462 | 458 | ||
463 | dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", | 459 | dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", |
464 | cl->me_client_id); | 460 | cl->me_client_id); |
465 | dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", | 461 | dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", |
466 | dev->me_clients[i].props.protocol_version); | 462 | me_cl->props.protocol_version); |
467 | dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", | 463 | dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", |
468 | dev->me_clients[i].props.max_msg_length); | 464 | me_cl->props.max_msg_length); |
469 | 465 | ||
470 | /* if we're connecting to amthif client then we will use the | 466 | /* if we're connecting to amthif client then we will use the |
471 | * existing connection | 467 | * existing connection |
472 | */ | 468 | */ |
473 | if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { | 469 | if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { |
474 | dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); | 470 | dev_dbg(dev->dev, "FW Client is amthi\n"); |
475 | if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { | 471 | if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { |
476 | rets = -ENODEV; | 472 | rets = -ENODEV; |
477 | goto end; | 473 | goto end; |
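
The lookup in the hunk above now hands back a struct mei_me_client pointer instead of an index into the old me_clients array; the clients themselves sit on the new me_clients list added to struct mei_device in the mei_dev.h hunks further down. The helper lives in client.c, outside this diff, but under that list-based layout it can be sketched roughly as:

struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
                                        const uuid_le *uuid)
{
        struct mei_me_client *me_cl;

        /* walk the FW client list filled in during HBM enumeration */
        list_for_each_entry(me_cl, &dev->me_clients, list)
                if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
                        return me_cl;

        return NULL;    /* callers now test for NULL instead of a negative index */
}

The mei_me_cl_by_uuid_id() variant used in the write path earlier presumably matches the cached me_client_id as well; either way the callers only keep the returned pointer.
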
@@ -484,10 +480,8 @@ static int mei_ioctl_connect_client(struct file *file, | |||
484 | file->private_data = &dev->iamthif_cl; | 480 | file->private_data = &dev->iamthif_cl; |
485 | 481 | ||
486 | client = &data->out_client_properties; | 482 | client = &data->out_client_properties; |
487 | client->max_msg_length = | 483 | client->max_msg_length = me_cl->props.max_msg_length; |
488 | dev->me_clients[i].props.max_msg_length; | 484 | client->protocol_version = me_cl->props.protocol_version; |
489 | client->protocol_version = | ||
490 | dev->me_clients[i].props.protocol_version; | ||
491 | rets = dev->iamthif_cl.status; | 485 | rets = dev->iamthif_cl.status; |
492 | 486 | ||
493 | goto end; | 487 | goto end; |
@@ -496,9 +490,9 @@ static int mei_ioctl_connect_client(struct file *file, | |||
496 | 490 | ||
497 | /* prepare the output buffer */ | 491 | /* prepare the output buffer */ |
498 | client = &data->out_client_properties; | 492 | client = &data->out_client_properties; |
499 | client->max_msg_length = dev->me_clients[i].props.max_msg_length; | 493 | client->max_msg_length = me_cl->props.max_msg_length; |
500 | client->protocol_version = dev->me_clients[i].props.protocol_version; | 494 | client->protocol_version = me_cl->props.protocol_version; |
501 | dev_dbg(&dev->pdev->dev, "Can connect?\n"); | 495 | dev_dbg(dev->dev, "Can connect?\n"); |
502 | 496 | ||
503 | 497 | ||
504 | rets = mei_cl_connect(cl, file); | 498 | rets = mei_cl_connect(cl, file); |
@@ -507,7 +501,6 @@ end: | |||
507 | return rets; | 501 | return rets; |
508 | } | 502 | } |
509 | 503 | ||
510 | |||
511 | /** | 504 | /** |
512 | * mei_ioctl - the IOCTL function | 505 | * mei_ioctl - the IOCTL function |
513 | * | 506 | * |
@@ -515,24 +508,22 @@ end: | |||
515 | * @cmd: ioctl command | 508 | * @cmd: ioctl command |
516 | * @data: pointer to mei message structure | 509 | * @data: pointer to mei message structure |
517 | * | 510 | * |
518 | * returns 0 on success , <0 on error | 511 | * Return: 0 on success , <0 on error |
519 | */ | 512 | */ |
520 | static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) | 513 | static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) |
521 | { | 514 | { |
522 | struct mei_device *dev; | 515 | struct mei_device *dev; |
523 | struct mei_cl *cl = file->private_data; | 516 | struct mei_cl *cl = file->private_data; |
524 | struct mei_connect_client_data *connect_data = NULL; | 517 | struct mei_connect_client_data connect_data; |
525 | int rets; | 518 | int rets; |
526 | 519 | ||
527 | if (cmd != IOCTL_MEI_CONNECT_CLIENT) | ||
528 | return -EINVAL; | ||
529 | 520 | ||
530 | if (WARN_ON(!cl || !cl->dev)) | 521 | if (WARN_ON(!cl || !cl->dev)) |
531 | return -ENODEV; | 522 | return -ENODEV; |
532 | 523 | ||
533 | dev = cl->dev; | 524 | dev = cl->dev; |
534 | 525 | ||
535 | dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd); | 526 | dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd); |
536 | 527 | ||
537 | mutex_lock(&dev->device_lock); | 528 | mutex_lock(&dev->device_lock); |
538 | if (dev->dev_state != MEI_DEV_ENABLED) { | 529 | if (dev->dev_state != MEI_DEV_ENABLED) { |
@@ -540,38 +531,36 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) | |||
540 | goto out; | 531 | goto out; |
541 | } | 532 | } |
542 | 533 | ||
543 | dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); | 534 | switch (cmd) { |
544 | 535 | case IOCTL_MEI_CONNECT_CLIENT: | |
545 | connect_data = kzalloc(sizeof(struct mei_connect_client_data), | 536 | dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); |
546 | GFP_KERNEL); | 537 | if (copy_from_user(&connect_data, (char __user *)data, |
547 | if (!connect_data) { | ||
548 | rets = -ENOMEM; | ||
549 | goto out; | ||
550 | } | ||
551 | dev_dbg(&dev->pdev->dev, "copy connect data from user\n"); | ||
552 | if (copy_from_user(connect_data, (char __user *)data, | ||
553 | sizeof(struct mei_connect_client_data))) { | 538 | sizeof(struct mei_connect_client_data))) { |
554 | dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); | 539 | dev_dbg(dev->dev, "failed to copy data from userland\n"); |
555 | rets = -EFAULT; | 540 | rets = -EFAULT; |
556 | goto out; | 541 | goto out; |
557 | } | 542 | } |
558 | |||
559 | rets = mei_ioctl_connect_client(file, connect_data); | ||
560 | 543 | ||
561 | /* if all is ok, copying the data back to user. */ | 544 | rets = mei_ioctl_connect_client(file, &connect_data); |
562 | if (rets) | 545 | if (rets) |
563 | goto out; | 546 | goto out; |
564 | 547 | ||
565 | dev_dbg(&dev->pdev->dev, "copy connect data to user\n"); | 548 | /* if all is ok, copying the data back to user. */ |
566 | if (copy_to_user((char __user *)data, connect_data, | 549 | if (copy_to_user((char __user *)data, &connect_data, |
567 | sizeof(struct mei_connect_client_data))) { | 550 | sizeof(struct mei_connect_client_data))) { |
568 | dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); | 551 | dev_dbg(dev->dev, "failed to copy data to userland\n"); |
569 | rets = -EFAULT; | 552 | rets = -EFAULT; |
570 | goto out; | 553 | goto out; |
554 | } | ||
555 | |||
556 | break; | ||
557 | |||
558 | default: | ||
559 | dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); | ||
560 | rets = -ENOIOCTLCMD; | ||
571 | } | 561 | } |
572 | 562 | ||
573 | out: | 563 | out: |
574 | kfree(connect_data); | ||
575 | mutex_unlock(&dev->device_lock); | 564 | mutex_unlock(&dev->device_lock); |
576 | return rets; | 565 | return rets; |
577 | } | 566 | } |
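
Boiled down, the reworked mei_ioctl() above keeps the connect data on the stack, moves command handling into a switch, and answers unknown commands with -ENOIOCTLCMD instead of rejecting everything but IOCTL_MEI_CONNECT_CLIENT up front. A condensed sketch of that dispatch shape, with the locking and device-state checks from the hunk left out:

static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_connect_client_data connect_data;    /* stack copy, no kzalloc/kfree */
        int rets = 0;

        switch (cmd) {
        case IOCTL_MEI_CONNECT_CLIENT:
                if (copy_from_user(&connect_data, (char __user *)data,
                                   sizeof(connect_data)))
                        return -EFAULT;
                rets = mei_ioctl_connect_client(file, &connect_data);
                /* copy the client properties back only on success */
                if (!rets && copy_to_user((char __user *)data, &connect_data,
                                          sizeof(connect_data)))
                        rets = -EFAULT;
                break;
        default:
                rets = -ENOIOCTLCMD;
                break;
        }
        return rets;
}
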
@@ -583,7 +572,7 @@ out: | |||
583 | * @cmd: ioctl command | 572 | * @cmd: ioctl command |
584 | * @data: pointer to mei message structure | 573 | * @data: pointer to mei message structure |
585 | * | 574 | * |
586 | * returns 0 on success , <0 on error | 575 | * Return: 0 on success , <0 on error |
587 | */ | 576 | */ |
588 | #ifdef CONFIG_COMPAT | 577 | #ifdef CONFIG_COMPAT |
589 | static long mei_compat_ioctl(struct file *file, | 578 | static long mei_compat_ioctl(struct file *file, |
@@ -600,7 +589,7 @@ static long mei_compat_ioctl(struct file *file, | |||
600 | * @file: pointer to file structure | 589 | * @file: pointer to file structure |
601 | * @wait: pointer to poll_table structure | 590 | * @wait: pointer to poll_table structure |
602 | * | 591 | * |
603 | * returns poll mask | 592 | * Return: poll mask |
604 | */ | 593 | */ |
605 | static unsigned int mei_poll(struct file *file, poll_table *wait) | 594 | static unsigned int mei_poll(struct file *file, poll_table *wait) |
606 | { | 595 | { |
@@ -670,7 +659,7 @@ static DEFINE_IDR(mei_idr); | |||
670 | * | 659 | * |
671 | * @dev: device pointer | 660 | * @dev: device pointer |
672 | * | 661 | * |
673 | * returns allocated minor, or -ENOSPC if no free minor left | 662 | * Return: allocated minor, or -ENOSPC if no free minor left |
674 | */ | 663 | */ |
675 | static int mei_minor_get(struct mei_device *dev) | 664 | static int mei_minor_get(struct mei_device *dev) |
676 | { | 665 | { |
@@ -681,7 +670,7 @@ static int mei_minor_get(struct mei_device *dev) | |||
681 | if (ret >= 0) | 670 | if (ret >= 0) |
682 | dev->minor = ret; | 671 | dev->minor = ret; |
683 | else if (ret == -ENOSPC) | 672 | else if (ret == -ENOSPC) |
684 | dev_err(&dev->pdev->dev, "too many mei devices\n"); | 673 | dev_err(dev->dev, "too many mei devices\n"); |
685 | 674 | ||
686 | mutex_unlock(&mei_minor_lock); | 675 | mutex_unlock(&mei_minor_lock); |
687 | return ret; | 676 | return ret; |
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 0b0d6135543b..71744b16cc8c 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
@@ -129,20 +129,18 @@ enum mei_wd_states { | |||
129 | 129 | ||
130 | /** | 130 | /** |
131 | * enum mei_cb_file_ops - file operation associated with the callback | 131 | * enum mei_cb_file_ops - file operation associated with the callback |
132 | * @MEI_FOP_READ - read | 132 | * @MEI_FOP_READ: read |
133 | * @MEI_FOP_WRITE - write | 133 | * @MEI_FOP_WRITE: write |
134 | * @MEI_FOP_CONNECT - connect | 134 | * @MEI_FOP_CONNECT: connect |
135 | * @MEI_FOP_DISCONNECT_RSP - disconnect response | 135 | * @MEI_FOP_DISCONNECT: disconnect |
136 | * @MEI_FOP_OPEN - open | 136 | * @MEI_FOP_DISCONNECT_RSP: disconnect response |
137 | * @MEI_FOP_CLOSE - close | ||
138 | */ | 137 | */ |
139 | enum mei_cb_file_ops { | 138 | enum mei_cb_file_ops { |
140 | MEI_FOP_READ = 0, | 139 | MEI_FOP_READ = 0, |
141 | MEI_FOP_WRITE, | 140 | MEI_FOP_WRITE, |
142 | MEI_FOP_CONNECT, | 141 | MEI_FOP_CONNECT, |
142 | MEI_FOP_DISCONNECT, | ||
143 | MEI_FOP_DISCONNECT_RSP, | 143 | MEI_FOP_DISCONNECT_RSP, |
144 | MEI_FOP_OPEN, | ||
145 | MEI_FOP_CLOSE | ||
146 | }; | 144 | }; |
147 | 145 | ||
148 | /* | 146 | /* |
@@ -159,8 +157,8 @@ struct mei_msg_data { | |||
159 | /* | 157 | /* |
160 | * struct mei_fw_status - storage of FW status data | 158 | * struct mei_fw_status - storage of FW status data |
161 | * | 159 | * |
162 | * @count - number of actually available elements in array | 160 | * @count: number of actually available elements in array |
163 | * @status - FW status registers | 161 | * @status: FW status registers |
164 | */ | 162 | */ |
165 | struct mei_fw_status { | 163 | struct mei_fw_status { |
166 | int count; | 164 | int count; |
@@ -170,11 +168,13 @@ struct mei_fw_status { | |||
170 | /** | 168 | /** |
171 | * struct mei_me_client - representation of me (fw) client | 169 | * struct mei_me_client - representation of me (fw) client |
172 | * | 170 | * |
173 | * @props - client properties | 171 | * @list: link in me client list |
174 | * @client_id - me client id | 172 | * @props: client properties |
175 | * @mei_flow_ctrl_creds - flow control credits | 173 | * @client_id: me client id |
174 | * @mei_flow_ctrl_creds: flow control credits | ||
176 | */ | 175 | */ |
177 | struct mei_me_client { | 176 | struct mei_me_client { |
177 | struct list_head list; | ||
178 | struct mei_client_properties props; | 178 | struct mei_client_properties props; |
179 | u8 client_id; | 179 | u8 client_id; |
180 | u8 mei_flow_ctrl_creds; | 180 | u8 mei_flow_ctrl_creds; |
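
The new list member above pairs with struct mei_device switching from a preallocated me_clients array to a struct list_head (later in this file's hunks), so enumeration can simply append FW clients as they are reported. A hypothetical add helper under that layout; the real code in client.c may differ:

static int mei_me_cl_add(struct mei_device *dev,
                         const struct mei_client_properties *props, u8 client_id)
{
        struct mei_me_client *me_cl;

        me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
        if (!me_cl)
                return -ENOMEM;

        me_cl->props = *props;          /* copy the properties reported over HBM */
        me_cl->client_id = client_id;
        list_add(&me_cl->list, &dev->me_clients);       /* hang it off the device */

        return 0;
}
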
@@ -186,8 +186,15 @@ struct mei_cl; | |||
186 | /** | 186 | /** |
187 | * struct mei_cl_cb - file operation callback structure | 187 | * struct mei_cl_cb - file operation callback structure |
188 | * | 188 | * |
189 | * @cl - file client who is running this operation | 189 | * @list: link in callback queue |
190 | * @fop_type - file operation type | 190 | * @cl: file client who is running this operation |
191 | * @fop_type: file operation type | ||
192 | * @request_buffer: buffer to store request data | ||
193 | * @response_buffer: buffer to store response data | ||
194 | * @buf_idx: last read index | ||
195 | * @read_time: last read operation time stamp (iamthif) | ||
196 | * @file_object: pointer to file structure | ||
197 | * @internal: communication between driver and FW flag | ||
191 | */ | 198 | */ |
192 | struct mei_cl_cb { | 199 | struct mei_cl_cb { |
193 | struct list_head list; | 200 | struct list_head list; |
@@ -201,7 +208,29 @@ struct mei_cl_cb { | |||
201 | u32 internal:1; | 208 | u32 internal:1; |
202 | }; | 209 | }; |
203 | 210 | ||
204 | /* MEI client instance carried as file->private_data*/ | 211 | /** |
212 | * struct mei_cl - me client host representation | ||
213 | * carried in file->private_data | ||
214 | * | ||
215 | * @link: link in the clients list | ||
216 | * @dev: mei parent device | ||
217 | * @state: file operation state | ||
218 | * @tx_wait: wait queue for tx completion | ||
219 | * @rx_wait: wait queue for rx completion | ||
220 | * @wait: wait queue for management operation | ||
221 | * @status: connection status | ||
222 | * @cl_uuid: client uuid name | ||
223 | * @host_client_id: host id | ||
224 | * @me_client_id: me/fw id | ||
225 | * @mei_flow_ctrl_creds: transmit flow credentials | ||
226 | * @timer_count: watchdog timer for operation completion | ||
227 | * @reading_state: state of the rx | ||
228 | * @writing_state: state of the tx | ||
229 | * @read_cb: current pending reading callback | ||
230 | * | ||
231 | * @device: device on the mei client bus | ||
232 | * @device_link: link to bus clients | ||
233 | */ | ||
205 | struct mei_cl { | 234 | struct mei_cl { |
206 | struct list_head link; | 235 | struct list_head link; |
207 | struct mei_device *dev; | 236 | struct mei_device *dev; |
@@ -210,7 +239,7 @@ struct mei_cl { | |||
210 | wait_queue_head_t rx_wait; | 239 | wait_queue_head_t rx_wait; |
211 | wait_queue_head_t wait; | 240 | wait_queue_head_t wait; |
212 | int status; | 241 | int status; |
213 | /* ID of client connected */ | 242 | uuid_le cl_uuid; |
214 | u8 host_client_id; | 243 | u8 host_client_id; |
215 | u8 me_client_id; | 244 | u8 me_client_id; |
216 | u8 mei_flow_ctrl_creds; | 245 | u8 mei_flow_ctrl_creds; |
@@ -222,35 +251,35 @@ struct mei_cl { | |||
222 | /* MEI CL bus data */ | 251 | /* MEI CL bus data */ |
223 | struct mei_cl_device *device; | 252 | struct mei_cl_device *device; |
224 | struct list_head device_link; | 253 | struct list_head device_link; |
225 | uuid_le device_uuid; | ||
226 | }; | 254 | }; |
227 | 255 | ||
228 | /** struct mei_hw_ops | 256 | /** struct mei_hw_ops |
229 | * | 257 | * |
230 | * @host_is_ready - query for host readiness | 258 | * @host_is_ready : query for host readiness |
231 | 259 | ||
232 | * @hw_is_ready - query if hw is ready | 260 | * @hw_is_ready : query if hw is ready |
233 | * @hw_reset - reset hw | 261 | * @hw_reset : reset hw |
234 | * @hw_start - start hw after reset | 262 | * @hw_start : start hw after reset |
235 | * @hw_config - configure hw | 263 | * @hw_config : configure hw |
236 | 264 | ||
237 | * @pg_state - power gating state of the device | 265 | * @fw_status : get fw status registers |
238 | * @pg_is_enabled - is power gating enabled | 266 | * @pg_state : power gating state of the device |
267 | * @pg_is_enabled : is power gating enabled | ||
239 | 268 | ||
240 | * @intr_clear - clear pending interrupts | 269 | * @intr_clear : clear pending interrupts |
241 | * @intr_enable - enable interrupts | 270 | * @intr_enable : enable interrupts |
242 | * @intr_disable - disable interrupts | 271 | * @intr_disable : disable interrupts |
243 | 272 | ||
244 | * @hbuf_free_slots - query for write buffer empty slots | 273 | * @hbuf_free_slots : query for write buffer empty slots |
245 | * @hbuf_is_ready - query if write buffer is empty | 274 | * @hbuf_is_ready : query if write buffer is empty |
246 | * @hbuf_max_len - query for write buffer max len | 275 | * @hbuf_max_len : query for write buffer max len |
247 | 276 | ||
248 | * @write - write a message to FW | 277 | * @write : write a message to FW |
249 | 278 | ||
250 | * @rdbuf_full_slots - query how many slots are filled | 279 | * @rdbuf_full_slots : query how many slots are filled |
251 | 280 | ||
252 | * @read_hdr - get first 4 bytes (header) | 281 | * @read_hdr : get first 4 bytes (header) |
253 | * @read - read a buffer from the FW | 282 | * @read : read a buffer from the FW |
254 | */ | 283 | */ |
255 | struct mei_hw_ops { | 284 | struct mei_hw_ops { |
256 | 285 | ||
@@ -261,6 +290,8 @@ struct mei_hw_ops { | |||
261 | int (*hw_start)(struct mei_device *dev); | 290 | int (*hw_start)(struct mei_device *dev); |
262 | void (*hw_config)(struct mei_device *dev); | 291 | void (*hw_config)(struct mei_device *dev); |
263 | 292 | ||
293 | |||
294 | int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts); | ||
264 | enum mei_pg_state (*pg_state)(struct mei_device *dev); | 295 | enum mei_pg_state (*pg_state)(struct mei_device *dev); |
265 | bool (*pg_is_enabled)(struct mei_device *dev); | 296 | bool (*pg_is_enabled)(struct mei_device *dev); |
266 | 297 | ||
@@ -328,11 +359,12 @@ void mei_cl_bus_exit(void); | |||
328 | * when being probed and shall use it for doing ME bus I/O. | 359 | * when being probed and shall use it for doing ME bus I/O. |
329 | * | 360 | * |
330 | * @dev: linux driver model device pointer | 361 | * @dev: linux driver model device pointer |
331 | * @uuid: me client uuid | ||
332 | * @cl: mei client | 362 | * @cl: mei client |
333 | * @ops: ME transport ops | 363 | * @ops: ME transport ops |
364 | * @event_work: async work to execute event callback | ||
334 | * @event_cb: Drivers register this callback to get asynchronous ME | 365 | * @event_cb: Drivers register this callback to get asynchronous ME |
335 | * events (e.g. Rx buffer pending) notifications. | 366 | * events (e.g. Rx buffer pending) notifications. |
367 | * @event_context: event callback run context | ||
336 | * @events: Events bitmask sent to the driver. | 368 | * @events: Events bitmask sent to the driver. |
337 | * @priv_data: client private data | 369 | * @priv_data: client private data |
338 | */ | 370 | */ |
@@ -352,7 +384,7 @@ struct mei_cl_device { | |||
352 | }; | 384 | }; |
353 | 385 | ||
354 | 386 | ||
355 | /** | 387 | /** |
356 | * enum mei_pg_event - power gating transition events | 388 | * enum mei_pg_event - power gating transition events |
357 | * | 389 | * |
358 | * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition | 390 | * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition |
@@ -376,67 +408,106 @@ enum mei_pg_state { | |||
376 | MEI_PG_ON = 1, | 408 | MEI_PG_ON = 1, |
377 | }; | 409 | }; |
378 | 410 | ||
379 | /* | 411 | const char *mei_pg_state_str(enum mei_pg_state state); |
380 | * mei_cfg | ||
381 | * | ||
382 | * @fw_status - FW status | ||
383 | * @quirk_probe - device exclusion quirk | ||
384 | */ | ||
385 | struct mei_cfg { | ||
386 | const struct mei_fw_status fw_status; | ||
387 | bool (*quirk_probe)(struct pci_dev *pdev); | ||
388 | }; | ||
389 | |||
390 | |||
391 | #define MEI_PCI_DEVICE(dev, cfg) \ | ||
392 | .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ | ||
393 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ | ||
394 | .driver_data = (kernel_ulong_t)&(cfg) | ||
395 | |||
396 | 412 | ||
397 | /** | 413 | /** |
398 | * struct mei_device - MEI private device struct | 414 | * struct mei_device - MEI private device struct |
399 | |||
400 | * @pdev - pointer to pci device struct | ||
401 | * @cdev - character device | ||
402 | * @minor - minor number allocated for device | ||
403 | * | 415 | * |
404 | * @reset_count - limits the number of consecutive resets | 416 | * @dev : device on a bus |
405 | * @hbm_state - state of host bus message protocol | 417 | * @cdev : character device |
406 | * @pg_event - power gating event | 418 | * @minor : minor number allocated for device |
407 | * @mem_addr - mem mapped base register address | 419 | * |
408 | 420 | * @read_list : read completion list | |
409 | * @hbuf_depth - depth of hardware host/write buffer is slots | 421 | * @write_list : write pending list |
410 | * @hbuf_is_ready - query if the host host/write buffer is ready | 422 | * @write_waiting_list : write completion list |
411 | * @wr_msg - the buffer for hbm control messages | 423 | * @ctrl_wr_list : pending control write list |
412 | * @cfg - per device generation config and ops | 424 | * @ctrl_rd_list : pending control read list |
425 | * | ||
426 | * @file_list : list of opened handles | ||
427 | * @open_handle_count: number of opened handles | ||
428 | * | ||
429 | * @device_lock : big device lock | ||
430 | * @timer_work : MEI timer delayed work (timeouts) | ||
431 | * | ||
432 | * @recvd_hw_ready : hw ready message received flag | ||
433 | * | ||
434 | * @wait_hw_ready : wait queue for receive HW ready message form FW | ||
435 | * @wait_pg : wait queue for receive PG message from FW | ||
436 | * @wait_hbm_start : wait queue for receive HBM start message from FW | ||
437 | * @wait_stop_wd : wait queue for receive WD stop message from FW | ||
438 | * | ||
439 | * @reset_count : number of consecutive resets | ||
440 | * @dev_state : device state | ||
441 | * @hbm_state : state of host bus message protocol | ||
442 | * @init_clients_timer : HBM init handshake timeout | ||
443 | * | ||
444 | * @pg_event : power gating event | ||
445 | * @pg_domain : runtime PM domain | ||
446 | * | ||
447 | * @rd_msg_buf : control messages buffer | ||
448 | * @rd_msg_hdr : read message header storage | ||
449 | * | ||
450 | * @hbuf_depth : depth of hardware host/write buffer is slots | ||
451 | * @hbuf_is_ready : query if the host host/write buffer is ready | ||
452 | * @wr_msg : the buffer for hbm control messages | ||
453 | * | ||
454 | * @version : HBM protocol version in use | ||
455 | * @hbm_f_pg_supported : hbm feature pgi protocol | ||
456 | * | ||
457 | * @me_clients : list of FW clients | ||
458 | * @me_clients_map : FW clients bit map | ||
459 | * @host_clients_map : host clients id pool | ||
460 | * @me_client_index : last FW client index in enumeration | ||
461 | * | ||
462 | * @wd_cl : watchdog client | ||
463 | * @wd_state : watchdog client state | ||
464 | * @wd_pending : watchdog command is pending | ||
465 | * @wd_timeout : watchdog expiration timeout | ||
466 | * @wd_data : watchdog message buffer | ||
467 | * | ||
468 | * @amthif_cmd_list : amthif list for cmd waiting | ||
469 | * @amthif_rd_complete_list : amthif list for reading completed cmd data | ||
470 | * @iamthif_file_object : file for current amthif operation | ||
471 | * @iamthif_cl : amthif host client | ||
472 | * @iamthif_current_cb : amthif current operation callback | ||
473 | * @iamthif_open_count : number of opened amthif connections | ||
474 | * @iamthif_mtu : amthif client max message length | ||
475 | * @iamthif_timer : time stamp of current amthif command completion | ||
476 | * @iamthif_stall_timer : timer to detect amthif hang | ||
477 | * @iamthif_msg_buf : amthif current message buffer | ||
478 | * @iamthif_msg_buf_size : size of current amthif message request buffer | ||
479 | * @iamthif_msg_buf_index : current index in amthif message request buffer | ||
480 | * @iamthif_state : amthif processor state | ||
481 | * @iamthif_flow_control_pending: amthif waits for flow control | ||
482 | * @iamthif_ioctl : wait for completion if amthif control message | ||
483 | * @iamthif_canceled : current amthif command is canceled | ||
484 | * | ||
485 | * @init_work : work item for the device init | ||
486 | * @reset_work : work item for the device reset | ||
487 | * | ||
488 | * @device_list : mei client bus list | ||
489 | * | ||
490 | * @dbgfs_dir : debugfs mei root directory | ||
491 | * | ||
492 | * @ops: : hw specific operations | ||
493 | * @hw : hw specific data | ||
413 | */ | 494 | */ |
414 | struct mei_device { | 495 | struct mei_device { |
415 | struct pci_dev *pdev; /* pointer to pci device struct */ | 496 | struct device *dev; |
416 | struct cdev cdev; | 497 | struct cdev cdev; |
417 | int minor; | 498 | int minor; |
418 | 499 | ||
419 | /* | 500 | struct mei_cl_cb read_list; |
420 | * lists of queues | 501 | struct mei_cl_cb write_list; |
421 | */ | 502 | struct mei_cl_cb write_waiting_list; |
422 | /* array of pointers to aio lists */ | 503 | struct mei_cl_cb ctrl_wr_list; |
423 | struct mei_cl_cb read_list; /* driver read queue */ | 504 | struct mei_cl_cb ctrl_rd_list; |
424 | struct mei_cl_cb write_list; /* driver write queue */ | ||
425 | struct mei_cl_cb write_waiting_list; /* write waiting queue */ | ||
426 | struct mei_cl_cb ctrl_wr_list; /* managed write IOCTL list */ | ||
427 | struct mei_cl_cb ctrl_rd_list; /* managed read IOCTL list */ | ||
428 | 505 | ||
429 | /* | ||
430 | * list of files | ||
431 | */ | ||
432 | struct list_head file_list; | 506 | struct list_head file_list; |
433 | long open_handle_count; | 507 | long open_handle_count; |
434 | 508 | ||
435 | /* | 509 | struct mutex device_lock; |
436 | * lock for the device | 510 | struct delayed_work timer_work; |
437 | */ | ||
438 | struct mutex device_lock; /* device lock */ | ||
439 | struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */ | ||
440 | 511 | ||
441 | bool recvd_hw_ready; | 512 | bool recvd_hw_ready; |
442 | /* | 513 | /* |
@@ -444,7 +515,7 @@ struct mei_device { | |||
444 | */ | 515 | */ |
445 | wait_queue_head_t wait_hw_ready; | 516 | wait_queue_head_t wait_hw_ready; |
446 | wait_queue_head_t wait_pg; | 517 | wait_queue_head_t wait_pg; |
447 | wait_queue_head_t wait_recvd_msg; | 518 | wait_queue_head_t wait_hbm_start; |
448 | wait_queue_head_t wait_stop_wd; | 519 | wait_queue_head_t wait_stop_wd; |
449 | 520 | ||
450 | /* | 521 | /* |
@@ -463,7 +534,7 @@ struct mei_device { | |||
463 | struct dev_pm_domain pg_domain; | 534 | struct dev_pm_domain pg_domain; |
464 | #endif /* CONFIG_PM_RUNTIME */ | 535 | #endif /* CONFIG_PM_RUNTIME */ |
465 | 536 | ||
466 | unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ | 537 | unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; |
467 | u32 rd_msg_hdr; | 538 | u32 rd_msg_hdr; |
468 | 539 | ||
469 | /* write buffer */ | 540 | /* write buffer */ |
@@ -477,12 +548,11 @@ struct mei_device { | |||
477 | } wr_msg; | 548 | } wr_msg; |
478 | 549 | ||
479 | struct hbm_version version; | 550 | struct hbm_version version; |
551 | unsigned int hbm_f_pg_supported:1; | ||
480 | 552 | ||
481 | struct mei_me_client *me_clients; /* Note: memory has to be allocated */ | 553 | struct list_head me_clients; |
482 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); | 554 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); |
483 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); | 555 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); |
484 | unsigned long me_clients_num; | ||
485 | unsigned long me_client_presentation_num; | ||
486 | unsigned long me_client_index; | 556 | unsigned long me_client_index; |
487 | 557 | ||
488 | struct mei_cl wd_cl; | 558 | struct mei_cl wd_cl; |
@@ -523,7 +593,6 @@ struct mei_device { | |||
523 | 593 | ||
524 | 594 | ||
525 | const struct mei_hw_ops *ops; | 595 | const struct mei_hw_ops *ops; |
526 | const struct mei_cfg *cfg; | ||
527 | char hw[0] __aligned(sizeof(void *)); | 596 | char hw[0] __aligned(sizeof(void *)); |
528 | }; | 597 | }; |
529 | 598 | ||
@@ -535,8 +604,10 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) | |||
535 | /** | 604 | /** |
536 | * mei_data2slots - get slots - number of (dwords) from a message length | 605 | * mei_data2slots - get slots - number of (dwords) from a message length |
537 | * + size of the mei header | 606 | * + size of the mei header |
538 | * @length - size of the messages in bytes | 607 | * |
539 | * returns - number of slots | 608 | * @length: size of the messages in bytes |
609 | * | ||
610 | * Return: number of slots | ||
540 | */ | 611 | */ |
541 | static inline u32 mei_data2slots(size_t length) | 612 | static inline u32 mei_data2slots(size_t length) |
542 | { | 613 | { |
@@ -544,9 +615,11 @@ static inline u32 mei_data2slots(size_t length) | |||
544 | } | 615 | } |
545 | 616 | ||
546 | /** | 617 | /** |
547 | * mei_slots2data- get data in slots - bytes from slots | 618 | * mei_slots2data - get data in slots - bytes from slots |
548 | * @slots - number of available slots | 619 | * |
549 | * returns - number of bytes in slots | 620 | * @slots: number of available slots |
621 | * | ||
622 | * Return: number of bytes in slots | ||
550 | */ | 623 | */ |
551 | static inline u32 mei_slots2data(int slots) | 624 | static inline u32 mei_slots2data(int slots) |
552 | { | 625 | { |
@@ -556,7 +629,9 @@ static inline u32 mei_slots2data(int slots) | |||
556 | /* | 629 | /* |
557 | * mei init function prototypes | 630 | * mei init function prototypes |
558 | */ | 631 | */ |
559 | void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); | 632 | void mei_device_init(struct mei_device *dev, |
633 | struct device *device, | ||
634 | const struct mei_hw_ops *hw_ops); | ||
560 | int mei_reset(struct mei_device *dev); | 635 | int mei_reset(struct mei_device *dev); |
561 | int mei_start(struct mei_device *dev); | 636 | int mei_start(struct mei_device *dev); |
562 | int mei_restart(struct mei_device *dev); | 637 | int mei_restart(struct mei_device *dev); |
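
The widened mei_device_init() prototype above is what lets the rest of the core drop dev->pdev: the bus glue passes in the generic struct device together with its hardware ops. A hedged sketch of that hand-off from a PCI probe (helper name and sizes are illustrative; the real initialization path goes through mei_txe_dev_init(), seen in the pci-txe.c hunk below, and its ME counterpart):

/* illustrative only: allocate the core struct plus the backend's private area
 * and hand mei_device_init() a generic struct device instead of a pci_dev */
static struct mei_device *example_mei_alloc(struct pci_dev *pdev,
                                            const struct mei_hw_ops *ops,
                                            size_t hw_priv_size)
{
        struct mei_device *dev;

        dev = kzalloc(sizeof(*dev) + hw_priv_size, GFP_KERNEL);
        if (!dev)
                return NULL;

        mei_device_init(dev, &pdev->dev, ops);
        return dev;
}

Core code then logs and registers against dev->dev, as the dev_dbg()/dev_err() conversions in the earlier hunks show, while PCI-only paths recover the pci_dev with to_pci_dev(dev->dev) when they really need it (see the pci-me.c and pci-txe.c hunks below).
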
@@ -622,12 +697,12 @@ int mei_wd_host_init(struct mei_device *dev); | |||
622 | /* | 697 | /* |
623 | * mei_watchdog_register - Registering watchdog interface | 698 | * mei_watchdog_register - Registering watchdog interface |
624 | * once we got connection to the WD Client | 699 | * once we got connection to the WD Client |
625 | * @dev - mei device | 700 | * @dev: mei device |
626 | */ | 701 | */ |
627 | int mei_watchdog_register(struct mei_device *dev); | 702 | int mei_watchdog_register(struct mei_device *dev); |
628 | /* | 703 | /* |
629 | * mei_watchdog_unregister - Unregistering watchdog interface | 704 | * mei_watchdog_unregister - Unregistering watchdog interface |
630 | * @dev - mei device | 705 | * @dev: mei device |
631 | */ | 706 | */ |
632 | void mei_watchdog_unregister(struct mei_device *dev); | 707 | void mei_watchdog_unregister(struct mei_device *dev); |
633 | 708 | ||
@@ -723,7 +798,11 @@ static inline int mei_count_full_read_slots(struct mei_device *dev) | |||
723 | return dev->ops->rdbuf_full_slots(dev); | 798 | return dev->ops->rdbuf_full_slots(dev); |
724 | } | 799 | } |
725 | 800 | ||
726 | int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); | 801 | static inline int mei_fw_status(struct mei_device *dev, |
802 | struct mei_fw_status *fw_status) | ||
803 | { | ||
804 | return dev->ops->fw_status(dev, fw_status); | ||
805 | } | ||
727 | 806 | ||
728 | #define FW_STS_FMT "%08X %08X" | 807 | #define FW_STS_FMT "%08X %08X" |
729 | #define FW_STS_PRM(fw_status) \ | 808 | #define FW_STS_PRM(fw_status) \ |
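
With the fw_status callback moved into mei_hw_ops, the mei_fw_status() shown above becomes a thin inline dispatcher and each hardware backend supplies its own register reader. A hypothetical backend hook-up (names invented for illustration; the real implementations live in hw-me.c and hw-txe.c):

static int example_hw_fw_status(struct mei_device *dev,
                                struct mei_fw_status *fw_sts)
{
        /* report however many status registers this generation exposes */
        fw_sts->count = 1;
        fw_sts->status[0] = 0;  /* a PCI config or MMIO read on real hardware */
        return 0;
}

static const struct mei_hw_ops example_hw_ops = {
        .fw_status = example_hw_fw_status,
        /* the remaining callbacks are omitted from this sketch */
};

Callers can then print the collected registers with the FW_STS_FMT/FW_STS_PRM helpers defined right above.
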
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index 5ccc23bc7690..622654323177 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c | |||
@@ -19,7 +19,8 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/slab.h> |
23 | |||
23 | #include <linux/mei_cl_bus.h> | 24 | #include <linux/mei_cl_bus.h> |
24 | 25 | ||
25 | #include "mei_dev.h" | 26 | #include "mei_dev.h" |
@@ -87,14 +88,20 @@ struct mei_nfc_hci_hdr { | |||
87 | 88 | ||
88 | #define MEI_NFC_HEADER_SIZE 10 | 89 | #define MEI_NFC_HEADER_SIZE 10 |
89 | 90 | ||
90 | /** mei_nfc_dev - NFC mei device | 91 | /** |
92 | * struct mei_nfc_dev - NFC mei device | ||
91 | * | 93 | * |
92 | * @cl: NFC host client | 94 | * @cl: NFC host client |
93 | * @cl_info: NFC info host client | 95 | * @cl_info: NFC info host client |
94 | * @init_work: perform connection to the info client | 96 | * @init_work: perform connection to the info client |
97 | * @send_wq: send completion wait queue | ||
95 | * @fw_ivn: NFC Interface Version Number | 98 | * @fw_ivn: NFC Interface Version Number |
96 | * @vendor_id: NFC manufacturer ID | 99 | * @vendor_id: NFC manufacturer ID |
97 | * @radio_type: NFC radio type | 100 | * @radio_type: NFC radio type |
101 | * @bus_name: bus name | ||
102 | * | ||
103 | * @req_id: message counter | ||
104 | * @recv_req_id: reception message counter | ||
98 | */ | 105 | */ |
99 | struct mei_nfc_dev { | 106 | struct mei_nfc_dev { |
100 | struct mei_cl *cl; | 107 | struct mei_cl *cl; |
@@ -163,7 +170,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) | |||
163 | return 0; | 170 | return 0; |
164 | 171 | ||
165 | default: | 172 | default: |
166 | dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", | 173 | dev_err(dev->dev, "Unknown radio type 0x%x\n", |
167 | ndev->radio_type); | 174 | ndev->radio_type); |
168 | 175 | ||
169 | return -EINVAL; | 176 | return -EINVAL; |
@@ -175,14 +182,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) | |||
175 | ndev->bus_name = "pn544"; | 182 | ndev->bus_name = "pn544"; |
176 | return 0; | 183 | return 0; |
177 | default: | 184 | default: |
178 | dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", | 185 | dev_err(dev->dev, "Unknown radio type 0x%x\n", |
179 | ndev->radio_type); | 186 | ndev->radio_type); |
180 | 187 | ||
181 | return -EINVAL; | 188 | return -EINVAL; |
182 | } | 189 | } |
183 | 190 | ||
184 | default: | 191 | default: |
185 | dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", | 192 | dev_err(dev->dev, "Unknown vendor ID 0x%x\n", |
186 | ndev->vendor_id); | 193 | ndev->vendor_id); |
187 | 194 | ||
188 | return -EINVAL; | 195 | return -EINVAL; |
@@ -231,21 +238,21 @@ static int mei_nfc_connect(struct mei_nfc_dev *ndev) | |||
231 | 238 | ||
232 | ret = __mei_cl_send(cl, (u8 *)cmd, connect_length); | 239 | ret = __mei_cl_send(cl, (u8 *)cmd, connect_length); |
233 | if (ret < 0) { | 240 | if (ret < 0) { |
234 | dev_err(&dev->pdev->dev, "Could not send connect cmd\n"); | 241 | dev_err(dev->dev, "Could not send connect cmd\n"); |
235 | goto err; | 242 | goto err; |
236 | } | 243 | } |
237 | 244 | ||
238 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length); | 245 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length); |
239 | if (bytes_recv < 0) { | 246 | if (bytes_recv < 0) { |
240 | dev_err(&dev->pdev->dev, "Could not read connect response\n"); | 247 | dev_err(dev->dev, "Could not read connect response\n"); |
241 | ret = bytes_recv; | 248 | ret = bytes_recv; |
242 | goto err; | 249 | goto err; |
243 | } | 250 | } |
244 | 251 | ||
245 | dev_info(&dev->pdev->dev, "IVN 0x%x Vendor ID 0x%x\n", | 252 | dev_info(dev->dev, "IVN 0x%x Vendor ID 0x%x\n", |
246 | connect_resp->fw_ivn, connect_resp->vendor_id); | 253 | connect_resp->fw_ivn, connect_resp->vendor_id); |
247 | 254 | ||
248 | dev_info(&dev->pdev->dev, "ME FW %d.%d.%d.%d\n", | 255 | dev_info(dev->dev, "ME FW %d.%d.%d.%d\n", |
249 | connect_resp->me_major, connect_resp->me_minor, | 256 | connect_resp->me_major, connect_resp->me_minor, |
250 | connect_resp->me_hotfix, connect_resp->me_build); | 257 | connect_resp->me_hotfix, connect_resp->me_build); |
251 | 258 | ||
@@ -279,7 +286,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev) | |||
279 | 286 | ||
280 | ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); | 287 | ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); |
281 | if (ret < 0) { | 288 | if (ret < 0) { |
282 | dev_err(&dev->pdev->dev, "Could not send IF version cmd\n"); | 289 | dev_err(dev->dev, "Could not send IF version cmd\n"); |
283 | return ret; | 290 | return ret; |
284 | } | 291 | } |
285 | 292 | ||
@@ -293,7 +300,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev) | |||
293 | 300 | ||
294 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); | 301 | bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); |
295 | if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { | 302 | if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { |
296 | dev_err(&dev->pdev->dev, "Could not read IF version\n"); | 303 | dev_err(dev->dev, "Could not read IF version\n"); |
297 | ret = -EIO; | 304 | ret = -EIO; |
298 | goto err; | 305 | goto err; |
299 | } | 306 | } |
@@ -319,7 +326,7 @@ static int mei_nfc_enable(struct mei_cl_device *cldev) | |||
319 | 326 | ||
320 | ret = mei_nfc_connect(ndev); | 327 | ret = mei_nfc_connect(ndev); |
321 | if (ret < 0) { | 328 | if (ret < 0) { |
322 | dev_err(&dev->pdev->dev, "Could not connect to NFC"); | 329 | dev_err(dev->dev, "Could not connect to NFC"); |
323 | return ret; | 330 | return ret; |
324 | } | 331 | } |
325 | 332 | ||
@@ -361,7 +368,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) | |||
361 | 368 | ||
362 | if (!wait_event_interruptible_timeout(ndev->send_wq, | 369 | if (!wait_event_interruptible_timeout(ndev->send_wq, |
363 | ndev->recv_req_id == ndev->req_id, HZ)) { | 370 | ndev->recv_req_id == ndev->req_id, HZ)) { |
364 | dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); | 371 | dev_err(dev->dev, "NFC MEI command timeout\n"); |
365 | err = -ETIME; | 372 | err = -ETIME; |
366 | } else { | 373 | } else { |
367 | ndev->req_id++; | 374 | ndev->req_id++; |
@@ -418,8 +425,7 @@ static void mei_nfc_init(struct work_struct *work) | |||
418 | 425 | ||
419 | if (mei_cl_connect(cl_info, NULL) < 0) { | 426 | if (mei_cl_connect(cl_info, NULL) < 0) { |
420 | mutex_unlock(&dev->device_lock); | 427 | mutex_unlock(&dev->device_lock); |
421 | dev_err(&dev->pdev->dev, | 428 | dev_err(dev->dev, "Could not connect to the NFC INFO ME client"); |
422 | "Could not connect to the NFC INFO ME client"); | ||
423 | 429 | ||
424 | goto err; | 430 | goto err; |
425 | } | 431 | } |
@@ -427,21 +433,19 @@ static void mei_nfc_init(struct work_struct *work) | |||
427 | mutex_unlock(&dev->device_lock); | 433 | mutex_unlock(&dev->device_lock); |
428 | 434 | ||
429 | if (mei_nfc_if_version(ndev) < 0) { | 435 | if (mei_nfc_if_version(ndev) < 0) { |
430 | dev_err(&dev->pdev->dev, "Could not get the NFC interface version"); | 436 | dev_err(dev->dev, "Could not get the NFC interface version"); |
431 | 437 | ||
432 | goto err; | 438 | goto err; |
433 | } | 439 | } |
434 | 440 | ||
435 | dev_info(&dev->pdev->dev, | 441 | dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", |
436 | "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", | ||
437 | ndev->fw_ivn, ndev->vendor_id, ndev->radio_type); | 442 | ndev->fw_ivn, ndev->vendor_id, ndev->radio_type); |
438 | 443 | ||
439 | mutex_lock(&dev->device_lock); | 444 | mutex_lock(&dev->device_lock); |
440 | 445 | ||
441 | if (mei_cl_disconnect(cl_info) < 0) { | 446 | if (mei_cl_disconnect(cl_info) < 0) { |
442 | mutex_unlock(&dev->device_lock); | 447 | mutex_unlock(&dev->device_lock); |
443 | dev_err(&dev->pdev->dev, | 448 | dev_err(dev->dev, "Could not disconnect the NFC INFO ME client"); |
444 | "Could not disconnect the NFC INFO ME client"); | ||
445 | 449 | ||
446 | goto err; | 450 | goto err; |
447 | } | 451 | } |
@@ -449,15 +453,13 @@ static void mei_nfc_init(struct work_struct *work) | |||
449 | mutex_unlock(&dev->device_lock); | 453 | mutex_unlock(&dev->device_lock); |
450 | 454 | ||
451 | if (mei_nfc_build_bus_name(ndev) < 0) { | 455 | if (mei_nfc_build_bus_name(ndev) < 0) { |
452 | dev_err(&dev->pdev->dev, | 456 | dev_err(dev->dev, "Could not build the bus ID name\n"); |
453 | "Could not build the bus ID name\n"); | ||
454 | return; | 457 | return; |
455 | } | 458 | } |
456 | 459 | ||
457 | cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops); | 460 | cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops); |
458 | if (!cldev) { | 461 | if (!cldev) { |
459 | dev_err(&dev->pdev->dev, | 462 | dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n"); |
460 | "Could not add the NFC device to the MEI bus\n"); | ||
461 | 463 | ||
462 | goto err; | 464 | goto err; |
463 | } | 465 | } |
@@ -472,7 +474,6 @@ err: | |||
472 | mei_nfc_free(ndev); | 474 | mei_nfc_free(ndev); |
473 | mutex_unlock(&dev->device_lock); | 475 | mutex_unlock(&dev->device_lock); |
474 | 476 | ||
475 | return; | ||
476 | } | 477 | } |
477 | 478 | ||
478 | 479 | ||
@@ -480,7 +481,8 @@ int mei_nfc_host_init(struct mei_device *dev) | |||
480 | { | 481 | { |
481 | struct mei_nfc_dev *ndev = &nfc_dev; | 482 | struct mei_nfc_dev *ndev = &nfc_dev; |
482 | struct mei_cl *cl_info, *cl = NULL; | 483 | struct mei_cl *cl_info, *cl = NULL; |
483 | int i, ret; | 484 | struct mei_me_client *me_cl; |
485 | int ret; | ||
484 | 486 | ||
485 | /* already initialized */ | 487 | /* already initialized */ |
486 | if (ndev->cl_info) | 488 | if (ndev->cl_info) |
@@ -498,40 +500,38 @@ int mei_nfc_host_init(struct mei_device *dev) | |||
498 | } | 500 | } |
499 | 501 | ||
500 | /* check for valid client id */ | 502 | /* check for valid client id */ |
501 | i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); | 503 | me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); |
502 | if (i < 0) { | 504 | if (!me_cl) { |
503 | dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); | 505 | dev_info(dev->dev, "nfc: failed to find the client\n"); |
504 | ret = -ENOTTY; | 506 | ret = -ENOTTY; |
505 | goto err; | 507 | goto err; |
506 | } | 508 | } |
507 | 509 | ||
508 | cl_info->me_client_id = dev->me_clients[i].client_id; | 510 | cl_info->me_client_id = me_cl->client_id; |
511 | cl_info->cl_uuid = me_cl->props.protocol_name; | ||
509 | 512 | ||
510 | ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY); | 513 | ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY); |
511 | if (ret) | 514 | if (ret) |
512 | goto err; | 515 | goto err; |
513 | 516 | ||
514 | cl_info->device_uuid = mei_nfc_info_guid; | ||
515 | 517 | ||
516 | list_add_tail(&cl_info->device_link, &dev->device_list); | 518 | list_add_tail(&cl_info->device_link, &dev->device_list); |
517 | 519 | ||
518 | /* check for valid client id */ | 520 | /* check for valid client id */ |
519 | i = mei_me_cl_by_uuid(dev, &mei_nfc_guid); | 521 | me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid); |
520 | if (i < 0) { | 522 | if (!me_cl) { |
521 | dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); | 523 | dev_info(dev->dev, "nfc: failed to find the client\n"); |
522 | ret = -ENOTTY; | 524 | ret = -ENOTTY; |
523 | goto err; | 525 | goto err; |
524 | } | 526 | } |
525 | 527 | ||
526 | cl->me_client_id = dev->me_clients[i].client_id; | 528 | cl->me_client_id = me_cl->client_id; |
529 | cl->cl_uuid = me_cl->props.protocol_name; | ||
527 | 530 | ||
528 | ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); | 531 | ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); |
529 | if (ret) | 532 | if (ret) |
530 | goto err; | 533 | goto err; |
531 | 534 | ||
532 | cl->device_uuid = mei_nfc_guid; | ||
533 | |||
534 | |||
535 | list_add_tail(&cl->device_link, &dev->device_list); | 535 | list_add_tail(&cl->device_link, &dev->device_list); |
536 | 536 | ||
537 | ndev->req_id = 1; | 537 | ndev->req_id = 1; |
@@ -551,6 +551,7 @@ err: | |||
551 | void mei_nfc_host_exit(struct mei_device *dev) | 551 | void mei_nfc_host_exit(struct mei_device *dev) |
552 | { | 552 | { |
553 | struct mei_nfc_dev *ndev = &nfc_dev; | 553 | struct mei_nfc_dev *ndev = &nfc_dev; |
554 | |||
554 | cancel_work_sync(&ndev->init_work); | 555 | cancel_work_sync(&ndev->init_work); |
555 | } | 556 | } |
556 | 557 | ||
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 959c313d84a7..f3225b1643ab 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -98,12 +98,12 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} | |||
98 | #endif /* CONFIG_PM_RUNTIME */ | 98 | #endif /* CONFIG_PM_RUNTIME */ |
99 | 99 | ||
100 | /** | 100 | /** |
101 | * mei_quirk_probe - probe for devices that doesn't valid ME interface | 101 | * mei_me_quirk_probe - probe for devices that doesn't valid ME interface |
102 | * | 102 | * |
103 | * @pdev: PCI device structure | 103 | * @pdev: PCI device structure |
104 | * @cfg: per generation config | 104 | * @cfg: per generation config |
105 | * | 105 | * |
106 | * returns true if ME Interface is valid, false otherwise | 106 | * Return: true if ME Interface is valid, false otherwise |
107 | */ | 107 | */ |
108 | static bool mei_me_quirk_probe(struct pci_dev *pdev, | 108 | static bool mei_me_quirk_probe(struct pci_dev *pdev, |
109 | const struct mei_cfg *cfg) | 109 | const struct mei_cfg *cfg) |
@@ -117,12 +117,12 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev, | |||
117 | } | 117 | } |
118 | 118 | ||
119 | /** | 119 | /** |
120 | * mei_probe - Device Initialization Routine | 120 | * mei_me_probe - Device Initialization Routine |
121 | * | 121 | * |
122 | * @pdev: PCI device structure | 122 | * @pdev: PCI device structure |
123 | * @ent: entry in kcs_pci_tbl | 123 | * @ent: entry in kcs_pci_tbl |
124 | * | 124 | * |
125 | * returns 0 on success, <0 on failure. | 125 | * Return: 0 on success, <0 on failure. |
126 | */ | 126 | */ |
127 | static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 127 | static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
128 | { | 128 | { |
@@ -249,7 +249,7 @@ end: | |||
249 | } | 249 | } |
250 | 250 | ||
251 | /** | 251 | /** |
252 | * mei_remove - Device Removal Routine | 252 | * mei_me_remove - Device Removal Routine |
253 | * | 253 | * |
254 | * @pdev: PCI device structure | 254 | * @pdev: PCI device structure |
255 | * | 255 | * |
@@ -430,7 +430,7 @@ static int mei_me_pm_runtime_resume(struct device *device) | |||
430 | */ | 430 | */ |
431 | static inline void mei_me_set_pm_domain(struct mei_device *dev) | 431 | static inline void mei_me_set_pm_domain(struct mei_device *dev) |
432 | { | 432 | { |
433 | struct pci_dev *pdev = dev->pdev; | 433 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
434 | 434 | ||
435 | if (pdev->dev.bus && pdev->dev.bus->pm) { | 435 | if (pdev->dev.bus && pdev->dev.bus->pm) { |
436 | dev->pg_domain.ops = *pdev->dev.bus->pm; | 436 | dev->pg_domain.ops = *pdev->dev.bus->pm; |
@@ -451,7 +451,7 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) | |||
451 | static inline void mei_me_unset_pm_domain(struct mei_device *dev) | 451 | static inline void mei_me_unset_pm_domain(struct mei_device *dev) |
452 | { | 452 | { |
453 | /* stop using pm callbacks if any */ | 453 | /* stop using pm callbacks if any */ |
454 | dev->pdev->dev.pm_domain = NULL; | 454 | dev->dev->pm_domain = NULL; |
455 | } | 455 | } |
456 | #endif /* CONFIG_PM_RUNTIME */ | 456 | #endif /* CONFIG_PM_RUNTIME */ |
457 | 457 | ||
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index 74727dda51c1..bee1c6fb7e75 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
@@ -36,7 +36,8 @@ | |||
36 | #include "hw-txe.h" | 36 | #include "hw-txe.h" |
37 | 37 | ||
38 | static const struct pci_device_id mei_txe_pci_tbl[] = { | 38 | static const struct pci_device_id mei_txe_pci_tbl[] = { |
39 | {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ | 39 | {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */ |
40 | |||
40 | {0, } | 41 | {0, } |
41 | }; | 42 | }; |
42 | MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); | 43 | MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); |
@@ -52,6 +53,7 @@ static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} | |||
52 | static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) | 53 | static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) |
53 | { | 54 | { |
54 | int i; | 55 | int i; |
56 | |||
55 | for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { | 57 | for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { |
56 | if (hw->mem_addr[i]) { | 58 | if (hw->mem_addr[i]) { |
57 | pci_iounmap(pdev, hw->mem_addr[i]); | 59 | pci_iounmap(pdev, hw->mem_addr[i]); |
@@ -65,11 +67,10 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) | |||
65 | * @pdev: PCI device structure | 67 | * @pdev: PCI device structure |
66 | * @ent: entry in mei_txe_pci_tbl | 68 | * @ent: entry in mei_txe_pci_tbl |
67 | * | 69 | * |
68 | * returns 0 on success, <0 on failure. | 70 | * Return: 0 on success, <0 on failure. |
69 | */ | 71 | */ |
70 | static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 72 | static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
71 | { | 73 | { |
72 | const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); | ||
73 | struct mei_device *dev; | 74 | struct mei_device *dev; |
74 | struct mei_txe_hw *hw; | 75 | struct mei_txe_hw *hw; |
75 | int err; | 76 | int err; |
@@ -100,7 +101,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
100 | } | 101 | } |
101 | 102 | ||
102 | /* allocates and initializes the mei dev structure */ | 103 | /* allocates and initializes the mei dev structure */ |
103 | dev = mei_txe_dev_init(pdev, cfg); | 104 | dev = mei_txe_dev_init(pdev); |
104 | if (!dev) { | 105 | if (!dev) { |
105 | err = -ENOMEM; | 106 | err = -ENOMEM; |
106 | goto release_regions; | 107 | goto release_regions; |
@@ -377,7 +378,7 @@ static int mei_txe_pm_runtime_resume(struct device *device) | |||
377 | */ | 378 | */ |
378 | static inline void mei_txe_set_pm_domain(struct mei_device *dev) | 379 | static inline void mei_txe_set_pm_domain(struct mei_device *dev) |
379 | { | 380 | { |
380 | struct pci_dev *pdev = dev->pdev; | 381 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
381 | 382 | ||
382 | if (pdev->dev.bus && pdev->dev.bus->pm) { | 383 | if (pdev->dev.bus && pdev->dev.bus->pm) { |
383 | dev->pg_domain.ops = *pdev->dev.bus->pm; | 384 | dev->pg_domain.ops = *pdev->dev.bus->pm; |
@@ -398,7 +399,7 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) | |||
398 | static inline void mei_txe_unset_pm_domain(struct mei_device *dev) | 399 | static inline void mei_txe_unset_pm_domain(struct mei_device *dev) |
399 | { | 400 | { |
400 | /* stop using pm callbacks if any */ | 401 | /* stop using pm callbacks if any */ |
401 | dev->pdev->dev.pm_domain = NULL; | 402 | dev->dev->pm_domain = NULL; |
402 | } | 403 | } |
403 | #endif /* CONFIG_PM_RUNTIME */ | 404 | #endif /* CONFIG_PM_RUNTIME */ |
404 | 405 | ||
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index a84a664dfccb..b836dfffceb5 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/pci.h> | ||
21 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
22 | #include <linux/watchdog.h> | 21 | #include <linux/watchdog.h> |
23 | 22 | ||
@@ -42,7 +41,7 @@ const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89, | |||
42 | 41 | ||
43 | static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) | 42 | static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) |
44 | { | 43 | { |
45 | dev_dbg(&dev->pdev->dev, "wd: set timeout=%d.\n", timeout); | 44 | dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout); |
46 | memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); | 45 | memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); |
47 | memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); | 46 | memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); |
48 | } | 47 | } |
@@ -52,14 +51,14 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) | |||
52 | * | 51 | * |
53 | * @dev: the device structure | 52 | * @dev: the device structure |
54 | * | 53 | * |
55 | * returns -ENOTTY if wd client cannot be found | 54 | * Return: -ENOTTY if wd client cannot be found |
56 | * -EIO if write has failed | 55 | * -EIO if write has failed |
57 | * 0 on success | 56 | * 0 on success |
58 | */ | 57 | */ |
59 | int mei_wd_host_init(struct mei_device *dev) | 58 | int mei_wd_host_init(struct mei_device *dev) |
60 | { | 59 | { |
61 | struct mei_cl *cl = &dev->wd_cl; | 60 | struct mei_cl *cl = &dev->wd_cl; |
62 | int id; | 61 | struct mei_me_client *me_cl; |
63 | int ret; | 62 | int ret; |
64 | 63 | ||
65 | mei_cl_init(cl, dev); | 64 | mei_cl_init(cl, dev); |
@@ -69,25 +68,26 @@ int mei_wd_host_init(struct mei_device *dev) | |||
69 | 68 | ||
70 | 69 | ||
71 | /* check for valid client id */ | 70 | /* check for valid client id */ |
72 | id = mei_me_cl_by_uuid(dev, &mei_wd_guid); | 71 | me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid); |
73 | if (id < 0) { | 72 | if (!me_cl) { |
74 | dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); | 73 | dev_info(dev->dev, "wd: failed to find the client\n"); |
75 | return -ENOTTY; | 74 | return -ENOTTY; |
76 | } | 75 | } |
77 | 76 | ||
78 | cl->me_client_id = dev->me_clients[id].client_id; | 77 | cl->me_client_id = me_cl->client_id; |
78 | cl->cl_uuid = me_cl->props.protocol_name; | ||
79 | 79 | ||
80 | ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); | 80 | ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); |
81 | 81 | ||
82 | if (ret < 0) { | 82 | if (ret < 0) { |
83 | dev_info(&dev->pdev->dev, "wd: failed link client\n"); | 83 | dev_info(dev->dev, "wd: failed link client\n"); |
84 | return ret; | 84 | return ret; |
85 | } | 85 | } |
86 | 86 | ||
87 | ret = mei_cl_connect(cl, NULL); | 87 | ret = mei_cl_connect(cl, NULL); |
88 | 88 | ||
89 | if (ret) { | 89 | if (ret) { |
90 | dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret); | 90 | dev_err(dev->dev, "wd: failed to connect = %d\n", ret); |
91 | mei_cl_unlink(cl); | 91 | mei_cl_unlink(cl); |
92 | return ret; | 92 | return ret; |
93 | } | 93 | } |
@@ -105,7 +105,7 @@ int mei_wd_host_init(struct mei_device *dev) | |||
105 | * | 105 | * |
106 | * @dev: the device structure | 106 | * @dev: the device structure |
107 | * | 107 | * |
108 | * returns 0 if success, | 108 | * Return: 0 if success, |
109 | * -EIO when message send fails | 109 | * -EIO when message send fails |
110 | * -EINVAL when invalid message is to be sent | 110 | * -EINVAL when invalid message is to be sent |
111 | * -ENODEV on flow control failure | 111 | * -ENODEV on flow control failure |
@@ -127,19 +127,19 @@ int mei_wd_send(struct mei_device *dev) | |||
127 | else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) | 127 | else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) |
128 | hdr.length = MEI_WD_STOP_MSG_SIZE; | 128 | hdr.length = MEI_WD_STOP_MSG_SIZE; |
129 | else { | 129 | else { |
130 | dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n"); | 130 | dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n"); |
131 | return -EINVAL; | 131 | return -EINVAL; |
132 | } | 132 | } |
133 | 133 | ||
134 | ret = mei_write_message(dev, &hdr, dev->wd_data); | 134 | ret = mei_write_message(dev, &hdr, dev->wd_data); |
135 | if (ret) { | 135 | if (ret) { |
136 | dev_err(&dev->pdev->dev, "wd: write message failed\n"); | 136 | dev_err(dev->dev, "wd: write message failed\n"); |
137 | return ret; | 137 | return ret; |
138 | } | 138 | } |
139 | 139 | ||
140 | ret = mei_cl_flow_ctrl_reduce(cl); | 140 | ret = mei_cl_flow_ctrl_reduce(cl); |
141 | if (ret) { | 141 | if (ret) { |
142 | dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n"); | 142 | dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n"); |
143 | return ret; | 143 | return ret; |
144 | } | 144 | } |
145 | 145 | ||
@@ -150,9 +150,8 @@ int mei_wd_send(struct mei_device *dev) | |||
150 | * mei_wd_stop - sends watchdog stop message to fw. | 150 | * mei_wd_stop - sends watchdog stop message to fw. |
151 | * | 151 | * |
152 | * @dev: the device structure | 152 | * @dev: the device structure |
153 | * @preserve: indicate if to keep the timeout value | ||
154 | * | 153 | * |
155 | * returns 0 if success | 154 | * Return: 0 if success |
156 | * on error: | 155 | * on error: |
157 | * -EIO when message send fails | 156 | * -EIO when message send fails |
158 | * -EINVAL when invalid message is to be sent | 157 | * -EINVAL when invalid message is to be sent |
@@ -192,11 +191,10 @@ int mei_wd_stop(struct mei_device *dev) | |||
192 | if (dev->wd_state != MEI_WD_IDLE) { | 191 | if (dev->wd_state != MEI_WD_IDLE) { |
193 | /* timeout */ | 192 | /* timeout */ |
194 | ret = -ETIME; | 193 | ret = -ETIME; |
195 | dev_warn(&dev->pdev->dev, | 194 | dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret); |
196 | "wd: stop failed to complete ret=%d.\n", ret); | ||
197 | goto err; | 195 | goto err; |
198 | } | 196 | } |
199 | dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n", | 197 | dev_dbg(dev->dev, "wd: stop completed after %u msec\n", |
200 | MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); | 198 | MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); |
201 | return 0; | 199 | return 0; |
202 | err: | 200 | err: |
@@ -208,7 +206,7 @@ err: | |||
208 | * | 206 | * |
209 | * @wd_dev - watchdog device struct | 207 | * @wd_dev - watchdog device struct |
210 | * | 208 | * |
211 | * returns 0 if success, negative errno code for failure | 209 | * Return: 0 if success, negative errno code for failure |
212 | */ | 210 | */ |
213 | static int mei_wd_ops_start(struct watchdog_device *wd_dev) | 211 | static int mei_wd_ops_start(struct watchdog_device *wd_dev) |
214 | { | 212 | { |
@@ -222,15 +220,13 @@ static int mei_wd_ops_start(struct watchdog_device *wd_dev) | |||
222 | mutex_lock(&dev->device_lock); | 220 | mutex_lock(&dev->device_lock); |
223 | 221 | ||
224 | if (dev->dev_state != MEI_DEV_ENABLED) { | 222 | if (dev->dev_state != MEI_DEV_ENABLED) { |
225 | dev_dbg(&dev->pdev->dev, | 223 | dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n", |
226 | "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n", | ||
227 | mei_dev_state_str(dev->dev_state)); | 224 | mei_dev_state_str(dev->dev_state)); |
228 | goto end_unlock; | 225 | goto end_unlock; |
229 | } | 226 | } |
230 | 227 | ||
231 | if (dev->wd_cl.state != MEI_FILE_CONNECTED) { | 228 | if (dev->wd_cl.state != MEI_FILE_CONNECTED) { |
232 | dev_dbg(&dev->pdev->dev, | 229 | dev_dbg(dev->dev, "MEI Driver is not connected to Watchdog Client\n"); |
233 | "MEI Driver is not connected to Watchdog Client\n"); | ||
234 | goto end_unlock; | 230 | goto end_unlock; |
235 | } | 231 | } |
236 | 232 | ||
@@ -247,7 +243,7 @@ end_unlock: | |||
247 | * | 243 | * |
248 | * @wd_dev - watchdog device struct | 244 | * @wd_dev - watchdog device struct |
249 | * | 245 | * |
250 | * returns 0 if success, negative errno code for failure | 246 | * Return: 0 if success, negative errno code for failure |
251 | */ | 247 | */ |
252 | static int mei_wd_ops_stop(struct watchdog_device *wd_dev) | 248 | static int mei_wd_ops_stop(struct watchdog_device *wd_dev) |
253 | { | 249 | { |
@@ -269,7 +265,7 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev) | |||
269 | * | 265 | * |
270 | * @wd_dev - watchdog device struct | 266 | * @wd_dev - watchdog device struct |
271 | * | 267 | * |
272 | * returns 0 if success, negative errno code for failure | 268 | * Return: 0 if success, negative errno code for failure |
273 | */ | 269 | */ |
274 | static int mei_wd_ops_ping(struct watchdog_device *wd_dev) | 270 | static int mei_wd_ops_ping(struct watchdog_device *wd_dev) |
275 | { | 271 | { |
@@ -283,7 +279,7 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) | |||
283 | mutex_lock(&dev->device_lock); | 279 | mutex_lock(&dev->device_lock); |
284 | 280 | ||
285 | if (dev->wd_cl.state != MEI_FILE_CONNECTED) { | 281 | if (dev->wd_cl.state != MEI_FILE_CONNECTED) { |
286 | dev_err(&dev->pdev->dev, "wd: not connected.\n"); | 282 | dev_err(dev->dev, "wd: not connected.\n"); |
287 | ret = -ENODEV; | 283 | ret = -ENODEV; |
288 | goto end; | 284 | goto end; |
289 | } | 285 | } |
@@ -296,7 +292,7 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) | |||
296 | /* Check if we can send the ping to HW*/ | 292 | /* Check if we can send the ping to HW*/ |
297 | if (ret && mei_hbuf_acquire(dev)) { | 293 | if (ret && mei_hbuf_acquire(dev)) { |
298 | 294 | ||
299 | dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); | 295 | dev_dbg(dev->dev, "wd: sending ping\n"); |
300 | 296 | ||
301 | ret = mei_wd_send(dev); | 297 | ret = mei_wd_send(dev); |
302 | if (ret) | 298 | if (ret) |
@@ -317,7 +313,7 @@ end: | |||
317 | * @wd_dev - watchdog device struct | 313 | * @wd_dev - watchdog device struct |
318 | * @timeout - timeout value to set | 314 | * @timeout - timeout value to set |
319 | * | 315 | * |
320 | * returns 0 if success, negative errno code for failure | 316 | * Return: 0 if success, negative errno code for failure |
321 | */ | 317 | */ |
322 | static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, | 318 | static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, |
323 | unsigned int timeout) | 319 | unsigned int timeout) |
@@ -379,13 +375,12 @@ int mei_watchdog_register(struct mei_device *dev) | |||
379 | ret = watchdog_register_device(&amt_wd_dev); | 375 | ret = watchdog_register_device(&amt_wd_dev); |
380 | mutex_lock(&dev->device_lock); | 376 | mutex_lock(&dev->device_lock); |
381 | if (ret) { | 377 | if (ret) { |
382 | dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n", | 378 | dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n", |
383 | ret); | 379 | ret); |
384 | return ret; | 380 | return ret; |
385 | } | 381 | } |
386 | 382 | ||
387 | dev_dbg(&dev->pdev->dev, | 383 | dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n"); |
388 | "wd: successfully register watchdog interface.\n"); | ||
389 | watchdog_set_drvdata(&amt_wd_dev, dev); | 384 | watchdog_set_drvdata(&amt_wd_dev, dev); |
390 | return 0; | 385 | return 0; |
391 | } | 386 | } |
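Taken together, the mei hunks above (hw-txe.c, wd.c and the rest of the series) follow one pattern: the driver keeps only a generic struct device pointer and converts back to the PCI device with to_pci_dev() where bus-specific code really needs it, so logging calls shrink from &dev->pdev->dev to dev->dev. A hedged sketch of that idiom, with illustrative my_* names rather than the real mei structures:

#include <linux/device.h>
#include <linux/pci.h>

struct my_device {
        struct device *dev;     /* generic device, replaces struct pci_dev *pdev */
};

static void my_log_state(struct my_device *mdev)
{
        /* bus-agnostic code logs through the generic device */
        dev_dbg(mdev->dev, "device state changed\n");
}

static void my_pci_setup(struct my_device *mdev)
{
        /* bus-specific code recovers the PCI device on demand */
        struct pci_dev *pdev = to_pci_dev(mdev->dev);

        dev_info(&pdev->dev, "PCI vendor 0x%04x\n", pdev->vendor);
}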
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c index 2e13614d41e8..fe3ad0ca9a3e 100644 --- a/drivers/misc/spear13xx_pcie_gadget.c +++ b/drivers/misc/spear13xx_pcie_gadget.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * warranty of any kind, whether express or implied. | 9 | * warranty of any kind, whether express or implied. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/device.h> | ||
12 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
13 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
14 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
@@ -743,58 +744,33 @@ static int spear_pcie_gadget_probe(struct platform_device *pdev) | |||
743 | struct config_item *cg_item; | 744 | struct config_item *cg_item; |
744 | struct configfs_subsystem *subsys; | 745 | struct configfs_subsystem *subsys; |
745 | 746 | ||
746 | /* get resource for application registers*/ | 747 | target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL); |
747 | |||
748 | res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
749 | if (!res0) { | ||
750 | dev_err(&pdev->dev, "no resource defined\n"); | ||
751 | return -EBUSY; | ||
752 | } | ||
753 | if (!request_mem_region(res0->start, resource_size(res0), | ||
754 | pdev->name)) { | ||
755 | dev_err(&pdev->dev, "pcie gadget region already claimed\n"); | ||
756 | return -EBUSY; | ||
757 | } | ||
758 | /* get resource for dbi registers*/ | ||
759 | |||
760 | res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
761 | if (!res1) { | ||
762 | dev_err(&pdev->dev, "no resource defined\n"); | ||
763 | goto err_rel_res0; | ||
764 | } | ||
765 | if (!request_mem_region(res1->start, resource_size(res1), | ||
766 | pdev->name)) { | ||
767 | dev_err(&pdev->dev, "pcie gadget region already claimed\n"); | ||
768 | goto err_rel_res0; | ||
769 | } | ||
770 | |||
771 | target = kzalloc(sizeof(*target), GFP_KERNEL); | ||
772 | if (!target) { | 748 | if (!target) { |
773 | dev_err(&pdev->dev, "out of memory\n"); | 749 | dev_err(&pdev->dev, "out of memory\n"); |
774 | status = -ENOMEM; | 750 | return -ENOMEM; |
775 | goto err_rel_res; | ||
776 | } | 751 | } |
777 | 752 | ||
778 | cg_item = &target->subsys.su_group.cg_item; | 753 | cg_item = &target->subsys.su_group.cg_item; |
779 | sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id); | 754 | sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id); |
780 | cg_item->ci_type = &pcie_gadget_target_type; | 755 | cg_item->ci_type = &pcie_gadget_target_type; |
781 | config = &target->config; | 756 | config = &target->config; |
782 | config->va_app_base = (void __iomem *)ioremap(res0->start, | 757 | |
783 | resource_size(res0)); | 758 | /* get resource for application registers*/ |
784 | if (!config->va_app_base) { | 759 | res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
760 | config->va_app_base = devm_ioremap_resource(&pdev->dev, res0); | ||
761 | if (IS_ERR(config->va_app_base)) { | ||
785 | dev_err(&pdev->dev, "ioremap fail\n"); | 762 | dev_err(&pdev->dev, "ioremap fail\n"); |
786 | status = -ENOMEM; | 763 | return PTR_ERR(config->va_app_base); |
787 | goto err_kzalloc; | ||
788 | } | 764 | } |
789 | 765 | ||
766 | /* get resource for dbi registers*/ | ||
767 | res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
790 | config->base = (void __iomem *)res1->start; | 768 | config->base = (void __iomem *)res1->start; |
791 | 769 | ||
792 | config->va_dbi_base = (void __iomem *)ioremap(res1->start, | 770 | config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1); |
793 | resource_size(res1)); | 771 | if (IS_ERR(config->va_dbi_base)) { |
794 | if (!config->va_dbi_base) { | ||
795 | dev_err(&pdev->dev, "ioremap fail\n"); | 772 | dev_err(&pdev->dev, "ioremap fail\n"); |
796 | status = -ENOMEM; | 773 | return PTR_ERR(config->va_dbi_base); |
797 | goto err_iounmap_app; | ||
798 | } | 774 | } |
799 | 775 | ||
800 | platform_set_drvdata(pdev, target); | 776 | platform_set_drvdata(pdev, target); |
@@ -802,15 +778,15 @@ static int spear_pcie_gadget_probe(struct platform_device *pdev) | |||
802 | irq = platform_get_irq(pdev, 0); | 778 | irq = platform_get_irq(pdev, 0); |
803 | if (irq < 0) { | 779 | if (irq < 0) { |
804 | dev_err(&pdev->dev, "no update irq?\n"); | 780 | dev_err(&pdev->dev, "no update irq?\n"); |
805 | status = irq; | 781 | return irq; |
806 | goto err_iounmap; | ||
807 | } | 782 | } |
808 | 783 | ||
809 | status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL); | 784 | status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq, |
785 | 0, pdev->name, NULL); | ||
810 | if (status) { | 786 | if (status) { |
811 | dev_err(&pdev->dev, | 787 | dev_err(&pdev->dev, |
812 | "pcie gadget interrupt IRQ%d already claimed\n", irq); | 788 | "pcie gadget interrupt IRQ%d already claimed\n", irq); |
813 | goto err_iounmap; | 789 | return status; |
814 | } | 790 | } |
815 | 791 | ||
816 | /* Register configfs hooks */ | 792 | /* Register configfs hooks */ |
@@ -819,7 +795,7 @@ static int spear_pcie_gadget_probe(struct platform_device *pdev) | |||
819 | mutex_init(&subsys->su_mutex); | 795 | mutex_init(&subsys->su_mutex); |
820 | status = configfs_register_subsystem(subsys); | 796 | status = configfs_register_subsystem(subsys); |
821 | if (status) | 797 | if (status) |
822 | goto err_irq; | 798 | return status; |
823 | 799 | ||
824 | /* | 800 | /* |
825 | * init basic pcie application registers | 801 | * init basic pcie application registers |
@@ -835,13 +811,12 @@ static int spear_pcie_gadget_probe(struct platform_device *pdev) | |||
835 | clk = clk_get_sys("pcie1", NULL); | 811 | clk = clk_get_sys("pcie1", NULL); |
836 | if (IS_ERR(clk)) { | 812 | if (IS_ERR(clk)) { |
837 | pr_err("%s:couldn't get clk for pcie1\n", __func__); | 813 | pr_err("%s:couldn't get clk for pcie1\n", __func__); |
838 | status = PTR_ERR(clk); | 814 | return PTR_ERR(clk); |
839 | goto err_irq; | ||
840 | } | 815 | } |
841 | status = clk_enable(clk); | 816 | status = clk_enable(clk); |
842 | if (status) { | 817 | if (status) { |
843 | pr_err("%s:couldn't enable clk for pcie1\n", __func__); | 818 | pr_err("%s:couldn't enable clk for pcie1\n", __func__); |
844 | goto err_irq; | 819 | return status; |
845 | } | 820 | } |
846 | } else if (pdev->id == 2) { | 821 | } else if (pdev->id == 2) { |
847 | /* | 822 | /* |
@@ -851,53 +826,26 @@ static int spear_pcie_gadget_probe(struct platform_device *pdev) | |||
851 | clk = clk_get_sys("pcie2", NULL); | 826 | clk = clk_get_sys("pcie2", NULL); |
852 | if (IS_ERR(clk)) { | 827 | if (IS_ERR(clk)) { |
853 | pr_err("%s:couldn't get clk for pcie2\n", __func__); | 828 | pr_err("%s:couldn't get clk for pcie2\n", __func__); |
854 | status = PTR_ERR(clk); | 829 | return PTR_ERR(clk); |
855 | goto err_irq; | ||
856 | } | 830 | } |
857 | status = clk_enable(clk); | 831 | status = clk_enable(clk); |
858 | if (status) { | 832 | if (status) { |
859 | pr_err("%s:couldn't enable clk for pcie2\n", __func__); | 833 | pr_err("%s:couldn't enable clk for pcie2\n", __func__); |
860 | goto err_irq; | 834 | return status; |
861 | } | 835 | } |
862 | } | 836 | } |
863 | spear13xx_pcie_device_init(config); | 837 | spear13xx_pcie_device_init(config); |
864 | 838 | ||
865 | return 0; | 839 | return 0; |
866 | err_irq: | ||
867 | free_irq(irq, NULL); | ||
868 | err_iounmap: | ||
869 | iounmap(config->va_dbi_base); | ||
870 | err_iounmap_app: | ||
871 | iounmap(config->va_app_base); | ||
872 | err_kzalloc: | ||
873 | kfree(target); | ||
874 | err_rel_res: | ||
875 | release_mem_region(res1->start, resource_size(res1)); | ||
876 | err_rel_res0: | ||
877 | release_mem_region(res0->start, resource_size(res0)); | ||
878 | return status; | ||
879 | } | 840 | } |
880 | 841 | ||
881 | static int spear_pcie_gadget_remove(struct platform_device *pdev) | 842 | static int spear_pcie_gadget_remove(struct platform_device *pdev) |
882 | { | 843 | { |
883 | struct resource *res0, *res1; | ||
884 | static struct pcie_gadget_target *target; | 844 | static struct pcie_gadget_target *target; |
885 | struct spear_pcie_gadget_config *config; | ||
886 | int irq; | ||
887 | 845 | ||
888 | res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
889 | res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
890 | irq = platform_get_irq(pdev, 0); | ||
891 | target = platform_get_drvdata(pdev); | 846 | target = platform_get_drvdata(pdev); |
892 | config = &target->config; | ||
893 | 847 | ||
894 | free_irq(irq, NULL); | ||
895 | iounmap(config->va_dbi_base); | ||
896 | iounmap(config->va_app_base); | ||
897 | release_mem_region(res1->start, resource_size(res1)); | ||
898 | release_mem_region(res0->start, resource_size(res0)); | ||
899 | configfs_unregister_subsystem(&target->subsys); | 848 | configfs_unregister_subsystem(&target->subsys); |
900 | kfree(target); | ||
901 | 849 | ||
902 | return 0; | 850 | return 0; |
903 | } | 851 | } |
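The spear13xx_pcie_gadget.c conversion above swaps manual kzalloc/request_mem_region/ioremap/request_irq calls and their goto-based unwinding for devm_* managed equivalents, so probe can return directly on error and remove() has far less to undo. A minimal, self-contained sketch of that probe pattern under a hypothetical my_* driver (not the spear13xx code itself):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_priv {
        void __iomem *regs;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
        struct my_priv *priv;
        struct resource *res;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* request_mem_region() + ioremap() + error labels collapse into one call */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->regs))
                return PTR_ERR(priv->regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;       /* managed resources are released automatically on detach */
}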
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 1972d57aadb3..54be83d3efdd 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
@@ -153,8 +153,9 @@ static void st_reg_complete(struct st_data_s *st_gdata, char err) | |||
153 | (st_gdata->list[i]->priv_data, err); | 153 | (st_gdata->list[i]->priv_data, err); |
154 | pr_info("protocol %d's cb sent %d\n", i, err); | 154 | pr_info("protocol %d's cb sent %d\n", i, err); |
155 | if (err) { /* cleanup registered protocol */ | 155 | if (err) { /* cleanup registered protocol */ |
156 | st_gdata->protos_registered--; | ||
157 | st_gdata->is_registered[i] = false; | 156 | st_gdata->is_registered[i] = false; |
157 | if (st_gdata->protos_registered) | ||
158 | st_gdata->protos_registered--; | ||
158 | } | 159 | } |
159 | } | 160 | } |
160 | } | 161 | } |
@@ -639,14 +640,12 @@ long st_unregister(struct st_proto_s *proto) | |||
639 | return -EPROTONOSUPPORT; | 640 | return -EPROTONOSUPPORT; |
640 | } | 641 | } |
641 | 642 | ||
642 | st_gdata->protos_registered--; | 643 | if (st_gdata->protos_registered) |
644 | st_gdata->protos_registered--; | ||
645 | |||
643 | remove_channel_from_table(st_gdata, proto); | 646 | remove_channel_from_table(st_gdata, proto); |
644 | spin_unlock_irqrestore(&st_gdata->lock, flags); | 647 | spin_unlock_irqrestore(&st_gdata->lock, flags); |
645 | 648 | ||
646 | /* paranoid check */ | ||
647 | if (st_gdata->protos_registered < ST_EMPTY) | ||
648 | st_gdata->protos_registered = ST_EMPTY; | ||
649 | |||
650 | if ((st_gdata->protos_registered == ST_EMPTY) && | 649 | if ((st_gdata->protos_registered == ST_EMPTY) && |
651 | (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) { | 650 | (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) { |
652 | pr_info(" all chnl_ids unregistered "); | 651 | pr_info(" all chnl_ids unregistered "); |
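Both st_core.c hunks above apply the same guard-before-decrement idiom so protos_registered can never wrap below zero, which also lets the later clamp-to-ST_EMPTY paranoia check go away. Sketched with a plain counter instead of st_data_s:

static unsigned int protos_registered;

static void unregister_one_proto(void)
{
        /* checking first keeps an unsigned count from wrapping past zero */
        if (protos_registered)
                protos_registered--;
}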
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c index f3cdd904fe4d..822665245588 100644 --- a/drivers/misc/vmw_vmci/vmci_datagram.c +++ b/drivers/misc/vmw_vmci/vmci_datagram.c | |||
@@ -328,7 +328,8 @@ int vmci_datagram_dispatch(u32 context_id, | |||
328 | 328 | ||
329 | BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24); | 329 | BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24); |
330 | 330 | ||
331 | if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) { | 331 | if (dg->payload_size > VMCI_MAX_DG_SIZE || |
332 | VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) { | ||
332 | pr_devel("Payload (size=%llu bytes) too big to send\n", | 333 | pr_devel("Payload (size=%llu bytes) too big to send\n", |
333 | (unsigned long long)dg->payload_size); | 334 | (unsigned long long)dg->payload_size); |
334 | return VMCI_ERROR_INVALID_ARGS; | 335 | return VMCI_ERROR_INVALID_ARGS; |
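The vmci_datagram.c hunk rejects an oversized payload_size on its own before checking the computed total datagram size; one reading is that this keeps a huge 64-bit payload_size from slipping past the narrower total-size arithmetic. A sketch of the two-step check with stand-in names (DG_MAX_SIZE, struct dg), not the real vmw_vmci types:

#include <linux/types.h>

#define DG_MAX_SIZE (64 * 1024)

struct dg {
        u64 payload_size;
        /* remaining header fields omitted */
};

static bool dg_size_ok(const struct dg *d)
{
        /* bound the raw payload first ... */
        if (d->payload_size > DG_MAX_SIZE)
                return false;
        /* ... then the header-plus-payload total */
        return sizeof(*d) + d->payload_size <= DG_MAX_SIZE;
}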
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 76ee7750bc5e..f721299eb1ba 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -1702,6 +1702,46 @@ static int parport_ECP_supported(struct parport *pb) | |||
1702 | } | 1702 | } |
1703 | #endif | 1703 | #endif |
1704 | 1704 | ||
1705 | #ifdef CONFIG_X86_32 | ||
1706 | static int intel_bug_present_check_epp(struct parport *pb) | ||
1707 | { | ||
1708 | const struct parport_pc_private *priv = pb->private_data; | ||
1709 | int bug_present = 0; | ||
1710 | |||
1711 | if (priv->ecr) { | ||
1712 | /* store value of ECR */ | ||
1713 | unsigned char ecr = inb(ECONTROL(pb)); | ||
1714 | unsigned char i; | ||
1715 | for (i = 0x00; i < 0x80; i += 0x20) { | ||
1716 | ECR_WRITE(pb, i); | ||
1717 | if (clear_epp_timeout(pb)) { | ||
1718 | /* Phony EPP in ECP. */ | ||
1719 | bug_present = 1; | ||
1720 | break; | ||
1721 | } | ||
1722 | } | ||
1723 | /* return ECR into the initial state */ | ||
1724 | ECR_WRITE(pb, ecr); | ||
1725 | } | ||
1726 | |||
1727 | return bug_present; | ||
1728 | } | ||
1729 | static int intel_bug_present(struct parport *pb) | ||
1730 | { | ||
1731 | /* Check whether the device is legacy, not PCI or PCMCIA. Only legacy is known to be affected. */ | ||
1732 | if (pb->dev != NULL) { | ||
1733 | return 0; | ||
1734 | } | ||
1735 | |||
1736 | return intel_bug_present_check_epp(pb); | ||
1737 | } | ||
1738 | #else | ||
1739 | static int intel_bug_present(struct parport *pb) | ||
1740 | { | ||
1741 | return 0; | ||
1742 | } | ||
1743 | #endif /* CONFIG_X86_32 */ | ||
1744 | |||
1705 | static int parport_ECPPS2_supported(struct parport *pb) | 1745 | static int parport_ECPPS2_supported(struct parport *pb) |
1706 | { | 1746 | { |
1707 | const struct parport_pc_private *priv = pb->private_data; | 1747 | const struct parport_pc_private *priv = pb->private_data; |
@@ -1722,8 +1762,6 @@ static int parport_ECPPS2_supported(struct parport *pb) | |||
1722 | 1762 | ||
1723 | static int parport_EPP_supported(struct parport *pb) | 1763 | static int parport_EPP_supported(struct parport *pb) |
1724 | { | 1764 | { |
1725 | const struct parport_pc_private *priv = pb->private_data; | ||
1726 | |||
1727 | /* | 1765 | /* |
1728 | * Theory: | 1766 | * Theory: |
1729 | * Bit 0 of STR is the EPP timeout bit, this bit is 0 | 1767 | * Bit 0 of STR is the EPP timeout bit, this bit is 0 |
@@ -1742,16 +1780,8 @@ static int parport_EPP_supported(struct parport *pb) | |||
1742 | return 0; /* No way to clear timeout */ | 1780 | return 0; /* No way to clear timeout */ |
1743 | 1781 | ||
1744 | /* Check for Intel bug. */ | 1782 | /* Check for Intel bug. */ |
1745 | if (priv->ecr) { | 1783 | if (intel_bug_present(pb)) |
1746 | unsigned char i; | 1784 | return 0; |
1747 | for (i = 0x00; i < 0x80; i += 0x20) { | ||
1748 | ECR_WRITE(pb, i); | ||
1749 | if (clear_epp_timeout(pb)) { | ||
1750 | /* Phony EPP in ECP. */ | ||
1751 | return 0; | ||
1752 | } | ||
1753 | } | ||
1754 | } | ||
1755 | 1785 | ||
1756 | pb->modes |= PARPORT_MODE_EPP; | 1786 | pb->modes |= PARPORT_MODE_EPP; |
1757 | 1787 | ||