path: root/drivers/misc
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-21 12:42:58 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-21 12:42:58 -0400
commit    1fc149933fd49a5b0e7738dc0853dbfbac4ae0e1 (patch)
tree      dfe99751c21aaf39e49765379d0b9b32114c757d /drivers/misc
parent    41d5e08ea86af3359239d5a6f7021cdc61beaa49 (diff)
parent    ea5505fabd3b59608750bfd3721d0f8bc5c8b0bb (diff)
Merge tag 'char-misc-4.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here's the big char/misc driver patchset for 4.1-rc1.  Lots of
  different driver subsystem updates here, nothing major, full details
  are in the shortlog.

  All of this has been in linux-next for a while"

* tag 'char-misc-4.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (133 commits)
  mei: trace: remove unused TRACE_SYSTEM_STRING
  DTS: ARM: OMAP3-N900: Add lis3lv02d support
  Documentation: DT: lis302: update wakeup binding
  lis3lv02d: DT: add wakeup unit 2 and wakeup threshold
  lis3lv02d: DT: use s32 to support negative values
  Drivers: hv: hv_balloon: correctly handle num_pages>INT_MAX case
  Drivers: hv: hv_balloon: correctly handle val.freeram<num_pages case
  mei: replace check for connection instead of transitioning
  mei: use mei_cl_is_connected consistently
  mei: fix mei_poll operation
  hv_vmbus: Add gradually increased delay for retries in vmbus_post_msg()
  Drivers: hv: hv_balloon: survive ballooning request with num_pages=0
  Drivers: hv: hv_balloon: eliminate jumps in piecewiese linear floor function
  Drivers: hv: hv_balloon: do not online pages in offline blocks
  hv: remove the per-channel workqueue
  hv: don't schedule new works in vmbus_onoffer()/vmbus_onoffer_rescind()
  hv: run non-blocking message handlers in the dispatch tasklet
  coresight: moving to new "hwtracing" directory
  coresight-tmc: Adding a status interface to sysfs
  coresight: remove the unnecessary configuration coresight-default-sink
  ...
Diffstat (limited to 'drivers/misc')
 drivers/misc/bh1780gli.c                |   2
 drivers/misc/carma/carma-fpga-program.c |  12
 drivers/misc/carma/carma-fpga.c         |   2
 drivers/misc/lis3lv02d/lis3lv02d.c      |  56
 drivers/misc/lis3lv02d/lis3lv02d_i2c.c  |   2
 drivers/misc/lis3lv02d/lis3lv02d_spi.c  |   2
 drivers/misc/mei/Makefile               |   3
 drivers/misc/mei/amthif.c               | 423
 drivers/misc/mei/bus.c                  | 105
 drivers/misc/mei/client.c               | 478
 drivers/misc/mei/client.h               |  42
 drivers/misc/mei/debugfs.c              |  21
 drivers/misc/mei/hbm.c                  |   8
 drivers/misc/mei/hw-me.c                | 170
 drivers/misc/mei/hw-me.h                |   4
 drivers/misc/mei/hw-txe.c               |   2
 drivers/misc/mei/init.c                 |   2
 drivers/misc/mei/interrupt.c            | 171
 drivers/misc/mei/main.c                 | 146
 drivers/misc/mei/mei-trace.c            |  25
 drivers/misc/mei/mei-trace.h            |  74
 drivers/misc/mei/mei_dev.h              |  40
 drivers/misc/mei/nfc.c                  |  43
 drivers/misc/mei/pci-me.c               |   4
 drivers/misc/mei/pci-txe.c              |   4
 drivers/misc/mei/wd.c                   |  36
 drivers/misc/mic/host/mic_boot.c        |  14
 drivers/misc/mic/host/mic_intr.c        |   2
 drivers/misc/sram.c                     |  19
 drivers/misc/tifm_7xx1.c                |   5
 drivers/misc/vmw_vmci/vmci_driver.c     |   2
 drivers/misc/vmw_vmci/vmci_host.c       |   6
 drivers/misc/vmw_vmci/vmci_queue_pair.c |  37
 33 files changed, 1042 insertions(+), 920 deletions(-)
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 4c4a59b25537..7f90ce5a569a 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -230,6 +230,8 @@ static const struct i2c_device_id bh1780_id[] = {
 	{ },
 };
 
+MODULE_DEVICE_TABLE(i2c, bh1780_id);
+
 #ifdef CONFIG_OF
 static const struct of_device_id of_bh1780_match[] = {
 	{ .compatible = "rohm,bh1780gli", },
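
The added MODULE_DEVICE_TABLE(i2c, bh1780_id) exports the id table as module aliases so udev can autoload the driver when a matching device appears. A minimal standalone sketch of that pattern (driver and device names are placeholders, not from this tree):

	/* Sketch of the I2C id-table + MODULE_DEVICE_TABLE() pattern. */
	#include <linux/i2c.h>
	#include <linux/module.h>

	static int acme_probe(struct i2c_client *client,
			      const struct i2c_device_id *id)
	{
		return 0;
	}

	static const struct i2c_device_id acme_id[] = {
		{ "acme-sensor", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, acme_id);	/* emits modaliases for autoload */

	static struct i2c_driver acme_driver = {
		.driver   = { .name = "acme-sensor" },
		.probe    = acme_probe,
		.id_table = acme_id,
	};
	module_i2c_driver(acme_driver);

	MODULE_LICENSE("GPL");
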
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 06166ac000e0..0b1bd85e4ae6 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -479,6 +479,7 @@ static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
 static noinline int fpga_program_cpu(struct fpga_dev *priv)
 {
 	int ret;
+	unsigned long timeout;
 
 	/* Disable the programmer */
 	fpga_programmer_disable(priv);
@@ -497,8 +498,8 @@ static noinline int fpga_program_cpu(struct fpga_dev *priv)
 		goto out_disable_controller;
 
 	/* Wait for the interrupt handler to signal that programming finished */
-	ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
-	if (!ret) {
+	timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+	if (!timeout) {
 		dev_err(priv->dev, "Timed out waiting for completion\n");
 		ret = -ETIMEDOUT;
 		goto out_disable_controller;
@@ -536,6 +537,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
 	struct sg_table table;
 	dma_cookie_t cookie;
 	int ret, i;
+	unsigned long timeout;
 
 	/* Disable the programmer */
 	fpga_programmer_disable(priv);
@@ -623,8 +625,8 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
 	dev_dbg(priv->dev, "enabled the controller\n");
 
 	/* Wait for the interrupt handler to signal that programming finished */
-	ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
-	if (!ret) {
+	timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+	if (!timeout) {
 		dev_err(priv->dev, "Timed out waiting for completion\n");
 		ret = -ETIMEDOUT;
 		goto out_disable_controller;
@@ -1142,7 +1144,7 @@ out_return:
 	return ret;
 }
 
-static struct of_device_id fpga_of_match[] = {
+static const struct of_device_id fpga_of_match[] = {
 	{ .compatible = "carma,fpga-programmer", },
 	{},
 };
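
The change above keeps the unsigned long return value of wait_for_completion_timeout() (remaining jiffies, or 0 on timeout) separate from the int error code. In isolation the corrected pattern looks like this (the completion pointer and function name are placeholders):

	/* Sketch: keep wait_for_completion_timeout()'s unsigned long return
	 * apart from the int error code.
	 */
	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static int wait_for_done(struct completion *done)
	{
		unsigned long timeout;
		int ret = 0;

		timeout = wait_for_completion_timeout(done, 2 * HZ);
		if (!timeout)		/* 0 means the 2 s budget expired */
			ret = -ETIMEDOUT;

		return ret;
	}
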
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 68cdfe151bdb..5aba3fd789de 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1486,7 +1486,7 @@ static int data_of_remove(struct platform_device *op)
 	return 0;
 }
 
-static struct of_device_id data_of_match[] = {
+static const struct of_device_id data_of_match[] = {
 	{ .compatible = "carma,carma-fpga", },
 	{},
 };
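
This and the previous file constify their OF match tables; the driver core's .of_match_table field takes a const pointer, so the table can live in read-only data. A minimal sketch of the pattern with placeholder names:

	/* Sketch of a constified OF match table on a platform driver. */
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int acme_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static const struct of_device_id acme_of_match[] = {
		{ .compatible = "acme,example-fpga", },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, acme_of_match);

	static struct platform_driver acme_platform_driver = {
		.probe = acme_probe,
		.driver = {
			.name = "acme-example-fpga",
			.of_match_table = acme_of_match,	/* const table */
		},
	};
	module_platform_driver(acme_platform_driver);

	MODULE_LICENSE("GPL");
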
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 3ef4627f9cb1..4739689d23ad 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -950,6 +950,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
 	struct lis3lv02d_platform_data *pdata;
 	struct device_node *np = lis3->of_node;
 	u32 val;
+	s32 sval;
 
 	if (!lis3->of_node)
 		return 0;
@@ -1031,6 +1032,23 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
 		pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
 	if (of_get_property(np, "st,wakeup-z-hi", NULL))
 		pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
+	if (of_get_property(np, "st,wakeup-threshold", &val))
+		pdata->wakeup_thresh = val;
+
+	if (of_get_property(np, "st,wakeup2-x-lo", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_X_LO;
+	if (of_get_property(np, "st,wakeup2-x-hi", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_X_HI;
+	if (of_get_property(np, "st,wakeup2-y-lo", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_LO;
+	if (of_get_property(np, "st,wakeup2-y-hi", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_HI;
+	if (of_get_property(np, "st,wakeup2-z-lo", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
+	if (of_get_property(np, "st,wakeup2-z-hi", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
+	if (of_get_property(np, "st,wakeup2-threshold", &val))
+		pdata->wakeup_thresh2 = val;
 
 	if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
 		switch (val) {
@@ -1054,29 +1072,29 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
 	if (of_get_property(np, "st,hipass2-disable", NULL))
 		pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE;
 
-	if (of_get_property(np, "st,axis-x", &val))
-		pdata->axis_x = val;
-	if (of_get_property(np, "st,axis-y", &val))
-		pdata->axis_y = val;
-	if (of_get_property(np, "st,axis-z", &val))
-		pdata->axis_z = val;
+	if (of_property_read_s32(np, "st,axis-x", &sval) == 0)
+		pdata->axis_x = sval;
+	if (of_property_read_s32(np, "st,axis-y", &sval) == 0)
+		pdata->axis_y = sval;
+	if (of_property_read_s32(np, "st,axis-z", &sval) == 0)
+		pdata->axis_z = sval;
 
 	if (of_get_property(np, "st,default-rate", NULL))
 		pdata->default_rate = val;
 
-	if (of_get_property(np, "st,min-limit-x", &val))
-		pdata->st_min_limits[0] = val;
-	if (of_get_property(np, "st,min-limit-y", &val))
-		pdata->st_min_limits[1] = val;
-	if (of_get_property(np, "st,min-limit-z", &val))
-		pdata->st_min_limits[2] = val;
+	if (of_property_read_s32(np, "st,min-limit-x", &sval) == 0)
+		pdata->st_min_limits[0] = sval;
+	if (of_property_read_s32(np, "st,min-limit-y", &sval) == 0)
+		pdata->st_min_limits[1] = sval;
+	if (of_property_read_s32(np, "st,min-limit-z", &sval) == 0)
+		pdata->st_min_limits[2] = sval;
 
-	if (of_get_property(np, "st,max-limit-x", &val))
-		pdata->st_max_limits[0] = val;
-	if (of_get_property(np, "st,max-limit-y", &val))
-		pdata->st_max_limits[1] = val;
-	if (of_get_property(np, "st,max-limit-z", &val))
-		pdata->st_max_limits[2] = val;
+	if (of_property_read_s32(np, "st,max-limit-x", &sval) == 0)
+		pdata->st_max_limits[0] = sval;
+	if (of_property_read_s32(np, "st,max-limit-y", &sval) == 0)
+		pdata->st_max_limits[1] = sval;
+	if (of_property_read_s32(np, "st,max-limit-z", &sval) == 0)
+		pdata->st_max_limits[2] = sval;
 
 
 	lis3->pdata = pdata;
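
The conversion above reads signed properties with of_property_read_s32(), which returns 0 on success and preserves negative values; the old of_get_property(np, name, &val) calls actually stored the property length, since that third argument is a length pointer. A short sketch of the new helper in use (the struct is a placeholder, not the lis3 platform data):

	/* Sketch: reading signed 32-bit DT properties with of_property_read_s32(). */
	#include <linux/of.h>
	#include <linux/types.h>

	struct axis_cfg {
		s32 axis_x, axis_y, axis_z;
	};

	static void read_axis_map(struct device_node *np, struct axis_cfg *cfg)
	{
		s32 sval;

		/* returns 0 on success; negative DT values survive intact */
		if (of_property_read_s32(np, "st,axis-x", &sval) == 0)
			cfg->axis_x = sval;
		if (of_property_read_s32(np, "st,axis-y", &sval) == 0)
			cfg->axis_y = sval;
		if (of_property_read_s32(np, "st,axis-z", &sval) == 0)
			cfg->axis_z = sval;
	}
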
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 63fe096d4462..e3e7f1dc27ba 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -106,7 +106,7 @@ static union axis_conversion lis3lv02d_axis_map =
 	{ .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
 
 #ifdef CONFIG_OF
-static struct of_device_id lis3lv02d_i2c_dt_ids[] = {
+static const struct of_device_id lis3lv02d_i2c_dt_ids[] = {
 	{ .compatible = "st,lis3lv02d" },
 	{}
 };
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index bd06d0cfac45..b2f6e1651ac9 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -61,7 +61,7 @@ static union axis_conversion lis3lv02d_axis_normal =
 	{ .as_array = { 1, 2, 3 } };
 
 #ifdef CONFIG_OF
-static struct of_device_id lis302dl_spi_dt_ids[] = {
+static const struct of_device_id lis302dl_spi_dt_ids[] = {
 	{ .compatible = "st,lis302dl-spi" },
 	{}
 };
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 8ebc6cda1373..518914a82b83 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -21,3 +21,6 @@ mei-me-objs += hw-me.o
 obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
 mei-txe-objs := pci-txe.o
 mei-txe-objs += hw-txe.o
+
+mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
+CFLAGS_mei-trace.o = -I$(src)
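
The two added Makefile lines build mei-trace.o only when CONFIG_EVENT_TRACING is set and add -I$(src) so the trace header can be re-included by name when the tracepoints are instantiated. The contents of mei-trace.[ch] are not part of this diff; the following is only the generic kernel tracepoint boilerplate such a setup usually pairs with (all names are placeholders):

	/* mydrv-trace.h -- generic tracepoint header sketch */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM mydrv

	#if !defined(_MYDRV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
	#define _MYDRV_TRACE_H_

	#include <linux/tracepoint.h>

	TRACE_EVENT(mydrv_reg_read,
		TP_PROTO(u32 offs, u32 val),
		TP_ARGS(offs, val),
		TP_STRUCT__entry(
			__field(u32, offs)
			__field(u32, val)
		),
		TP_fast_assign(
			__entry->offs = offs;
			__entry->val = val;
		),
		TP_printk("reg[0x%08x] = 0x%08x", __entry->offs, __entry->val)
	);

	#endif /* _MYDRV_TRACE_H_ */

	/* must stay outside the include guard */
	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .	/* why the Makefile adds -I$(src) */
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE mydrv-trace
	#include <trace/define_trace.h>

	/* mydrv-trace.c -- the one translation unit that instantiates the events */
	#define CREATE_TRACE_POINTS
	#include "mydrv-trace.h"
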
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 40ea639fa413..d2cd53e3fac3 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -48,10 +48,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
48{ 48{
49 /* reset iamthif parameters. */ 49 /* reset iamthif parameters. */
50 dev->iamthif_current_cb = NULL; 50 dev->iamthif_current_cb = NULL;
51 dev->iamthif_msg_buf_size = 0;
52 dev->iamthif_msg_buf_index = 0;
53 dev->iamthif_canceled = false; 51 dev->iamthif_canceled = false;
54 dev->iamthif_ioctl = false;
55 dev->iamthif_state = MEI_IAMTHIF_IDLE; 52 dev->iamthif_state = MEI_IAMTHIF_IDLE;
56 dev->iamthif_timer = 0; 53 dev->iamthif_timer = 0;
57 dev->iamthif_stall_timer = 0; 54 dev->iamthif_stall_timer = 0;
@@ -69,7 +66,6 @@ int mei_amthif_host_init(struct mei_device *dev)
69{ 66{
70 struct mei_cl *cl = &dev->iamthif_cl; 67 struct mei_cl *cl = &dev->iamthif_cl;
71 struct mei_me_client *me_cl; 68 struct mei_me_client *me_cl;
72 unsigned char *msg_buf;
73 int ret; 69 int ret;
74 70
75 dev->iamthif_state = MEI_IAMTHIF_IDLE; 71 dev->iamthif_state = MEI_IAMTHIF_IDLE;
@@ -90,18 +86,6 @@ int mei_amthif_host_init(struct mei_device *dev)
90 dev->iamthif_mtu = me_cl->props.max_msg_length; 86 dev->iamthif_mtu = me_cl->props.max_msg_length;
91 dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu); 87 dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
92 88
93 kfree(dev->iamthif_msg_buf);
94 dev->iamthif_msg_buf = NULL;
95
96 /* allocate storage for ME message buffer */
97 msg_buf = kcalloc(dev->iamthif_mtu,
98 sizeof(unsigned char), GFP_KERNEL);
99 if (!msg_buf) {
100 ret = -ENOMEM;
101 goto out;
102 }
103
104 dev->iamthif_msg_buf = msg_buf;
105 89
106 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); 90 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
107 if (ret < 0) { 91 if (ret < 0) {
@@ -194,30 +178,33 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
194 dev_dbg(dev->dev, "woke up from sleep\n"); 178 dev_dbg(dev->dev, "woke up from sleep\n");
195 } 179 }
196 180
181 if (cb->status) {
182 rets = cb->status;
183 dev_dbg(dev->dev, "read operation failed %d\n", rets);
184 goto free;
185 }
197 186
198 dev_dbg(dev->dev, "Got amthif data\n"); 187 dev_dbg(dev->dev, "Got amthif data\n");
199 dev->iamthif_timer = 0; 188 dev->iamthif_timer = 0;
200 189
201 if (cb) { 190 timeout = cb->read_time +
202 timeout = cb->read_time + 191 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
203 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); 192 dev_dbg(dev->dev, "amthif timeout = %lud\n",
204 dev_dbg(dev->dev, "amthif timeout = %lud\n", 193 timeout);
205 timeout); 194
206 195 if (time_after(jiffies, timeout)) {
207 if (time_after(jiffies, timeout)) { 196 dev_dbg(dev->dev, "amthif Time out\n");
208 dev_dbg(dev->dev, "amthif Time out\n"); 197 /* 15 sec for the message has expired */
209 /* 15 sec for the message has expired */ 198 list_del_init(&cb->list);
210 list_del(&cb->list); 199 rets = -ETIME;
211 rets = -ETIME; 200 goto free;
212 goto free;
213 }
214 } 201 }
215 /* if the whole message will fit remove it from the list */ 202 /* if the whole message will fit remove it from the list */
216 if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) 203 if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
217 list_del(&cb->list); 204 list_del_init(&cb->list);
218 else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { 205 else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
219 /* end of the message has been reached */ 206 /* end of the message has been reached */
220 list_del(&cb->list); 207 list_del_init(&cb->list);
221 rets = 0; 208 rets = 0;
222 goto free; 209 goto free;
223 } 210 }
@@ -225,15 +212,15 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
225 * remove message from deletion list 212 * remove message from deletion list
226 */ 213 */
227 214
228 dev_dbg(dev->dev, "amthif cb->response_buffer size - %d\n", 215 dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
229 cb->response_buffer.size); 216 cb->buf.size);
230 dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); 217 dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
231 218
232 /* length is being truncated to PAGE_SIZE, however, 219 /* length is being truncated to PAGE_SIZE, however,
233 * the buf_idx may point beyond */ 220 * the buf_idx may point beyond */
234 length = min_t(size_t, length, (cb->buf_idx - *offset)); 221 length = min_t(size_t, length, (cb->buf_idx - *offset));
235 222
236 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { 223 if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
237 dev_dbg(dev->dev, "failed to copy data to userland\n"); 224 dev_dbg(dev->dev, "failed to copy data to userland\n");
238 rets = -EFAULT; 225 rets = -EFAULT;
239 } else { 226 } else {
@@ -252,126 +239,88 @@ out:
252} 239}
253 240
254/** 241/**
255 * mei_amthif_send_cmd - send amthif command to the ME 242 * mei_amthif_read_start - queue message for sending read credential
256 * 243 *
257 * @dev: the device structure 244 * @cl: host client
258 * @cb: mei call back struct 245 * @file: file pointer of message recipient
259 * 246 *
260 * Return: 0 on success, <0 on failure. 247 * Return: 0 on success, <0 on failure.
261 *
262 */ 248 */
263static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) 249static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
264{ 250{
265 struct mei_msg_hdr mei_hdr; 251 struct mei_device *dev = cl->dev;
266 struct mei_cl *cl; 252 struct mei_cl_cb *cb;
267 int ret; 253 size_t length = dev->iamthif_mtu;
268 254 int rets;
269 if (!dev || !cb)
270 return -ENODEV;
271 255
272 dev_dbg(dev->dev, "write data to amthif client.\n"); 256 cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
257 if (!cb) {
258 rets = -ENOMEM;
259 goto err;
260 }
273 261
274 dev->iamthif_state = MEI_IAMTHIF_WRITING; 262 rets = mei_io_cb_alloc_buf(cb, length);
275 dev->iamthif_current_cb = cb; 263 if (rets)
276 dev->iamthif_file_object = cb->file_object; 264 goto err;
277 dev->iamthif_canceled = false;
278 dev->iamthif_ioctl = true;
279 dev->iamthif_msg_buf_size = cb->request_buffer.size;
280 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
281 cb->request_buffer.size);
282 cl = &dev->iamthif_cl;
283 265
284 ret = mei_cl_flow_ctrl_creds(cl); 266 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
285 if (ret < 0)
286 return ret;
287 267
288 if (ret && mei_hbuf_acquire(dev)) { 268 dev->iamthif_state = MEI_IAMTHIF_READING;
289 ret = 0; 269 dev->iamthif_file_object = cb->file_object;
290 if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { 270 dev->iamthif_current_cb = cb;
291 mei_hdr.length = mei_hbuf_max_len(dev);
292 mei_hdr.msg_complete = 0;
293 } else {
294 mei_hdr.length = cb->request_buffer.size;
295 mei_hdr.msg_complete = 1;
296 }
297 271
298 mei_hdr.host_addr = cl->host_client_id;
299 mei_hdr.me_addr = cl->me_client_id;
300 mei_hdr.reserved = 0;
301 mei_hdr.internal = 0;
302 dev->iamthif_msg_buf_index += mei_hdr.length;
303 ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
304 if (ret)
305 return ret;
306
307 if (mei_hdr.msg_complete) {
308 if (mei_cl_flow_ctrl_reduce(cl))
309 return -EIO;
310 dev->iamthif_flow_control_pending = true;
311 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
312 dev_dbg(dev->dev, "add amthif cb to write waiting list\n");
313 dev->iamthif_current_cb = cb;
314 dev->iamthif_file_object = cb->file_object;
315 list_add_tail(&cb->list, &dev->write_waiting_list.list);
316 } else {
317 dev_dbg(dev->dev, "message does not complete, so add amthif cb to write list.\n");
318 list_add_tail(&cb->list, &dev->write_list.list);
319 }
320 } else {
321 list_add_tail(&cb->list, &dev->write_list.list);
322 }
323 return 0; 272 return 0;
273err:
274 mei_io_cb_free(cb);
275 return rets;
324} 276}
325 277
326/** 278/**
327 * mei_amthif_write - write amthif data to amthif client 279 * mei_amthif_send_cmd - send amthif command to the ME
328 * 280 *
329 * @dev: the device structure 281 * @cl: the host client
330 * @cb: mei call back struct 282 * @cb: mei call back struct
331 * 283 *
332 * Return: 0 on success, <0 on failure. 284 * Return: 0 on success, <0 on failure.
333 *
334 */ 285 */
335int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) 286static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
336{ 287{
288 struct mei_device *dev;
337 int ret; 289 int ret;
338 290
339 if (!dev || !cb) 291 if (!cl->dev || !cb)
340 return -ENODEV; 292 return -ENODEV;
341 293
342 ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu); 294 dev = cl->dev;
343 if (ret) 295
296 dev->iamthif_state = MEI_IAMTHIF_WRITING;
297 dev->iamthif_current_cb = cb;
298 dev->iamthif_file_object = cb->file_object;
299 dev->iamthif_canceled = false;
300
301 ret = mei_cl_write(cl, cb, false);
302 if (ret < 0)
344 return ret; 303 return ret;
345 304
346 cb->fop_type = MEI_FOP_WRITE; 305 if (cb->completed)
306 cb->status = mei_amthif_read_start(cl, cb->file_object);
347 307
348 if (!list_empty(&dev->amthif_cmd_list.list) || 308 return 0;
349 dev->iamthif_state != MEI_IAMTHIF_IDLE) {
350 dev_dbg(dev->dev,
351 "amthif state = %d\n", dev->iamthif_state);
352 dev_dbg(dev->dev, "AMTHIF: add cb to the wait list\n");
353 list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
354 return 0;
355 }
356 return mei_amthif_send_cmd(dev, cb);
357} 309}
310
358/** 311/**
359 * mei_amthif_run_next_cmd - send next amt command from queue 312 * mei_amthif_run_next_cmd - send next amt command from queue
360 * 313 *
361 * @dev: the device structure 314 * @dev: the device structure
315 *
316 * Return: 0 on success, <0 on failure.
362 */ 317 */
363void mei_amthif_run_next_cmd(struct mei_device *dev) 318int mei_amthif_run_next_cmd(struct mei_device *dev)
364{ 319{
320 struct mei_cl *cl = &dev->iamthif_cl;
365 struct mei_cl_cb *cb; 321 struct mei_cl_cb *cb;
366 int ret;
367
368 if (!dev)
369 return;
370 322
371 dev->iamthif_msg_buf_size = 0;
372 dev->iamthif_msg_buf_index = 0;
373 dev->iamthif_canceled = false; 323 dev->iamthif_canceled = false;
374 dev->iamthif_ioctl = true;
375 dev->iamthif_state = MEI_IAMTHIF_IDLE; 324 dev->iamthif_state = MEI_IAMTHIF_IDLE;
376 dev->iamthif_timer = 0; 325 dev->iamthif_timer = 0;
377 dev->iamthif_file_object = NULL; 326 dev->iamthif_file_object = NULL;
@@ -381,13 +330,48 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
381 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list, 330 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
382 typeof(*cb), list); 331 typeof(*cb), list);
383 if (!cb) 332 if (!cb)
384 return; 333 return 0;
385 list_del(&cb->list); 334
386 ret = mei_amthif_send_cmd(dev, cb); 335 list_del_init(&cb->list);
387 if (ret) 336 return mei_amthif_send_cmd(cl, cb);
388 dev_warn(dev->dev, "amthif write failed status = %d\n", ret);
389} 337}
390 338
339/**
340 * mei_amthif_write - write amthif data to amthif client
341 *
342 * @cl: host client
343 * @cb: mei call back struct
344 *
345 * Return: 0 on success, <0 on failure.
346 */
347int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
348{
349
350 struct mei_device *dev;
351
352 if (WARN_ON(!cl || !cl->dev))
353 return -ENODEV;
354
355 if (WARN_ON(!cb))
356 return -EINVAL;
357
358 dev = cl->dev;
359
360 list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
361 return mei_amthif_run_next_cmd(dev);
362}
363
364/**
365 * mei_amthif_poll - the amthif poll function
366 *
367 * @dev: the device structure
368 * @file: pointer to file structure
369 * @wait: pointer to poll_table structure
370 *
371 * Return: poll mask
372 *
373 * Locking: called under "dev->device_lock" lock
374 */
391 375
392unsigned int mei_amthif_poll(struct mei_device *dev, 376unsigned int mei_amthif_poll(struct mei_device *dev,
393 struct file *file, poll_table *wait) 377 struct file *file, poll_table *wait)
@@ -396,19 +380,12 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
396 380
397 poll_wait(file, &dev->iamthif_cl.wait, wait); 381 poll_wait(file, &dev->iamthif_cl.wait, wait);
398 382
399 mutex_lock(&dev->device_lock); 383 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
400 if (!mei_cl_is_connected(&dev->iamthif_cl)) { 384 dev->iamthif_file_object == file) {
401
402 mask = POLLERR;
403
404 } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
405 dev->iamthif_file_object == file) {
406 385
407 mask |= (POLLIN | POLLRDNORM); 386 mask |= POLLIN | POLLRDNORM;
408 dev_dbg(dev->dev, "run next amthif cb\n");
409 mei_amthif_run_next_cmd(dev); 387 mei_amthif_run_next_cmd(dev);
410 } 388 }
411 mutex_unlock(&dev->device_lock);
412 389
413 return mask; 390 return mask;
414} 391}
@@ -427,71 +404,14 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
427int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 404int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
428 struct mei_cl_cb *cmpl_list) 405 struct mei_cl_cb *cmpl_list)
429{ 406{
430 struct mei_device *dev = cl->dev; 407 int ret;
431 struct mei_msg_hdr mei_hdr;
432 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
433 u32 msg_slots = mei_data2slots(len);
434 int slots;
435 int rets;
436
437 rets = mei_cl_flow_ctrl_creds(cl);
438 if (rets < 0)
439 return rets;
440
441 if (rets == 0) {
442 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
443 return 0;
444 }
445
446 mei_hdr.host_addr = cl->host_client_id;
447 mei_hdr.me_addr = cl->me_client_id;
448 mei_hdr.reserved = 0;
449 mei_hdr.internal = 0;
450
451 slots = mei_hbuf_empty_slots(dev);
452
453 if (slots >= msg_slots) {
454 mei_hdr.length = len;
455 mei_hdr.msg_complete = 1;
456 /* Split the message only if we can write the whole host buffer */
457 } else if (slots == dev->hbuf_depth) {
458 msg_slots = slots;
459 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
460 mei_hdr.length = len;
461 mei_hdr.msg_complete = 0;
462 } else {
463 /* wait for next time the host buffer is empty */
464 return 0;
465 }
466
467 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
468
469 rets = mei_write_message(dev, &mei_hdr,
470 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
471 if (rets) {
472 dev->iamthif_state = MEI_IAMTHIF_IDLE;
473 cl->status = rets;
474 list_del(&cb->list);
475 return rets;
476 }
477
478 if (mei_cl_flow_ctrl_reduce(cl))
479 return -EIO;
480
481 dev->iamthif_msg_buf_index += mei_hdr.length;
482 cl->status = 0;
483
484 if (mei_hdr.msg_complete) {
485 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
486 dev->iamthif_flow_control_pending = true;
487
488 /* save iamthif cb sent to amthif client */
489 cb->buf_idx = dev->iamthif_msg_buf_index;
490 dev->iamthif_current_cb = cb;
491 408
492 list_move_tail(&cb->list, &dev->write_waiting_list.list); 409 ret = mei_cl_irq_write(cl, cb, cmpl_list);
493 } 410 if (ret)
411 return ret;
494 412
413 if (cb->completed)
414 cb->status = mei_amthif_read_start(cl, cb->file_object);
495 415
496 return 0; 416 return 0;
497} 417}
@@ -500,83 +420,35 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
500 * mei_amthif_irq_read_msg - read routine after ISR to 420 * mei_amthif_irq_read_msg - read routine after ISR to
501 * handle the read amthif message 421 * handle the read amthif message
502 * 422 *
503 * @dev: the device structure 423 * @cl: mei client
504 * @mei_hdr: header of amthif message 424 * @mei_hdr: header of amthif message
505 * @complete_list: An instance of our list structure 425 * @cmpl_list: completed callbacks list
506 * 426 *
507 * Return: 0 on success, <0 on failure. 427 * Return: -ENODEV if cb is NULL 0 otherwise; error message is in cb->status
508 */ 428 */
509int mei_amthif_irq_read_msg(struct mei_device *dev, 429int mei_amthif_irq_read_msg(struct mei_cl *cl,
510 struct mei_msg_hdr *mei_hdr, 430 struct mei_msg_hdr *mei_hdr,
511 struct mei_cl_cb *complete_list) 431 struct mei_cl_cb *cmpl_list)
512{ 432{
513 struct mei_cl_cb *cb; 433 struct mei_device *dev;
514 unsigned char *buffer; 434 int ret;
515
516 BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
517 BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
518 435
519 buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index; 436 dev = cl->dev;
520 BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
521 437
522 mei_read_slots(dev, buffer, mei_hdr->length); 438 if (dev->iamthif_state != MEI_IAMTHIF_READING)
439 return 0;
523 440
524 dev->iamthif_msg_buf_index += mei_hdr->length; 441 ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
442 if (ret)
443 return ret;
525 444
526 if (!mei_hdr->msg_complete) 445 if (!mei_hdr->msg_complete)
527 return 0; 446 return 0;
528 447
529 dev_dbg(dev->dev, "amthif_message_buffer_index =%d\n",
530 mei_hdr->length);
531
532 dev_dbg(dev->dev, "completed amthif read.\n "); 448 dev_dbg(dev->dev, "completed amthif read.\n ");
533 if (!dev->iamthif_current_cb)
534 return -ENODEV;
535
536 cb = dev->iamthif_current_cb;
537 dev->iamthif_current_cb = NULL; 449 dev->iamthif_current_cb = NULL;
538
539 dev->iamthif_stall_timer = 0; 450 dev->iamthif_stall_timer = 0;
540 cb->buf_idx = dev->iamthif_msg_buf_index;
541 cb->read_time = jiffies;
542 if (dev->iamthif_ioctl) {
543 /* found the iamthif cb */
544 dev_dbg(dev->dev, "complete the amthif read cb.\n ");
545 dev_dbg(dev->dev, "add the amthif read cb to complete.\n ");
546 list_add_tail(&cb->list, &complete_list->list);
547 }
548 return 0;
549}
550
551/**
552 * mei_amthif_irq_read - prepares to read amthif data.
553 *
554 * @dev: the device structure.
555 * @slots: free slots.
556 *
557 * Return: 0, OK; otherwise, error.
558 */
559int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
560{
561 u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
562
563 if (*slots < msg_slots)
564 return -EMSGSIZE;
565
566 *slots -= msg_slots;
567
568 if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
569 dev_dbg(dev->dev, "iamthif flow control failed\n");
570 return -EIO;
571 }
572 451
573 dev_dbg(dev->dev, "iamthif flow control success\n");
574 dev->iamthif_state = MEI_IAMTHIF_READING;
575 dev->iamthif_flow_control_pending = false;
576 dev->iamthif_msg_buf_index = 0;
577 dev->iamthif_msg_buf_size = 0;
578 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
579 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
580 return 0; 452 return 0;
581} 453}
582 454
@@ -588,17 +460,30 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
588 */ 460 */
589void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) 461void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
590{ 462{
463
464 if (cb->fop_type == MEI_FOP_WRITE) {
465 if (!cb->status) {
466 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
467 mei_io_cb_free(cb);
468 return;
469 }
470 /*
471 * in case of error enqueue the write cb to complete read list
472 * so it can be propagated to the reader
473 */
474 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
475 wake_up_interruptible(&dev->iamthif_cl.wait);
476 return;
477 }
478
591 if (dev->iamthif_canceled != 1) { 479 if (dev->iamthif_canceled != 1) {
592 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; 480 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
593 dev->iamthif_stall_timer = 0; 481 dev->iamthif_stall_timer = 0;
594 memcpy(cb->response_buffer.data,
595 dev->iamthif_msg_buf,
596 dev->iamthif_msg_buf_index);
597 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); 482 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
598 dev_dbg(dev->dev, "amthif read completed\n"); 483 dev_dbg(dev->dev, "amthif read completed\n");
599 dev->iamthif_timer = jiffies; 484 dev->iamthif_timer = jiffies;
600 dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", 485 dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
601 dev->iamthif_timer); 486 dev->iamthif_timer);
602 } else { 487 } else {
603 mei_amthif_run_next_cmd(dev); 488 mei_amthif_run_next_cmd(dev);
604 } 489 }
@@ -623,26 +508,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
623static bool mei_clear_list(struct mei_device *dev, 508static bool mei_clear_list(struct mei_device *dev,
624 const struct file *file, struct list_head *mei_cb_list) 509 const struct file *file, struct list_head *mei_cb_list)
625{ 510{
626 struct mei_cl_cb *cb_pos = NULL; 511 struct mei_cl *cl = &dev->iamthif_cl;
627 struct mei_cl_cb *cb_next = NULL; 512 struct mei_cl_cb *cb, *next;
628 bool removed = false; 513 bool removed = false;
629 514
630 /* list all list member */ 515 /* list all list member */
631 list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) { 516 list_for_each_entry_safe(cb, next, mei_cb_list, list) {
632 /* check if list member associated with a file */ 517 /* check if list member associated with a file */
633 if (file == cb_pos->file_object) { 518 if (file == cb->file_object) {
634 /* remove member from the list */
635 list_del(&cb_pos->list);
636 /* check if cb equal to current iamthif cb */ 519 /* check if cb equal to current iamthif cb */
637 if (dev->iamthif_current_cb == cb_pos) { 520 if (dev->iamthif_current_cb == cb) {
638 dev->iamthif_current_cb = NULL; 521 dev->iamthif_current_cb = NULL;
639 /* send flow control to iamthif client */ 522 /* send flow control to iamthif client */
640 mei_hbm_cl_flow_control_req(dev, 523 mei_hbm_cl_flow_control_req(dev, cl);
641 &dev->iamthif_cl);
642 } 524 }
643 /* free all allocated buffers */ 525 /* free all allocated buffers */
644 mei_io_cb_free(cb_pos); 526 mei_io_cb_free(cb);
645 cb_pos = NULL;
646 removed = true; 527 removed = true;
647 } 528 }
648 } 529 }
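
Throughout the amthif rework above, plain list_del() calls on callbacks become list_del_init(), so a cb that has already been unlinked can still pass through a generic free helper that unconditionally unlinks it. A standalone sketch of why that combination is safe (placeholder types, not the mei structures):

	/* Sketch: list_del_init() keeps a removed node self-consistent so a
	 * later unconditional unlink in the free path cannot corrupt lists.
	 * The node must have been set up with INIT_LIST_HEAD() at allocation.
	 */
	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_cb {
		struct list_head list;
		/* payload ... */
	};

	static void my_cb_free(struct my_cb *cb)
	{
		if (!cb)
			return;
		/* Safe even if cb was already removed: after list_del_init()
		 * the entry points at itself rather than at stale neighbours. */
		list_del(&cb->list);
		kfree(cb);
	}

	static void my_cb_complete(struct my_cb *cb)
	{
		list_del_init(&cb->list);	/* unlink, keep node reusable */
		/* ... hand the buffer to the reader, my_cb_free(cb) later ... */
	}
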
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index be767f4db26a..4cf38c39878a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -238,7 +238,7 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 	dev = cl->dev;
 
 	mutex_lock(&dev->device_lock);
-	if (cl->state != MEI_FILE_CONNECTED) {
+	if (!mei_cl_is_connected(cl)) {
 		rets = -ENODEV;
 		goto out;
 	}
@@ -255,17 +255,13 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 		goto out;
 	}
 
-	cb = mei_io_cb_init(cl, NULL);
+	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
 	if (!cb) {
 		rets = -ENOMEM;
 		goto out;
 	}
 
-	rets = mei_io_cb_alloc_req_buf(cb, length);
-	if (rets < 0)
-		goto out;
-
-	memcpy(cb->request_buffer.data, buf, length);
+	memcpy(cb->buf.data, buf, length);
 
 	rets = mei_cl_write(cl, cb, blocking);
 
@@ -292,20 +288,21 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
292 288
293 mutex_lock(&dev->device_lock); 289 mutex_lock(&dev->device_lock);
294 290
295 if (!cl->read_cb) { 291 cb = mei_cl_read_cb(cl, NULL);
296 rets = mei_cl_read_start(cl, length); 292 if (cb)
297 if (rets < 0) 293 goto copy;
298 goto out;
299 }
300 294
301 if (cl->reading_state != MEI_READ_COMPLETE && 295 rets = mei_cl_read_start(cl, length, NULL);
302 !waitqueue_active(&cl->rx_wait)) { 296 if (rets && rets != -EBUSY)
297 goto out;
298
299 if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
303 300
304 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
305 302
306 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
307 cl->reading_state == MEI_READ_COMPLETE || 304 (!list_empty(&cl->rd_completed)) ||
308 mei_cl_is_transitioning(cl))) { 305 (!mei_cl_is_connected(cl)))) {
309 306
310 if (signal_pending(current)) 307 if (signal_pending(current))
311 return -EINTR; 308 return -EINTR;
@@ -313,23 +310,31 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
313 } 310 }
314 311
315 mutex_lock(&dev->device_lock); 312 mutex_lock(&dev->device_lock);
316 }
317 313
318 cb = cl->read_cb; 314 if (!mei_cl_is_connected(cl)) {
315 rets = -EBUSY;
316 goto out;
317 }
318 }
319 319
320 if (cl->reading_state != MEI_READ_COMPLETE) { 320 cb = mei_cl_read_cb(cl, NULL);
321 if (!cb) {
321 rets = 0; 322 rets = 0;
322 goto out; 323 goto out;
323 } 324 }
324 325
326copy:
327 if (cb->status) {
328 rets = cb->status;
329 goto free;
330 }
331
325 r_length = min_t(size_t, length, cb->buf_idx); 332 r_length = min_t(size_t, length, cb->buf_idx);
326 memcpy(buf, cb->response_buffer.data, r_length); 333 memcpy(buf, cb->buf.data, r_length);
327 rets = r_length; 334 rets = r_length;
328 335
336free:
329 mei_io_cb_free(cb); 337 mei_io_cb_free(cb);
330 cl->reading_state = MEI_IDLE;
331 cl->read_cb = NULL;
332
333out: 338out:
334 mutex_unlock(&dev->device_lock); 339 mutex_unlock(&dev->device_lock);
335 340
@@ -386,7 +391,7 @@ static void mei_bus_event_work(struct work_struct *work)
 	device->events = 0;
 
 	/* Prepare for the next read */
-	mei_cl_read_start(device->cl, 0);
+	mei_cl_read_start(device->cl, 0, NULL);
 }
 
 int mei_cl_register_event_cb(struct mei_cl_device *device,
@@ -400,7 +405,7 @@ int mei_cl_register_event_cb(struct mei_cl_device *device,
 	device->event_context = context;
 	INIT_WORK(&device->event_work, mei_bus_event_work);
 
-	mei_cl_read_start(device->cl, 0);
+	mei_cl_read_start(device->cl, 0, NULL);
 
 	return 0;
 }
@@ -441,8 +446,8 @@ int mei_cl_enable_device(struct mei_cl_device *device)
 
 	mutex_unlock(&dev->device_lock);
 
-	if (device->event_cb && !cl->read_cb)
-		mei_cl_read_start(device->cl, 0);
+	if (device->event_cb)
+		mei_cl_read_start(device->cl, 0, NULL);
 
 	if (!device->ops || !device->ops->enable)
 		return 0;
@@ -462,54 +467,34 @@ int mei_cl_disable_device(struct mei_cl_device *device)
462 467
463 dev = cl->dev; 468 dev = cl->dev;
464 469
470 if (device->ops && device->ops->disable)
471 device->ops->disable(device);
472
473 device->event_cb = NULL;
474
465 mutex_lock(&dev->device_lock); 475 mutex_lock(&dev->device_lock);
466 476
467 if (cl->state != MEI_FILE_CONNECTED) { 477 if (!mei_cl_is_connected(cl)) {
468 mutex_unlock(&dev->device_lock);
469 dev_err(dev->dev, "Already disconnected"); 478 dev_err(dev->dev, "Already disconnected");
470 479 err = 0;
471 return 0; 480 goto out;
472 } 481 }
473 482
474 cl->state = MEI_FILE_DISCONNECTING; 483 cl->state = MEI_FILE_DISCONNECTING;
475 484
476 err = mei_cl_disconnect(cl); 485 err = mei_cl_disconnect(cl);
477 if (err < 0) { 486 if (err < 0) {
478 mutex_unlock(&dev->device_lock); 487 dev_err(dev->dev, "Could not disconnect from the ME client");
479 dev_err(dev->dev, 488 goto out;
480 "Could not disconnect from the ME client");
481
482 return err;
483 } 489 }
484 490
485 /* Flush queues and remove any pending read */ 491 /* Flush queues and remove any pending read */
486 mei_cl_flush_queues(cl); 492 mei_cl_flush_queues(cl, NULL);
487
488 if (cl->read_cb) {
489 struct mei_cl_cb *cb = NULL;
490
491 cb = mei_cl_find_read_cb(cl);
492 /* Remove entry from read list */
493 if (cb)
494 list_del(&cb->list);
495
496 cb = cl->read_cb;
497 cl->read_cb = NULL;
498
499 if (cb) {
500 mei_io_cb_free(cb);
501 cb = NULL;
502 }
503 }
504
505 device->event_cb = NULL;
506 493
494out:
507 mutex_unlock(&dev->device_lock); 495 mutex_unlock(&dev->device_lock);
496 return err;
508 497
509 if (!device->ops || !device->ops->disable)
510 return 0;
511
512 return device->ops->disable(device);
513} 498}
514EXPORT_SYMBOL_GPL(mei_cl_disable_device); 499EXPORT_SYMBOL_GPL(mei_cl_disable_device);
515 500
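
The bus.c receive path above now waits for either a completed read queued on cl->rd_completed or a disconnect, and re-checks the connection state after re-acquiring the device lock. A simplified sketch of that wait pattern (placeholder types and field names, not the mei structures):

	/* Sketch: drop the device lock, sleep until data is queued or the
	 * link drops, re-take the lock and re-check.
	 */
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	struct my_cl {
		wait_queue_head_t rx_wait;
		struct list_head rd_completed;
		bool connected;
		struct mutex *dev_lock;
	};

	static int my_recv_wait(struct my_cl *cl)
	{
		mutex_unlock(cl->dev_lock);

		if (wait_event_interruptible(cl->rx_wait,
					     !list_empty(&cl->rd_completed) ||
					     !cl->connected)) {
			/* interrupted by a signal before the condition held */
			return signal_pending(current) ? -EINTR : -ERESTARTSYS;
		}

		mutex_lock(cl->dev_lock);

		if (!cl->connected)
			return -EBUSY;	/* link went down while we slept */

		return 0;
	}
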
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index dfbddfe1c7a0..1e99ef6a54a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -48,14 +48,14 @@ void mei_me_cl_init(struct mei_me_client *me_cl)
  */
 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
 {
-	if (me_cl)
-		kref_get(&me_cl->refcnt);
+	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
+		return me_cl;
 
-	return me_cl;
+	return NULL;
 }
 
 /**
- * mei_me_cl_release - unlink and free me client
+ * mei_me_cl_release - free me client
  *
  * Locking: called under "dev->device_lock" lock
  *
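
The hunk above switches the lookup reference from kref_get() to kref_get_unless_zero(), so an object whose last reference is already gone cannot be handed out again. A standalone sketch of this lookup-with-refcount idiom (placeholder types, not the mei client structures):

	/* Sketch: refuse to resurrect an object that is already being freed. */
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kref refcnt;
		struct list_head list;
		int id;
	};

	static void my_obj_release(struct kref *ref)
	{
		kfree(container_of(ref, struct my_obj, refcnt));
	}

	static struct my_obj *my_obj_get(struct my_obj *obj)
	{
		if (obj && kref_get_unless_zero(&obj->refcnt))
			return obj;
		return NULL;		/* already on its way out */
	}

	static void my_obj_put(struct my_obj *obj)
	{
		if (obj)
			kref_put(&obj->refcnt, my_obj_release);
	}
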
@@ -65,9 +65,10 @@ static void mei_me_cl_release(struct kref *ref)
 {
 	struct mei_me_client *me_cl =
 		container_of(ref, struct mei_me_client, refcnt);
-	list_del(&me_cl->list);
+
 	kfree(me_cl);
 }
+
 /**
  * mei_me_cl_put - decrease me client refcount and free client if necessary
  *
@@ -82,51 +83,146 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
82} 83}
83 84
84/** 85/**
85 * mei_me_cl_by_uuid - locate me client by uuid 86 * __mei_me_cl_del - delete me client form the list and decrease
87 * reference counter
88 *
89 * @dev: mei device
90 * @me_cl: me client
91 *
92 * Locking: dev->me_clients_rwsem
93 */
94static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
95{
96 if (!me_cl)
97 return;
98
99 list_del(&me_cl->list);
100 mei_me_cl_put(me_cl);
101}
102
103/**
104 * mei_me_cl_add - add me client to the list
105 *
106 * @dev: mei device
107 * @me_cl: me client
108 */
109void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
110{
111 down_write(&dev->me_clients_rwsem);
112 list_add(&me_cl->list, &dev->me_clients);
113 up_write(&dev->me_clients_rwsem);
114}
115
116/**
117 * __mei_me_cl_by_uuid - locate me client by uuid
86 * increases ref count 118 * increases ref count
87 * 119 *
88 * @dev: mei device 120 * @dev: mei device
89 * @uuid: me client uuid 121 * @uuid: me client uuid
90 * 122 *
91 * Locking: called under "dev->device_lock" lock
92 *
93 * Return: me client or NULL if not found 123 * Return: me client or NULL if not found
124 *
125 * Locking: dev->me_clients_rwsem
94 */ 126 */
95struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, 127static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
96 const uuid_le *uuid) 128 const uuid_le *uuid)
97{ 129{
98 struct mei_me_client *me_cl; 130 struct mei_me_client *me_cl;
131 const uuid_le *pn;
99 132
100 list_for_each_entry(me_cl, &dev->me_clients, list) 133 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
101 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0) 134
135 list_for_each_entry(me_cl, &dev->me_clients, list) {
136 pn = &me_cl->props.protocol_name;
137 if (uuid_le_cmp(*uuid, *pn) == 0)
102 return mei_me_cl_get(me_cl); 138 return mei_me_cl_get(me_cl);
139 }
103 140
104 return NULL; 141 return NULL;
105} 142}
106 143
107/** 144/**
145 * mei_me_cl_by_uuid - locate me client by uuid
146 * increases ref count
147 *
148 * @dev: mei device
149 * @uuid: me client uuid
150 *
151 * Return: me client or NULL if not found
152 *
153 * Locking: dev->me_clients_rwsem
154 */
155struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
156 const uuid_le *uuid)
157{
158 struct mei_me_client *me_cl;
159
160 down_read(&dev->me_clients_rwsem);
161 me_cl = __mei_me_cl_by_uuid(dev, uuid);
162 up_read(&dev->me_clients_rwsem);
163
164 return me_cl;
165}
166
167/**
108 * mei_me_cl_by_id - locate me client by client id 168 * mei_me_cl_by_id - locate me client by client id
109 * increases ref count 169 * increases ref count
110 * 170 *
111 * @dev: the device structure 171 * @dev: the device structure
112 * @client_id: me client id 172 * @client_id: me client id
113 * 173 *
114 * Locking: called under "dev->device_lock" lock
115 *
116 * Return: me client or NULL if not found 174 * Return: me client or NULL if not found
175 *
176 * Locking: dev->me_clients_rwsem
117 */ 177 */
118struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) 178struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
119{ 179{
120 180
181 struct mei_me_client *__me_cl, *me_cl = NULL;
182
183 down_read(&dev->me_clients_rwsem);
184 list_for_each_entry(__me_cl, &dev->me_clients, list) {
185 if (__me_cl->client_id == client_id) {
186 me_cl = mei_me_cl_get(__me_cl);
187 break;
188 }
189 }
190 up_read(&dev->me_clients_rwsem);
191
192 return me_cl;
193}
194
195/**
196 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
197 * increases ref count
198 *
199 * @dev: the device structure
200 * @uuid: me client uuid
201 * @client_id: me client id
202 *
203 * Return: me client or null if not found
204 *
205 * Locking: dev->me_clients_rwsem
206 */
207static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
208 const uuid_le *uuid, u8 client_id)
209{
121 struct mei_me_client *me_cl; 210 struct mei_me_client *me_cl;
211 const uuid_le *pn;
212
213 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
122 214
123 list_for_each_entry(me_cl, &dev->me_clients, list) 215 list_for_each_entry(me_cl, &dev->me_clients, list) {
124 if (me_cl->client_id == client_id) 216 pn = &me_cl->props.protocol_name;
217 if (uuid_le_cmp(*uuid, *pn) == 0 &&
218 me_cl->client_id == client_id)
125 return mei_me_cl_get(me_cl); 219 return mei_me_cl_get(me_cl);
220 }
126 221
127 return NULL; 222 return NULL;
128} 223}
129 224
225
130/** 226/**
131 * mei_me_cl_by_uuid_id - locate me client by client id and uuid 227 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
132 * increases ref count 228 * increases ref count
@@ -135,21 +231,18 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
135 * @uuid: me client uuid 231 * @uuid: me client uuid
136 * @client_id: me client id 232 * @client_id: me client id
137 * 233 *
138 * Locking: called under "dev->device_lock" lock 234 * Return: me client or null if not found
139 *
140 * Return: me client or NULL if not found
141 */ 235 */
142struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, 236struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
143 const uuid_le *uuid, u8 client_id) 237 const uuid_le *uuid, u8 client_id)
144{ 238{
145 struct mei_me_client *me_cl; 239 struct mei_me_client *me_cl;
146 240
147 list_for_each_entry(me_cl, &dev->me_clients, list) 241 down_read(&dev->me_clients_rwsem);
148 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 && 242 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
149 me_cl->client_id == client_id) 243 up_read(&dev->me_clients_rwsem);
150 return mei_me_cl_get(me_cl);
151 244
152 return NULL; 245 return me_cl;
153} 246}
154 247
155/** 248/**
@@ -162,12 +255,14 @@ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
  */
 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
 {
-	struct mei_me_client *me_cl, *next;
+	struct mei_me_client *me_cl;
 
 	dev_dbg(dev->dev, "remove %pUl\n", uuid);
-	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
-		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
-			mei_me_cl_put(me_cl);
+
+	down_write(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid(dev, uuid);
+	__mei_me_cl_del(dev, me_cl);
+	up_write(&dev->me_clients_rwsem);
 }
 
 /**
@@ -181,15 +276,14 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
  */
 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
 {
-	struct mei_me_client *me_cl, *next;
-	const uuid_le *pn;
+	struct mei_me_client *me_cl;
 
 	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
-	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
-		pn = &me_cl->props.protocol_name;
-		if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0)
-			mei_me_cl_put(me_cl);
-	}
+
+	down_write(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
+	__mei_me_cl_del(dev, me_cl);
+	up_write(&dev->me_clients_rwsem);
 }
 
 /**
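
The two hunks above move list removal under a writer-held me_clients_rwsem and funnel it through __mei_me_cl_del(), while lookups take the semaphore shared. A sketch of the same reader/writer-semaphore idiom, reusing the placeholder my_obj helpers from the earlier sketch (not the mei code itself):

	/* Sketch: shared lock for lookups, exclusive lock for removal. */
	#include <linux/rwsem.h>

	static DECLARE_RWSEM(my_objs_rwsem);
	static LIST_HEAD(my_objs);

	static struct my_obj *my_obj_by_id(int id)
	{
		struct my_obj *obj, *found = NULL;

		down_read(&my_objs_rwsem);
		list_for_each_entry(obj, &my_objs, list) {
			if (obj->id == id) {
				found = my_obj_get(obj);	/* may still be NULL */
				break;
			}
		}
		up_read(&my_objs_rwsem);

		return found;
	}

	static void my_obj_remove(struct my_obj *obj)
	{
		if (!obj)
			return;

		down_write(&my_objs_rwsem);
		list_del(&obj->list);
		up_write(&my_objs_rwsem);

		my_obj_put(obj);	/* drop the list's reference */
	}
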
@@ -203,12 +297,12 @@ void mei_me_cl_rm_all(struct mei_device *dev)
203{ 297{
204 struct mei_me_client *me_cl, *next; 298 struct mei_me_client *me_cl, *next;
205 299
300 down_write(&dev->me_clients_rwsem);
206 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) 301 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
207 mei_me_cl_put(me_cl); 302 __mei_me_cl_del(dev, me_cl);
303 up_write(&dev->me_clients_rwsem);
208} 304}
209 305
210
211
212/** 306/**
213 * mei_cl_cmp_id - tells if the clients are the same 307 * mei_cl_cmp_id - tells if the clients are the same
214 * 308 *
@@ -227,7 +321,48 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
227} 321}
228 322
229/** 323/**
230 * mei_io_list_flush - removes cbs belonging to cl. 324 * mei_io_cb_free - free mei_cb_private related memory
325 *
326 * @cb: mei callback struct
327 */
328void mei_io_cb_free(struct mei_cl_cb *cb)
329{
330 if (cb == NULL)
331 return;
332
333 list_del(&cb->list);
334 kfree(cb->buf.data);
335 kfree(cb);
336}
337
338/**
339 * mei_io_cb_init - allocate and initialize io callback
340 *
341 * @cl: mei client
342 * @type: operation type
343 * @fp: pointer to file structure
344 *
345 * Return: mei_cl_cb pointer or NULL;
346 */
347struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
348 struct file *fp)
349{
350 struct mei_cl_cb *cb;
351
352 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
353 if (!cb)
354 return NULL;
355
356 INIT_LIST_HEAD(&cb->list);
357 cb->file_object = fp;
358 cb->cl = cl;
359 cb->buf_idx = 0;
360 cb->fop_type = type;
361 return cb;
362}
363
364/**
365 * __mei_io_list_flush - removes and frees cbs belonging to cl.
231 * 366 *
232 * @list: an instance of our list structure 367 * @list: an instance of our list structure
233 * @cl: host client, can be NULL for flushing the whole list 368 * @cl: host client, can be NULL for flushing the whole list
@@ -236,13 +371,12 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
236static void __mei_io_list_flush(struct mei_cl_cb *list, 371static void __mei_io_list_flush(struct mei_cl_cb *list,
237 struct mei_cl *cl, bool free) 372 struct mei_cl *cl, bool free)
238{ 373{
239 struct mei_cl_cb *cb; 374 struct mei_cl_cb *cb, *next;
240 struct mei_cl_cb *next;
241 375
242 /* enable removing everything if no cl is specified */ 376 /* enable removing everything if no cl is specified */
243 list_for_each_entry_safe(cb, next, &list->list, list) { 377 list_for_each_entry_safe(cb, next, &list->list, list) {
244 if (!cl || mei_cl_cmp_id(cl, cb->cl)) { 378 if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
245 list_del(&cb->list); 379 list_del_init(&cb->list);
246 if (free) 380 if (free)
247 mei_io_cb_free(cb); 381 mei_io_cb_free(cb);
248 } 382 }
@@ -260,7 +394,6 @@ void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
260 __mei_io_list_flush(list, cl, false); 394 __mei_io_list_flush(list, cl, false);
261} 395}
262 396
263
264/** 397/**
265 * mei_io_list_free - removes cb belonging to cl and free them 398 * mei_io_list_free - removes cb belonging to cl and free them
266 * 399 *
@@ -273,103 +406,107 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
273} 406}
274 407
275/** 408/**
--- old (client.c lines 276-363)
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
        if (cb == NULL)
                return;

        kfree(cb->request_buffer.data);
        kfree(cb->response_buffer.data);
        kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        mei_io_list_init(cb);

        cb->file_object = fp;
        cb->cl = cl;
        cb->buf_idx = 0;
        return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->request_buffer.data)
                return -ENOMEM;
        cb->request_buffer.size = length;
        return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
        if (!cb->response_buffer.data)
                return -ENOMEM;
        cb->response_buffer.size = length;
        return 0;
}

+++ new (client.c lines 409-499)
 * mei_io_cb_alloc_buf - allocate callback buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->buf.data = kmalloc(length, GFP_KERNEL);
        if (!cb->buf.data)
                return -ENOMEM;
        cb->buf.size = length;
        return 0;
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
                                  enum mei_cb_file_ops type, struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = mei_io_cb_init(cl, type, fp);
        if (!cb)
                return NULL;

        if (mei_io_cb_alloc_buf(cb, length)) {
                mei_io_cb_free(cb);
                return NULL;
        }

        return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *                  for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &cl->rd_completed, list)
                if (!fp || fp == cb->file_object)
                        return cb;

        return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *                        for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
        struct mei_cl_cb *cb, *next;

        list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
                if (!fp || fp == cb->file_object)
                        mei_io_cb_free(cb);


        list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
                if (!fp || fp == cb->file_object)
                        mei_io_cb_free(cb);
}
364 500
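As a reading aid for the new callback helpers above, here is a stand-alone user-space sketch (not the driver code) of the same two ideas: an allocation wrapper that frees the half-built callback when its buffer allocation fails, and a per-file flush over the pending and completed lists, where a NULL file handle matches every entry. The types and names (struct cb, cb_alloc, client_flush) are invented for the illustration.

/* Illustrative user-space model of the callback lifecycle above; not kernel code. */
#include <stdlib.h>

struct cb {
        struct cb *next;
        void *file;             /* models cb->file_object */
        unsigned char *buf;     /* models cb->buf.data */
        size_t size;
};

struct client {
        struct cb *rd_pending;   /* models cl->rd_pending */
        struct cb *rd_completed; /* models cl->rd_completed */
};

/* Either return a fully built cb or nothing: free the half-built object
 * when the buffer allocation fails, as mei_cl_alloc_cb() does. */
static struct cb *cb_alloc(void *file, size_t length)
{
        struct cb *cb = calloc(1, sizeof(*cb));

        if (!cb)
                return NULL;
        cb->file = file;
        if (length) {
                cb->buf = malloc(length);
                if (!cb->buf) {
                        free(cb);
                        return NULL;
                }
                cb->size = length;
        }
        return cb;
}

/* Free only the entries that belong to @file; NULL means "all files". */
static void list_flush(struct cb **head, void *file)
{
        struct cb **pp = head;

        while (*pp) {
                struct cb *cur = *pp;

                if (!file || file == cur->file) {
                        *pp = cur->next;
                        free(cur->buf);
                        free(cur);
                } else {
                        pp = &cur->next;
                }
        }
}

static void client_flush(struct client *cl, void *file)
{
        list_flush(&cl->rd_completed, file);
        list_flush(&cl->rd_pending, file);
}

int main(void)
{
        struct client cl = { 0 };
        int file_a, file_b;
        struct cb *cb;

        cb = cb_alloc(&file_a, 64);
        if (cb) {
                cb->next = cl.rd_pending;
                cl.rd_pending = cb;
        }
        cb = cb_alloc(&file_b, 64);
        if (cb) {
                cb->next = cl.rd_pending;
                cl.rd_pending = cb;
        }

        client_flush(&cl, &file_a);     /* frees only file_a's callback */
        client_flush(&cl, NULL);        /* frees everything that is left */
        return 0;
}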
365/** 501/**
366 * mei_cl_flush_queues - flushes queue lists belonging to cl. 502 * mei_cl_flush_queues - flushes queue lists belonging to cl.
367 * 503 *
368 * @cl: host client 504 * @cl: host client
505 * @fp: file pointer (matching cb file object), may be NULL
369 * 506 *
370 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. 507 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
371 */ 508 */
372int mei_cl_flush_queues(struct mei_cl *cl) 509int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
373{ 510{
374 struct mei_device *dev; 511 struct mei_device *dev;
375 512
@@ -379,13 +516,15 @@ int mei_cl_flush_queues(struct mei_cl *cl)
379 dev = cl->dev; 516 dev = cl->dev;
380 517
381 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 518 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
382 mei_io_list_flush(&cl->dev->read_list, cl);
383 mei_io_list_free(&cl->dev->write_list, cl); 519 mei_io_list_free(&cl->dev->write_list, cl);
384 mei_io_list_free(&cl->dev->write_waiting_list, cl); 520 mei_io_list_free(&cl->dev->write_waiting_list, cl);
385 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); 521 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
386 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); 522 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
387 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); 523 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
388 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); 524 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
525
526 mei_cl_read_cb_flush(cl, fp);
527
389 return 0; 528 return 0;
390} 529}
391 530
@@ -402,9 +541,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
402 init_waitqueue_head(&cl->wait); 541 init_waitqueue_head(&cl->wait);
403 init_waitqueue_head(&cl->rx_wait); 542 init_waitqueue_head(&cl->rx_wait);
404 init_waitqueue_head(&cl->tx_wait); 543 init_waitqueue_head(&cl->tx_wait);
544 INIT_LIST_HEAD(&cl->rd_completed);
545 INIT_LIST_HEAD(&cl->rd_pending);
405 INIT_LIST_HEAD(&cl->link); 546 INIT_LIST_HEAD(&cl->link);
406 INIT_LIST_HEAD(&cl->device_link); 547 INIT_LIST_HEAD(&cl->device_link);
407 cl->reading_state = MEI_IDLE;
408 cl->writing_state = MEI_IDLE; 548 cl->writing_state = MEI_IDLE;
409 cl->dev = dev; 549 cl->dev = dev;
410} 550}
@@ -429,31 +569,14 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
429} 569}
430 570
--- old (client.c lines 431-457)
/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * Return: cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &dev->read_list.list, list)
                if (mei_cl_cmp_id(cl, cb->cl))
                        return cb;
        return NULL;
}

/** mei_cl_link: allocate host id in the host map
 *
 * @cl - host client
 * @id - fixed host id or -1 for generic one
 *
 * Return: 0 on success
 *      -EINVAL on incorrect values
 *      -ENONET if client not found
 */

+++ new (client.c lines 571-580)
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Return: 0 on success
 *      -EINVAL on incorrect values
 *      -EMFILE if open count exceeded.
 */
458int mei_cl_link(struct mei_cl *cl, int id) 581int mei_cl_link(struct mei_cl *cl, int id)
459{ 582{
@@ -535,28 +658,31 @@ int mei_cl_unlink(struct mei_cl *cl)
535 658
536void mei_host_client_init(struct work_struct *work) 659void mei_host_client_init(struct work_struct *work)
537{ 660{
--- old (client.c lines 538-555)
        struct mei_device *dev = container_of(work,
                                              struct mei_device, init_work);
        struct mei_me_client *me_cl;
        struct mei_client_properties *props;

        mutex_lock(&dev->device_lock);

        list_for_each_entry(me_cl, &dev->me_clients, list) {
                props = &me_cl->props;

                if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
                        mei_amthif_host_init(dev);
                else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
                        mei_wd_host_init(dev);
                else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
                        mei_nfc_host_init(dev);

        }

+++ new (client.c lines 661-682)
        struct mei_device *dev =
                container_of(work, struct mei_device, init_work);
        struct mei_me_client *me_cl;

        mutex_lock(&dev->device_lock);


        me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
        if (me_cl)
                mei_amthif_host_init(dev);
        mei_me_cl_put(me_cl);

        me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
        if (me_cl)
                mei_wd_host_init(dev);
        mei_me_cl_put(me_cl);

        me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
        if (me_cl)
                mei_nfc_host_init(dev);
        mei_me_cl_put(me_cl);

556 683
557 dev->dev_state = MEI_DEV_ENABLED; 684 dev->dev_state = MEI_DEV_ENABLED;
558 dev->reset_count = 0; 685 dev->reset_count = 0;
559
560 mutex_unlock(&dev->device_lock); 686 mutex_unlock(&dev->device_lock);
561 687
562 pm_runtime_mark_last_busy(dev->dev); 688 pm_runtime_mark_last_busy(dev->dev);
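The reworked mei_host_client_init() above replaces the open-coded walk over dev->me_clients with per-UUID lookups that return a reference-counted entry (or NULL), followed by an unconditional put. A hedged user-space model of that lookup/get/put shape, with invented names (client_by_uuid, client_put), might look like this; the put deliberately tolerates NULL so callers never need a separate branch to drop the reference.

/* Stand-alone model of the lookup/get/put pattern used above; not the driver. */
#include <stdio.h>
#include <string.h>

struct me_client {
        char uuid[16];
        int refcnt;
};

static struct me_client table[] = {
        { "amthif-uuid", 1 },
        { "wd-uuid",     1 },
};

/* Lookup takes an extra reference, like mei_me_cl_by_uuid(). */
static struct me_client *client_by_uuid(const char *uuid)
{
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!strcmp(table[i].uuid, uuid)) {
                        table[i].refcnt++;
                        return &table[i];
                }
        return NULL;
}

/* Put tolerates NULL, so callers can drop the reference unconditionally. */
static void client_put(struct me_client *me_cl)
{
        if (!me_cl)
                return;
        me_cl->refcnt--;
}

static void host_init_one(const char *uuid)
{
        struct me_client *me_cl = client_by_uuid(uuid);

        if (me_cl)
                printf("initializing %s\n", uuid);
        client_put(me_cl);      /* no-op when the lookup failed */
}

int main(void)
{
        host_init_one("amthif-uuid");
        host_init_one("nfc-uuid");      /* absent: nothing to init, put is a no-op */
        return 0;
}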
@@ -620,13 +746,10 @@ int mei_cl_disconnect(struct mei_cl *cl)
620 return rets; 746 return rets;
621 } 747 }
622 748
623 cb = mei_io_cb_init(cl, NULL); 749 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
624 if (!cb) { 750 rets = cb ? 0 : -ENOMEM;
625 rets = -ENOMEM; 751 if (rets)
626 goto free; 752 goto free;
627 }
628
629 cb->fop_type = MEI_FOP_DISCONNECT;
630 753
631 if (mei_hbuf_acquire(dev)) { 754 if (mei_hbuf_acquire(dev)) {
632 if (mei_hbm_cl_disconnect_req(dev, cl)) { 755 if (mei_hbm_cl_disconnect_req(dev, cl)) {
@@ -727,13 +850,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
727 return rets; 850 return rets;
728 } 851 }
729 852
730 cb = mei_io_cb_init(cl, file); 853 cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
731 if (!cb) { 854 rets = cb ? 0 : -ENOMEM;
732 rets = -ENOMEM; 855 if (rets)
733 goto out; 856 goto out;
734 }
735
736 cb->fop_type = MEI_FOP_CONNECT;
737 857
738 /* run hbuf acquire last so we don't have to undo */ 858 /* run hbuf acquire last so we don't have to undo */
739 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { 859 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
@@ -756,7 +876,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
756 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 876 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
757 mutex_lock(&dev->device_lock); 877 mutex_lock(&dev->device_lock);
758 878
759 if (cl->state != MEI_FILE_CONNECTED) { 879 if (!mei_cl_is_connected(cl)) {
760 cl->state = MEI_FILE_DISCONNECTED; 880 cl->state = MEI_FILE_DISCONNECTED;
761 /* something went really wrong */ 881 /* something went really wrong */
762 if (!cl->status) 882 if (!cl->status)
@@ -778,6 +898,37 @@ out:
778} 898}
779 899
780/** 900/**
901 * mei_cl_alloc_linked - allocate and link host client
902 *
903 * @dev: the device structure
904 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
905 *
 906 * Return: cl on success, ERR_PTR on failure
907 */
908struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
909{
910 struct mei_cl *cl;
911 int ret;
912
913 cl = mei_cl_allocate(dev);
914 if (!cl) {
915 ret = -ENOMEM;
916 goto err;
917 }
918
919 ret = mei_cl_link(cl, id);
920 if (ret)
921 goto err;
922
923 return cl;
924err:
925 kfree(cl);
926 return ERR_PTR(ret);
927}
928
929
930
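mei_cl_alloc_linked() above folds allocation and linking into one call and reports failure through ERR_PTR(), so its caller keeps a single pointer variable and tests it with IS_ERR()/PTR_ERR(). The kernel provides those macros in <linux/err.h>; the sketch below re-creates a minimal user-space stand-in for them (cl_alloc_linked and struct cl are invented here) purely to show how the convention is consumed.

/* User-space model of the ERR_PTR/IS_ERR/PTR_ERR convention used by
 * mei_cl_alloc_linked(); the real macros live in <linux/err.h>. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct cl { int id; };

/* Allocate and "link": on any failure hand back the errno encoded in
 * the returned pointer, so the caller needs just one variable. */
static struct cl *cl_alloc_linked(int id)
{
        struct cl *cl = malloc(sizeof(*cl));

        if (!cl)
                return ERR_PTR(-ENOMEM);
        if (id < 0) {                   /* models a failed mei_cl_link() */
                free(cl);
                return ERR_PTR(-EMFILE);
        }
        cl->id = id;
        return cl;
}

int main(void)
{
        struct cl *cl = cl_alloc_linked(-1);

        if (IS_ERR(cl))
                printf("link failed: %ld\n", PTR_ERR(cl));

        cl = cl_alloc_linked(3);
        if (!IS_ERR(cl)) {
                printf("linked client %d\n", cl->id);
                free(cl);
        }
        return 0;
}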
931/**
781 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. 932 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
782 * 933 *
783 * @cl: private data of the file object 934 * @cl: private data of the file object
@@ -866,10 +1017,11 @@ out:
866 * 1017 *
867 * @cl: host client 1018 * @cl: host client
868 * @length: number of bytes to read 1019 * @length: number of bytes to read
1020 * @fp: pointer to file structure
869 * 1021 *
870 * Return: 0 on success, <0 on failure. 1022 * Return: 0 on success, <0 on failure.
871 */ 1023 */
872int mei_cl_read_start(struct mei_cl *cl, size_t length) 1024int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
873{ 1025{
874 struct mei_device *dev; 1026 struct mei_device *dev;
875 struct mei_cl_cb *cb; 1027 struct mei_cl_cb *cb;
@@ -884,10 +1036,10 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
884 if (!mei_cl_is_connected(cl)) 1036 if (!mei_cl_is_connected(cl))
885 return -ENODEV; 1037 return -ENODEV;
886 1038
887 if (cl->read_cb) { 1039 /* HW currently supports only one pending read */
888 cl_dbg(dev, cl, "read is pending.\n"); 1040 if (!list_empty(&cl->rd_pending))
889 return -EBUSY; 1041 return -EBUSY;
890 } 1042
891 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 1043 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
892 if (!me_cl) { 1044 if (!me_cl) {
893 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); 1045 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
@@ -904,29 +1056,21 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
904 return rets; 1056 return rets;
905 } 1057 }
906 1058
907 cb = mei_io_cb_init(cl, NULL); 1059 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
908 if (!cb) { 1060 rets = cb ? 0 : -ENOMEM;
909 rets = -ENOMEM;
910 goto out;
911 }
912
913 rets = mei_io_cb_alloc_resp_buf(cb, length);
914 if (rets) 1061 if (rets)
915 goto out; 1062 goto out;
916 1063
917 cb->fop_type = MEI_FOP_READ;
918 if (mei_hbuf_acquire(dev)) { 1064 if (mei_hbuf_acquire(dev)) {
919 rets = mei_hbm_cl_flow_control_req(dev, cl); 1065 rets = mei_hbm_cl_flow_control_req(dev, cl);
920 if (rets < 0) 1066 if (rets < 0)
921 goto out; 1067 goto out;
922 1068
923 list_add_tail(&cb->list, &dev->read_list.list); 1069 list_add_tail(&cb->list, &cl->rd_pending);
924 } else { 1070 } else {
925 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 1071 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
926 } 1072 }
927 1073
928 cl->read_cb = cb;
929
930out: 1074out:
931 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1075 cl_dbg(dev, cl, "rpm: autosuspend\n");
932 pm_runtime_mark_last_busy(dev->dev); 1076 pm_runtime_mark_last_busy(dev->dev);
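mei_cl_read_start() above now gates reads on list_empty(&cl->rd_pending), since the hardware supports only one pending read per client; a second request gets -EBUSY until the first one completes. A minimal model of that gate, with a plain flag standing in for the pending list and invented names (read_start, read_complete), could be:

/* Minimal model of the "one pending read per client" rule enforced
 * above with list_empty(&cl->rd_pending); names are illustrative only. */
#include <errno.h>
#include <stdio.h>

struct client {
        int rd_pending;         /* 0: list empty, 1: a read is queued */
};

static int read_start(struct client *cl, size_t length)
{
        (void)length;           /* buffer allocation elided in this model */
        if (cl->rd_pending)
                return -EBUSY;  /* HW supports only one pending read */
        /* ...allocate the callback, request flow control... */
        cl->rd_pending = 1;
        return 0;
}

static void read_complete(struct client *cl)
{
        cl->rd_pending = 0;     /* cb moved to the completed list */
}

int main(void)
{
        struct client cl = { 0 };

        printf("first read:  %d\n", read_start(&cl, 128));      /* 0 */
        printf("second read: %d\n", read_start(&cl, 128));      /* -EBUSY */
        read_complete(&cl);
        printf("after completion: %d\n", read_start(&cl, 128)); /* 0 */
        return 0;
}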
@@ -964,7 +1108,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
964 1108
965 dev = cl->dev; 1109 dev = cl->dev;
966 1110
967 buf = &cb->request_buffer; 1111 buf = &cb->buf;
968 1112
969 rets = mei_cl_flow_ctrl_creds(cl); 1113 rets = mei_cl_flow_ctrl_creds(cl);
970 if (rets < 0) 1114 if (rets < 0)
@@ -999,7 +1143,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
999 } 1143 }
1000 1144
1001 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", 1145 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
1002 cb->request_buffer.size, cb->buf_idx); 1146 cb->buf.size, cb->buf_idx);
1003 1147
1004 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); 1148 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
1005 if (rets) { 1149 if (rets) {
@@ -1011,6 +1155,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1011 cl->status = 0; 1155 cl->status = 0;
1012 cl->writing_state = MEI_WRITING; 1156 cl->writing_state = MEI_WRITING;
1013 cb->buf_idx += mei_hdr.length; 1157 cb->buf_idx += mei_hdr.length;
1158 cb->completed = mei_hdr.msg_complete == 1;
1014 1159
1015 if (mei_hdr.msg_complete) { 1160 if (mei_hdr.msg_complete) {
1016 if (mei_cl_flow_ctrl_reduce(cl)) 1161 if (mei_cl_flow_ctrl_reduce(cl))
@@ -1048,7 +1193,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1048 dev = cl->dev; 1193 dev = cl->dev;
1049 1194
1050 1195
1051 buf = &cb->request_buffer; 1196 buf = &cb->buf;
1052 1197
1053 cl_dbg(dev, cl, "size=%d\n", buf->size); 1198 cl_dbg(dev, cl, "size=%d\n", buf->size);
1054 1199
@@ -1059,7 +1204,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1059 return rets; 1204 return rets;
1060 } 1205 }
1061 1206
1062 cb->fop_type = MEI_FOP_WRITE;
1063 cb->buf_idx = 0; 1207 cb->buf_idx = 0;
1064 cl->writing_state = MEI_IDLE; 1208 cl->writing_state = MEI_IDLE;
1065 1209
@@ -1099,6 +1243,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1099 1243
1100 cl->writing_state = MEI_WRITING; 1244 cl->writing_state = MEI_WRITING;
1101 cb->buf_idx = mei_hdr.length; 1245 cb->buf_idx = mei_hdr.length;
1246 cb->completed = mei_hdr.msg_complete == 1;
1102 1247
1103out: 1248out:
1104 if (mei_hdr.msg_complete) { 1249 if (mei_hdr.msg_complete) {
@@ -1151,11 +1296,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1151 if (waitqueue_active(&cl->tx_wait)) 1296 if (waitqueue_active(&cl->tx_wait))
1152 wake_up_interruptible(&cl->tx_wait); 1297 wake_up_interruptible(&cl->tx_wait);
1153 1298
1154 } else if (cb->fop_type == MEI_FOP_READ && 1299 } else if (cb->fop_type == MEI_FOP_READ) {
1155 MEI_READING == cl->reading_state) { 1300 list_add_tail(&cb->list, &cl->rd_completed);
1156 cl->reading_state = MEI_READ_COMPLETE;
1157 if (waitqueue_active(&cl->rx_wait)) 1301 if (waitqueue_active(&cl->rx_wait))
1158 wake_up_interruptible(&cl->rx_wait); 1302 wake_up_interruptible_all(&cl->rx_wait);
1159 else 1303 else
1160 mei_cl_bus_rx_event(cl); 1304 mei_cl_bus_rx_event(cl);
1161 1305
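In mei_cl_complete() above, a finished read callback is appended to cl->rd_completed and every sleeper on rx_wait is woken with wake_up_interruptible_all(); mei_read() correspondingly sleeps until the completed list is non-empty or the client drops off. The user-space sketch below models that hand-off with a pthread condition variable in place of the waitqueue; the variable names are illustrative only.

/* User-space model of the completed-list + broadcast wake-up used above
 * (a pthread condition variable standing in for the rx_wait waitqueue). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rx_wait = PTHREAD_COND_INITIALIZER;
static int rd_completed;        /* count of completed read callbacks */
static int connected = 1;

static void *reader(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (rd_completed == 0 && connected)
                pthread_cond_wait(&rx_wait, &lock);     /* like wait_event_interruptible() */
        if (rd_completed) {
                rd_completed--;                         /* consume one callback */
                printf("reader: got a completed read\n");
        } else {
                printf("reader: client disconnected\n");
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);
        usleep(10000);

        pthread_mutex_lock(&lock);
        rd_completed++;                         /* cb moved to rd_completed */
        pthread_cond_broadcast(&rx_wait);       /* wake_up_interruptible_all() */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}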
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index cfcde8e97fc4..0a39e5d45171 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -31,7 +31,10 @@ void mei_me_cl_init(struct mei_me_client *me_cl);
31void mei_me_cl_put(struct mei_me_client *me_cl); 31void mei_me_cl_put(struct mei_me_client *me_cl);
32struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl); 32struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl);
33 33
34struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, 34void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl);
35void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl);
36
37struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
35 const uuid_le *uuid); 38 const uuid_le *uuid);
36struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id); 39struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
37struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, 40struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
@@ -44,10 +47,10 @@ void mei_me_cl_rm_all(struct mei_device *dev);
44/* 47/*
45 * MEI IO Functions 48 * MEI IO Functions
46 */ 49 */
47struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); 50struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
51 struct file *fp);
48void mei_io_cb_free(struct mei_cl_cb *priv_cb); 52void mei_io_cb_free(struct mei_cl_cb *priv_cb);
49int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); 53int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
50int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
51 54
52 55
53/** 56/**
@@ -72,9 +75,14 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
72int mei_cl_link(struct mei_cl *cl, int id); 75int mei_cl_link(struct mei_cl *cl, int id);
73int mei_cl_unlink(struct mei_cl *cl); 76int mei_cl_unlink(struct mei_cl *cl);
74 77
75int mei_cl_flush_queues(struct mei_cl *cl); 78struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
76struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
77 79
80struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
81 const struct file *fp);
82void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
83struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
84 enum mei_cb_file_ops type, struct file *fp);
85int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
78 86
79int mei_cl_flow_ctrl_creds(struct mei_cl *cl); 87int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
80 88
@@ -82,23 +90,25 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
82/* 90/*
83 * MEI input output function prototype 91 * MEI input output function prototype
84 */ 92 */
93
94/**
95 * mei_cl_is_connected - host client is connected
96 *
 97 * @cl: host client
 98 *
 99 * Return: true if the host client is connected
100 */
85static inline bool mei_cl_is_connected(struct mei_cl *cl) 101static inline bool mei_cl_is_connected(struct mei_cl *cl)
86{ 102{
87 return cl->dev && 103 return cl->state == MEI_FILE_CONNECTED;
88 cl->dev->dev_state == MEI_DEV_ENABLED &&
89 cl->state == MEI_FILE_CONNECTED;
90}
91static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
92{
93 return MEI_FILE_INITIALIZING == cl->state ||
94 MEI_FILE_DISCONNECTED == cl->state ||
95 MEI_FILE_DISCONNECTING == cl->state;
96} 104}
97 105
98bool mei_cl_is_other_connecting(struct mei_cl *cl); 106bool mei_cl_is_other_connecting(struct mei_cl *cl);
99int mei_cl_disconnect(struct mei_cl *cl); 107int mei_cl_disconnect(struct mei_cl *cl);
100int mei_cl_connect(struct mei_cl *cl, struct file *file); 108int mei_cl_connect(struct mei_cl *cl, struct file *file);
101int mei_cl_read_start(struct mei_cl *cl, size_t length); 109int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
110int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
111 struct mei_cl_cb *cmpl_list);
102int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); 112int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
103int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 113int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
104 struct mei_cl_cb *cmpl_list); 114 struct mei_cl_cb *cmpl_list);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index b125380ee871..d9cd7e6ee484 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -28,7 +28,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
28 size_t cnt, loff_t *ppos) 28 size_t cnt, loff_t *ppos)
29{ 29{
30 struct mei_device *dev = fp->private_data; 30 struct mei_device *dev = fp->private_data;
31 struct mei_me_client *me_cl, *n; 31 struct mei_me_client *me_cl;
32 size_t bufsz = 1; 32 size_t bufsz = 1;
33 char *buf; 33 char *buf;
34 int i = 0; 34 int i = 0;
@@ -38,15 +38,14 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
38#define HDR \ 38#define HDR \
39" |id|fix| UUID |con|msg len|sb|refc|\n" 39" |id|fix| UUID |con|msg len|sb|refc|\n"
40 40
41 mutex_lock(&dev->device_lock); 41 down_read(&dev->me_clients_rwsem);
42
43 list_for_each_entry(me_cl, &dev->me_clients, list) 42 list_for_each_entry(me_cl, &dev->me_clients, list)
44 bufsz++; 43 bufsz++;
45 44
46 bufsz *= sizeof(HDR) + 1; 45 bufsz *= sizeof(HDR) + 1;
47 buf = kzalloc(bufsz, GFP_KERNEL); 46 buf = kzalloc(bufsz, GFP_KERNEL);
48 if (!buf) { 47 if (!buf) {
49 mutex_unlock(&dev->device_lock); 48 up_read(&dev->me_clients_rwsem);
50 return -ENOMEM; 49 return -ENOMEM;
51 } 50 }
52 51
@@ -56,10 +55,9 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
56 if (dev->dev_state != MEI_DEV_ENABLED) 55 if (dev->dev_state != MEI_DEV_ENABLED)
57 goto out; 56 goto out;
58 57
59 list_for_each_entry_safe(me_cl, n, &dev->me_clients, list) { 58 list_for_each_entry(me_cl, &dev->me_clients, list) {
60 59
61 me_cl = mei_me_cl_get(me_cl); 60 if (mei_me_cl_get(me_cl)) {
62 if (me_cl) {
63 pos += scnprintf(buf + pos, bufsz - pos, 61 pos += scnprintf(buf + pos, bufsz - pos,
64 "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", 62 "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
65 i++, me_cl->client_id, 63 i++, me_cl->client_id,
@@ -69,12 +67,13 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
69 me_cl->props.max_msg_length, 67 me_cl->props.max_msg_length,
70 me_cl->props.single_recv_buf, 68 me_cl->props.single_recv_buf,
71 atomic_read(&me_cl->refcnt.refcount)); 69 atomic_read(&me_cl->refcnt.refcount));
72 }
73 70
74 mei_me_cl_put(me_cl); 71 mei_me_cl_put(me_cl);
72 }
75 } 73 }
74
76out: 75out:
77 mutex_unlock(&dev->device_lock); 76 up_read(&dev->me_clients_rwsem);
78 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); 77 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
79 kfree(buf); 78 kfree(buf);
80 return ret; 79 return ret;
@@ -118,7 +117,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
118 pos += scnprintf(buf + pos, bufsz - pos, 117 pos += scnprintf(buf + pos, bufsz - pos,
119 "%2d|%2d|%4d|%5d|%2d|%2d|\n", 118 "%2d|%2d|%4d|%5d|%2d|%2d|\n",
120 i, cl->me_client_id, cl->host_client_id, cl->state, 119 i, cl->me_client_id, cl->host_client_id, cl->state,
121 cl->reading_state, cl->writing_state); 120 !list_empty(&cl->rd_completed), cl->writing_state);
122 i++; 121 i++;
123 } 122 }
124out: 123out:
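The debugfs dump above now takes me_clients_rwsem for reading and pins each entry with get/put while formatting it, instead of holding device_lock across the walk. A rough user-space analogue of that pattern, using a pthread rwlock and a plain counter for the reference (client_get/client_put are invented names), is sketched below.

/* Sketch of the read-lock + per-entry pin pattern used in the debugfs
 * dump above, with a pthread rwlock in place of me_clients_rwsem. */
#include <pthread.h>
#include <stdio.h>

struct me_client {
        int id;
        int refcnt;
};

static struct me_client clients[] = { { 1, 1 }, { 2, 1 }, { 3, 1 } };
static pthread_rwlock_t clients_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static struct me_client *client_get(struct me_client *me_cl)
{
        if (!me_cl || me_cl->refcnt == 0)
                return NULL;    /* entry is going away, skip it */
        me_cl->refcnt++;
        return me_cl;
}

static void client_put(struct me_client *me_cl)
{
        if (me_cl)
                me_cl->refcnt--;
}

static void dump_clients(void)
{
        size_t i;

        pthread_rwlock_rdlock(&clients_rwsem);  /* down_read() */
        for (i = 0; i < sizeof(clients) / sizeof(clients[0]); i++) {
                if (client_get(&clients[i])) {
                        printf("me client %d refcnt %d\n",
                               clients[i].id, clients[i].refcnt);
                        client_put(&clients[i]);
                }
        }
        pthread_rwlock_unlock(&clients_rwsem);  /* up_read() */
}

int main(void)
{
        dump_clients();
        return 0;
}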
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index c8412d41e4f1..58da92565c5e 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -338,7 +338,8 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
338 me_cl->client_id = res->me_addr; 338 me_cl->client_id = res->me_addr;
339 me_cl->mei_flow_ctrl_creds = 0; 339 me_cl->mei_flow_ctrl_creds = 0;
340 340
341 list_add(&me_cl->list, &dev->me_clients); 341 mei_me_cl_add(dev, me_cl);
342
342 return 0; 343 return 0;
343} 344}
344 345
@@ -638,7 +639,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
638 continue; 639 continue;
639 640
640 if (mei_hbm_cl_addr_equal(cl, rs)) { 641 if (mei_hbm_cl_addr_equal(cl, rs)) {
641 list_del(&cb->list); 642 list_del_init(&cb->list);
642 break; 643 break;
643 } 644 }
644 } 645 }
@@ -683,10 +684,9 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
683 cl->state = MEI_FILE_DISCONNECTED; 684 cl->state = MEI_FILE_DISCONNECTED;
684 cl->timer_count = 0; 685 cl->timer_count = 0;
685 686
686 cb = mei_io_cb_init(cl, NULL); 687 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
687 if (!cb) 688 if (!cb)
688 return -ENOMEM; 689 return -ENOMEM;
689 cb->fop_type = MEI_FOP_DISCONNECT_RSP;
690 cl_dbg(dev, cl, "add disconnect response as first\n"); 690 cl_dbg(dev, cl, "add disconnect response as first\n");
691 list_add(&cb->list, &dev->ctrl_wr_list.list); 691 list_add(&cb->list, &dev->ctrl_wr_list.list);
692 } 692 }
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f8fd503dfbd6..6fb75e62a764 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -25,6 +25,8 @@
25#include "hw-me.h" 25#include "hw-me.h"
26#include "hw-me-regs.h" 26#include "hw-me-regs.h"
27 27
28#include "mei-trace.h"
29
28/** 30/**
29 * mei_me_reg_read - Reads 32bit data from the mei device 31 * mei_me_reg_read - Reads 32bit data from the mei device
30 * 32 *
@@ -61,45 +63,79 @@ static inline void mei_me_reg_write(const struct mei_me_hw *hw,
61 * 63 *
62 * Return: ME_CB_RW register value (u32) 64 * Return: ME_CB_RW register value (u32)
63 */ 65 */
64static u32 mei_me_mecbrw_read(const struct mei_device *dev) 66static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
65{ 67{
66 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); 68 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
67} 69}
70
71/**
72 * mei_me_hcbww_write - write 32bit data to the host circular buffer
73 *
74 * @dev: the device structure
75 * @data: 32bit data to be written to the host circular buffer
76 */
77static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
78{
79 mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
80}
81
68/** 82/**
69 * mei_me_mecsr_read - Reads 32bit data from the ME CSR 83 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
70 * 84 *
71 * @hw: the me hardware structure 85 * @dev: the device structure
72 * 86 *
73 * Return: ME_CSR_HA register value (u32) 87 * Return: ME_CSR_HA register value (u32)
74 */ 88 */
75static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) 89static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
76{ 90{
77 return mei_me_reg_read(hw, ME_CSR_HA); 91 u32 reg;
92
93 reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
94 trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
95
96 return reg;
78} 97}
79 98
80/** 99/**
81 * mei_hcsr_read - Reads 32bit data from the host CSR 100 * mei_hcsr_read - Reads 32bit data from the host CSR
82 * 101 *
83 * @hw: the me hardware structure 102 * @dev: the device structure
84 * 103 *
85 * Return: H_CSR register value (u32) 104 * Return: H_CSR register value (u32)
86 */ 105 */
87static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) 106static inline u32 mei_hcsr_read(const struct mei_device *dev)
107{
108 u32 reg;
109
110 reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
111 trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
112
113 return reg;
114}
115
116/**
117 * mei_hcsr_write - writes H_CSR register to the mei device
118 *
119 * @dev: the device structure
120 * @reg: new register value
121 */
122static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
88{ 123{
89 return mei_me_reg_read(hw, H_CSR); 124 trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
125 mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
90} 126}
91 127
92/** 128/**
93 * mei_hcsr_set - writes H_CSR register to the mei device, 129 * mei_hcsr_set - writes H_CSR register to the mei device,
94 * and ignores the H_IS bit for it is write-one-to-zero. 130 * and ignores the H_IS bit for it is write-one-to-zero.
95 * 131 *
96 * @hw: the me hardware structure 132 * @dev: the device structure
97 * @hcsr: new register value 133 * @reg: new register value
98 */ 134 */
99static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) 135static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
100{ 136{
101 hcsr &= ~H_IS; 137 reg &= ~H_IS;
102 mei_me_reg_write(hw, H_CSR, hcsr); 138 mei_hcsr_write(dev, reg);
103} 139}
104 140
105/** 141/**
@@ -141,7 +177,7 @@ static int mei_me_fw_status(struct mei_device *dev,
141static void mei_me_hw_config(struct mei_device *dev) 177static void mei_me_hw_config(struct mei_device *dev)
142{ 178{
143 struct mei_me_hw *hw = to_me_hw(dev); 179 struct mei_me_hw *hw = to_me_hw(dev);
144 u32 hcsr = mei_hcsr_read(to_me_hw(dev)); 180 u32 hcsr = mei_hcsr_read(dev);
145 /* Doesn't change in runtime */ 181 /* Doesn't change in runtime */
146 dev->hbuf_depth = (hcsr & H_CBD) >> 24; 182 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
147 183
@@ -170,11 +206,10 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
170 */ 206 */
171static void mei_me_intr_clear(struct mei_device *dev) 207static void mei_me_intr_clear(struct mei_device *dev)
172{ 208{
173 struct mei_me_hw *hw = to_me_hw(dev); 209 u32 hcsr = mei_hcsr_read(dev);
174 u32 hcsr = mei_hcsr_read(hw);
175 210
176 if ((hcsr & H_IS) == H_IS) 211 if ((hcsr & H_IS) == H_IS)
177 mei_me_reg_write(hw, H_CSR, hcsr); 212 mei_hcsr_write(dev, hcsr);
178} 213}
179/** 214/**
180 * mei_me_intr_enable - enables mei device interrupts 215 * mei_me_intr_enable - enables mei device interrupts
@@ -183,11 +218,10 @@ static void mei_me_intr_clear(struct mei_device *dev)
183 */ 218 */
184static void mei_me_intr_enable(struct mei_device *dev) 219static void mei_me_intr_enable(struct mei_device *dev)
185{ 220{
186 struct mei_me_hw *hw = to_me_hw(dev); 221 u32 hcsr = mei_hcsr_read(dev);
187 u32 hcsr = mei_hcsr_read(hw);
188 222
189 hcsr |= H_IE; 223 hcsr |= H_IE;
190 mei_hcsr_set(hw, hcsr); 224 mei_hcsr_set(dev, hcsr);
191} 225}
192 226
193/** 227/**
@@ -197,11 +231,10 @@ static void mei_me_intr_enable(struct mei_device *dev)
197 */ 231 */
198static void mei_me_intr_disable(struct mei_device *dev) 232static void mei_me_intr_disable(struct mei_device *dev)
199{ 233{
200 struct mei_me_hw *hw = to_me_hw(dev); 234 u32 hcsr = mei_hcsr_read(dev);
201 u32 hcsr = mei_hcsr_read(hw);
202 235
203 hcsr &= ~H_IE; 236 hcsr &= ~H_IE;
204 mei_hcsr_set(hw, hcsr); 237 mei_hcsr_set(dev, hcsr);
205} 238}
206 239
207/** 240/**
@@ -211,12 +244,11 @@ static void mei_me_intr_disable(struct mei_device *dev)
211 */ 244 */
212static void mei_me_hw_reset_release(struct mei_device *dev) 245static void mei_me_hw_reset_release(struct mei_device *dev)
213{ 246{
214 struct mei_me_hw *hw = to_me_hw(dev); 247 u32 hcsr = mei_hcsr_read(dev);
215 u32 hcsr = mei_hcsr_read(hw);
216 248
217 hcsr |= H_IG; 249 hcsr |= H_IG;
218 hcsr &= ~H_RST; 250 hcsr &= ~H_RST;
219 mei_hcsr_set(hw, hcsr); 251 mei_hcsr_set(dev, hcsr);
220 252
221 /* complete this write before we set host ready on another CPU */ 253 /* complete this write before we set host ready on another CPU */
222 mmiowb(); 254 mmiowb();
@@ -231,8 +263,7 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
231 */ 263 */
232static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) 264static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
233{ 265{
234 struct mei_me_hw *hw = to_me_hw(dev); 266 u32 hcsr = mei_hcsr_read(dev);
235 u32 hcsr = mei_hcsr_read(hw);
236 267
237 /* H_RST may be found lit before reset is started, 268 /* H_RST may be found lit before reset is started,
238 * for example if preceding reset flow hasn't completed. 269 * for example if preceding reset flow hasn't completed.
@@ -242,8 +273,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
242 if ((hcsr & H_RST) == H_RST) { 273 if ((hcsr & H_RST) == H_RST) {
243 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); 274 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
244 hcsr &= ~H_RST; 275 hcsr &= ~H_RST;
245 mei_hcsr_set(hw, hcsr); 276 mei_hcsr_set(dev, hcsr);
246 hcsr = mei_hcsr_read(hw); 277 hcsr = mei_hcsr_read(dev);
247 } 278 }
248 279
249 hcsr |= H_RST | H_IG | H_IS; 280 hcsr |= H_RST | H_IG | H_IS;
@@ -254,13 +285,13 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
254 hcsr &= ~H_IE; 285 hcsr &= ~H_IE;
255 286
256 dev->recvd_hw_ready = false; 287 dev->recvd_hw_ready = false;
257 mei_me_reg_write(hw, H_CSR, hcsr); 288 mei_hcsr_write(dev, hcsr);
258 289
259 /* 290 /*
260 * Host reads the H_CSR once to ensure that the 291 * Host reads the H_CSR once to ensure that the
261 * posted write to H_CSR completes. 292 * posted write to H_CSR completes.
262 */ 293 */
263 hcsr = mei_hcsr_read(hw); 294 hcsr = mei_hcsr_read(dev);
264 295
265 if ((hcsr & H_RST) == 0) 296 if ((hcsr & H_RST) == 0)
266 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); 297 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
@@ -281,11 +312,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
281 */ 312 */
282static void mei_me_host_set_ready(struct mei_device *dev) 313static void mei_me_host_set_ready(struct mei_device *dev)
283{ 314{
284 struct mei_me_hw *hw = to_me_hw(dev); 315 u32 hcsr = mei_hcsr_read(dev);
285 u32 hcsr = mei_hcsr_read(hw);
286 316
287 hcsr |= H_IE | H_IG | H_RDY; 317 hcsr |= H_IE | H_IG | H_RDY;
288 mei_hcsr_set(hw, hcsr); 318 mei_hcsr_set(dev, hcsr);
289} 319}
290 320
291/** 321/**
@@ -296,8 +326,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
296 */ 326 */
297static bool mei_me_host_is_ready(struct mei_device *dev) 327static bool mei_me_host_is_ready(struct mei_device *dev)
298{ 328{
299 struct mei_me_hw *hw = to_me_hw(dev); 329 u32 hcsr = mei_hcsr_read(dev);
300 u32 hcsr = mei_hcsr_read(hw);
301 330
302 return (hcsr & H_RDY) == H_RDY; 331 return (hcsr & H_RDY) == H_RDY;
303} 332}
@@ -310,8 +339,7 @@ static bool mei_me_host_is_ready(struct mei_device *dev)
310 */ 339 */
311static bool mei_me_hw_is_ready(struct mei_device *dev) 340static bool mei_me_hw_is_ready(struct mei_device *dev)
312{ 341{
313 struct mei_me_hw *hw = to_me_hw(dev); 342 u32 mecsr = mei_me_mecsr_read(dev);
314 u32 mecsr = mei_me_mecsr_read(hw);
315 343
316 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA; 344 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
317} 345}
@@ -368,11 +396,10 @@ static int mei_me_hw_start(struct mei_device *dev)
368 */ 396 */
369static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) 397static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
370{ 398{
371 struct mei_me_hw *hw = to_me_hw(dev);
372 u32 hcsr; 399 u32 hcsr;
373 char read_ptr, write_ptr; 400 char read_ptr, write_ptr;
374 401
375 hcsr = mei_hcsr_read(hw); 402 hcsr = mei_hcsr_read(dev);
376 403
377 read_ptr = (char) ((hcsr & H_CBRP) >> 8); 404 read_ptr = (char) ((hcsr & H_CBRP) >> 8);
378 write_ptr = (char) ((hcsr & H_CBWP) >> 16); 405 write_ptr = (char) ((hcsr & H_CBWP) >> 16);
@@ -439,7 +466,6 @@ static int mei_me_write_message(struct mei_device *dev,
439 struct mei_msg_hdr *header, 466 struct mei_msg_hdr *header,
440 unsigned char *buf) 467 unsigned char *buf)
441{ 468{
442 struct mei_me_hw *hw = to_me_hw(dev);
443 unsigned long rem; 469 unsigned long rem;
444 unsigned long length = header->length; 470 unsigned long length = header->length;
445 u32 *reg_buf = (u32 *)buf; 471 u32 *reg_buf = (u32 *)buf;
@@ -457,21 +483,21 @@ static int mei_me_write_message(struct mei_device *dev,
457 if (empty_slots < 0 || dw_cnt > empty_slots) 483 if (empty_slots < 0 || dw_cnt > empty_slots)
458 return -EMSGSIZE; 484 return -EMSGSIZE;
459 485
460 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); 486 mei_me_hcbww_write(dev, *((u32 *) header));
461 487
462 for (i = 0; i < length / 4; i++) 488 for (i = 0; i < length / 4; i++)
463 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]); 489 mei_me_hcbww_write(dev, reg_buf[i]);
464 490
465 rem = length & 0x3; 491 rem = length & 0x3;
466 if (rem > 0) { 492 if (rem > 0) {
467 u32 reg = 0; 493 u32 reg = 0;
468 494
469 memcpy(&reg, &buf[length - rem], rem); 495 memcpy(&reg, &buf[length - rem], rem);
470 mei_me_reg_write(hw, H_CB_WW, reg); 496 mei_me_hcbww_write(dev, reg);
471 } 497 }
472 498
473 hcsr = mei_hcsr_read(hw) | H_IG; 499 hcsr = mei_hcsr_read(dev) | H_IG;
474 mei_hcsr_set(hw, hcsr); 500 mei_hcsr_set(dev, hcsr);
475 if (!mei_me_hw_is_ready(dev)) 501 if (!mei_me_hw_is_ready(dev))
476 return -EIO; 502 return -EIO;
477 503
@@ -487,12 +513,11 @@ static int mei_me_write_message(struct mei_device *dev,
487 */ 513 */
488static int mei_me_count_full_read_slots(struct mei_device *dev) 514static int mei_me_count_full_read_slots(struct mei_device *dev)
489{ 515{
490 struct mei_me_hw *hw = to_me_hw(dev);
491 u32 me_csr; 516 u32 me_csr;
492 char read_ptr, write_ptr; 517 char read_ptr, write_ptr;
493 unsigned char buffer_depth, filled_slots; 518 unsigned char buffer_depth, filled_slots;
494 519
495 me_csr = mei_me_mecsr_read(hw); 520 me_csr = mei_me_mecsr_read(dev);
496 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24); 521 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
497 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8); 522 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
498 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16); 523 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
@@ -518,7 +543,6 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
518static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, 543static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
519 unsigned long buffer_length) 544 unsigned long buffer_length)
520{ 545{
521 struct mei_me_hw *hw = to_me_hw(dev);
522 u32 *reg_buf = (u32 *)buffer; 546 u32 *reg_buf = (u32 *)buffer;
523 u32 hcsr; 547 u32 hcsr;
524 548
@@ -531,49 +555,59 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
531 memcpy(reg_buf, &reg, buffer_length); 555 memcpy(reg_buf, &reg, buffer_length);
532 } 556 }
533 557
534 hcsr = mei_hcsr_read(hw) | H_IG; 558 hcsr = mei_hcsr_read(dev) | H_IG;
535 mei_hcsr_set(hw, hcsr); 559 mei_hcsr_set(dev, hcsr);
536 return 0; 560 return 0;
537} 561}
538 562
539/** 563/**
540 * mei_me_pg_enter - write pg enter register 564 * mei_me_pg_set - write pg enter register
541 * 565 *
542 * @dev: the device structure 566 * @dev: the device structure
543 */ 567 */
544static void mei_me_pg_enter(struct mei_device *dev) 568static void mei_me_pg_set(struct mei_device *dev)
545{ 569{
546 struct mei_me_hw *hw = to_me_hw(dev); 570 struct mei_me_hw *hw = to_me_hw(dev);
547 u32 reg = mei_me_reg_read(hw, H_HPG_CSR); 571 u32 reg;
572
573 reg = mei_me_reg_read(hw, H_HPG_CSR);
574 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
548 575
549 reg |= H_HPG_CSR_PGI; 576 reg |= H_HPG_CSR_PGI;
577
578 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
550 mei_me_reg_write(hw, H_HPG_CSR, reg); 579 mei_me_reg_write(hw, H_HPG_CSR, reg);
551} 580}
552 581
553/** 582/**
554 * mei_me_pg_exit - write pg exit register 583 * mei_me_pg_unset - write pg exit register
555 * 584 *
556 * @dev: the device structure 585 * @dev: the device structure
557 */ 586 */
558static void mei_me_pg_exit(struct mei_device *dev) 587static void mei_me_pg_unset(struct mei_device *dev)
559{ 588{
560 struct mei_me_hw *hw = to_me_hw(dev); 589 struct mei_me_hw *hw = to_me_hw(dev);
561 u32 reg = mei_me_reg_read(hw, H_HPG_CSR); 590 u32 reg;
591
592 reg = mei_me_reg_read(hw, H_HPG_CSR);
593 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
562 594
563 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); 595 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
564 596
565 reg |= H_HPG_CSR_PGIHEXR; 597 reg |= H_HPG_CSR_PGIHEXR;
598
599 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
566 mei_me_reg_write(hw, H_HPG_CSR, reg); 600 mei_me_reg_write(hw, H_HPG_CSR, reg);
567} 601}
568 602
569/** 603/**
570 * mei_me_pg_set_sync - perform pg entry procedure 604 * mei_me_pg_enter_sync - perform pg entry procedure
571 * 605 *
572 * @dev: the device structure 606 * @dev: the device structure
573 * 607 *
574 * Return: 0 on success an error code otherwise 608 * Return: 0 on success an error code otherwise
575 */ 609 */
576int mei_me_pg_set_sync(struct mei_device *dev) 610int mei_me_pg_enter_sync(struct mei_device *dev)
577{ 611{
578 struct mei_me_hw *hw = to_me_hw(dev); 612 struct mei_me_hw *hw = to_me_hw(dev);
579 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 613 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -591,7 +625,7 @@ int mei_me_pg_set_sync(struct mei_device *dev)
591 mutex_lock(&dev->device_lock); 625 mutex_lock(&dev->device_lock);
592 626
593 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { 627 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
594 mei_me_pg_enter(dev); 628 mei_me_pg_set(dev);
595 ret = 0; 629 ret = 0;
596 } else { 630 } else {
597 ret = -ETIME; 631 ret = -ETIME;
@@ -604,13 +638,13 @@ int mei_me_pg_set_sync(struct mei_device *dev)
604} 638}
605 639
606/** 640/**
607 * mei_me_pg_unset_sync - perform pg exit procedure 641 * mei_me_pg_exit_sync - perform pg exit procedure
608 * 642 *
609 * @dev: the device structure 643 * @dev: the device structure
610 * 644 *
611 * Return: 0 on success an error code otherwise 645 * Return: 0 on success an error code otherwise
612 */ 646 */
613int mei_me_pg_unset_sync(struct mei_device *dev) 647int mei_me_pg_exit_sync(struct mei_device *dev)
614{ 648{
615 struct mei_me_hw *hw = to_me_hw(dev); 649 struct mei_me_hw *hw = to_me_hw(dev);
616 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 650 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -621,7 +655,7 @@ int mei_me_pg_unset_sync(struct mei_device *dev)
621 655
622 dev->pg_event = MEI_PG_EVENT_WAIT; 656 dev->pg_event = MEI_PG_EVENT_WAIT;
623 657
624 mei_me_pg_exit(dev); 658 mei_me_pg_unset(dev);
625 659
626 mutex_unlock(&dev->device_lock); 660 mutex_unlock(&dev->device_lock);
627 wait_event_timeout(dev->wait_pg, 661 wait_event_timeout(dev->wait_pg,
@@ -649,8 +683,7 @@ reply:
649 */ 683 */
650static bool mei_me_pg_is_enabled(struct mei_device *dev) 684static bool mei_me_pg_is_enabled(struct mei_device *dev)
651{ 685{
652 struct mei_me_hw *hw = to_me_hw(dev); 686 u32 reg = mei_me_mecsr_read(dev);
653 u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
654 687
655 if ((reg & ME_PGIC_HRA) == 0) 688 if ((reg & ME_PGIC_HRA) == 0)
656 goto notsupported; 689 goto notsupported;
@@ -683,14 +716,13 @@ notsupported:
683irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) 716irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
684{ 717{
685 struct mei_device *dev = (struct mei_device *) dev_id; 718 struct mei_device *dev = (struct mei_device *) dev_id;
686 struct mei_me_hw *hw = to_me_hw(dev); 719 u32 hcsr = mei_hcsr_read(dev);
687 u32 csr_reg = mei_hcsr_read(hw);
688 720
689 if ((csr_reg & H_IS) != H_IS) 721 if ((hcsr & H_IS) != H_IS)
690 return IRQ_NONE; 722 return IRQ_NONE;
691 723
692 /* clear H_IS bit in H_CSR */ 724 /* clear H_IS bit in H_CSR */
693 mei_me_reg_write(hw, H_CSR, csr_reg); 725 mei_hcsr_write(dev, hcsr);
694 726
695 return IRQ_WAKE_THREAD; 727 return IRQ_WAKE_THREAD;
696} 728}
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index d6567af44377..6022d52af6f6 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -71,8 +71,8 @@ extern const struct mei_cfg mei_me_pch8_sps_cfg;
71struct mei_device *mei_me_dev_init(struct pci_dev *pdev, 71struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
72 const struct mei_cfg *cfg); 72 const struct mei_cfg *cfg);
73 73
74int mei_me_pg_set_sync(struct mei_device *dev); 74int mei_me_pg_enter_sync(struct mei_device *dev);
75int mei_me_pg_unset_sync(struct mei_device *dev); 75int mei_me_pg_exit_sync(struct mei_device *dev);
76 76
77irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); 77irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
78irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); 78irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
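The hw-me.c changes above funnel every H_CSR/ME_CSR access through small accessors so that register tracing is emitted in exactly one place and callers only ever do read-modify-write through them. The stand-alone sketch below models that shape with a fake register array and a printf trace; the H_CSR/H_IE/H_IS values and helper names here are stand-ins, not the real register map.

/* Stand-alone model of the accessor-with-trace pattern introduced in
 * hw-me.c; the register file and trace() are invented stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define H_CSR   0x04
#define H_IE    (1u << 0)
#define H_IS    (1u << 1)

static uint32_t regs[16];       /* fake MMIO window */

static void trace(const char *op, uint32_t offs, uint32_t val)
{
        printf("%s [0x%02x] = 0x%08x\n", op, offs, val);
}

/* All CSR traffic goes through these two helpers, so every access is traced. */
static uint32_t hcsr_read(void)
{
        uint32_t reg = regs[H_CSR / 4];

        trace("read ", H_CSR, reg);
        return reg;
}

static void hcsr_write(uint32_t reg)
{
        trace("write", H_CSR, reg);
        regs[H_CSR / 4] = reg;
}

/* Like mei_hcsr_set(): read-modify-write, masking the write-1-to-clear bit. */
static void hcsr_set(uint32_t reg)
{
        reg &= ~H_IS;
        hcsr_write(reg);
}

static void intr_enable(void)
{
        hcsr_set(hcsr_read() | H_IE);
}

int main(void)
{
        intr_enable();
        return 0;
}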
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 618ea721aca8..7abafe7d120d 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -412,7 +412,7 @@ static void mei_txe_intr_disable(struct mei_device *dev)
412 mei_txe_br_reg_write(hw, HIER_REG, 0); 412 mei_txe_br_reg_write(hw, HIER_REG, 0);
413} 413}
414/** 414/**
415 * mei_txe_intr_disable - enable all interrupts 415 * mei_txe_intr_enable - enable all interrupts
416 * 416 *
417 * @dev: the device structure 417 * @dev: the device structure
418 */ 418 */
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ad049a08e4d..97353cf8d9b6 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -389,6 +389,7 @@ void mei_device_init(struct mei_device *dev,
389 INIT_LIST_HEAD(&dev->device_list); 389 INIT_LIST_HEAD(&dev->device_list);
390 INIT_LIST_HEAD(&dev->me_clients); 390 INIT_LIST_HEAD(&dev->me_clients);
391 mutex_init(&dev->device_lock); 391 mutex_init(&dev->device_lock);
392 init_rwsem(&dev->me_clients_rwsem);
392 init_waitqueue_head(&dev->wait_hw_ready); 393 init_waitqueue_head(&dev->wait_hw_ready);
393 init_waitqueue_head(&dev->wait_pg); 394 init_waitqueue_head(&dev->wait_pg);
394 init_waitqueue_head(&dev->wait_hbm_start); 395 init_waitqueue_head(&dev->wait_hbm_start);
@@ -396,7 +397,6 @@ void mei_device_init(struct mei_device *dev,
396 dev->dev_state = MEI_DEV_INITIALIZING; 397 dev->dev_state = MEI_DEV_INITIALIZING;
397 dev->reset_count = 0; 398 dev->reset_count = 0;
398 399
399 mei_io_list_init(&dev->read_list);
400 mei_io_list_init(&dev->write_list); 400 mei_io_list_init(&dev->write_list);
401 mei_io_list_init(&dev->write_waiting_list); 401 mei_io_list_init(&dev->write_waiting_list);
402 mei_io_list_init(&dev->ctrl_wr_list); 402 mei_io_list_init(&dev->ctrl_wr_list);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 711cddfa9c99..3f84d2edcde4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -43,7 +43,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
43 43
44 list_for_each_entry_safe(cb, next, &compl_list->list, list) { 44 list_for_each_entry_safe(cb, next, &compl_list->list, list) {
45 cl = cb->cl; 45 cl = cb->cl;
46 list_del(&cb->list); 46 list_del_init(&cb->list);
47 47
48 dev_dbg(dev->dev, "completing call back.\n"); 48 dev_dbg(dev->dev, "completing call back.\n");
49 if (cl == &dev->iamthif_cl) 49 if (cl == &dev->iamthif_cl)
@@ -68,91 +68,91 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
68 return cl->host_client_id == mei_hdr->host_addr && 68 return cl->host_client_id == mei_hdr->host_addr &&
69 cl->me_client_id == mei_hdr->me_addr; 69 cl->me_client_id == mei_hdr->me_addr;
70} 70}
--- old (interrupt.c lines 71-155)
/**
 * mei_cl_is_reading - checks if the client
 *              is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * Return: true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
        return mei_cl_hbm_equal(cl, mei_hdr) &&
                cl->state == MEI_FILE_CONNECTED &&
                cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
                               struct mei_msg_hdr *mei_hdr,
                               struct mei_cl_cb *complete_list)
{
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        unsigned char *buffer = NULL;

        list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
                cl = cb->cl;
                if (!mei_cl_is_reading(cl, mei_hdr))
                        continue;

                cl->reading_state = MEI_READING;

                if (cb->response_buffer.size == 0 ||
                    cb->response_buffer.data == NULL) {
                        cl_err(dev, cl, "response buffer is not allocated.\n");
                        list_del(&cb->list);
                        return -ENOMEM;
                }

                if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
                        cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
                                cb->response_buffer.size,
                                mei_hdr->length, cb->buf_idx);
                        buffer = krealloc(cb->response_buffer.data,
                                          mei_hdr->length + cb->buf_idx,
                                          GFP_KERNEL);

                        if (!buffer) {
                                list_del(&cb->list);
                                return -ENOMEM;
                        }
                        cb->response_buffer.data = buffer;
                        cb->response_buffer.size =
                                mei_hdr->length + cb->buf_idx;
                }

                buffer = cb->response_buffer.data + cb->buf_idx;
                mei_read_slots(dev, buffer, mei_hdr->length);

                cb->buf_idx += mei_hdr->length;
                if (mei_hdr->msg_complete) {
                        cl->status = 0;
                        list_del(&cb->list);
                        cl_dbg(dev, cl, "completed read length = %lu\n",
                                cb->buf_idx);
                        list_add_tail(&cb->list, &complete_list->list);
                }
                break;
        }

        dev_dbg(dev->dev, "message read\n");
        if (!buffer) {
                mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
                dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
                                MEI_HDR_PRM(mei_hdr));
        }

+++ new (interrupt.c lines 71-155)
/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static inline
void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
        /*
         * no need to check for size as it is guaranteed
         * that length fits into rd_msg_buf
         */
        mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
        dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
                MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
                        struct mei_msg_hdr *mei_hdr,
                        struct mei_cl_cb *complete_list)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
        unsigned char *buffer = NULL;

        cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
        if (!cb) {
                cl_err(dev, cl, "pending read cb not found\n");
                goto out;
        }

        if (!mei_cl_is_connected(cl)) {
                cl_dbg(dev, cl, "not connected\n");
                cb->status = -ENODEV;
                goto out;
        }

        if (cb->buf.size == 0 || cb->buf.data == NULL) {
                cl_err(dev, cl, "response buffer is not allocated.\n");
                list_move_tail(&cb->list, &complete_list->list);
                cb->status = -ENOMEM;
                goto out;
        }

        if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
                cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
                        cb->buf.size, mei_hdr->length, cb->buf_idx);
                buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
                                  GFP_KERNEL);

                if (!buffer) {
                        cb->status = -ENOMEM;
                        list_move_tail(&cb->list, &complete_list->list);
                        goto out;
                }
                cb->buf.data = buffer;
                cb->buf.size = mei_hdr->length + cb->buf_idx;
        }

        buffer = cb->buf.data + cb->buf_idx;
        mei_read_slots(dev, buffer, mei_hdr->length);

        cb->buf_idx += mei_hdr->length;

        if (mei_hdr->msg_complete) {
                cb->read_time = jiffies;
                cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
                list_move_tail(&cb->list, &complete_list->list);
        }

out:
        if (!buffer)
                mei_irq_discard_msg(dev, mei_hdr);

156 return 0; 156 return 0;
157} 157}
158 158
@@ -183,7 +183,6 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
183 183
184 cl->state = MEI_FILE_DISCONNECTED; 184 cl->state = MEI_FILE_DISCONNECTED;
185 cl->status = 0; 185 cl->status = 0;
186 list_del(&cb->list);
187 mei_io_cb_free(cb); 186 mei_io_cb_free(cb);
188 187
189 return ret; 188 return ret;
@@ -263,7 +262,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
263 return ret; 262 return ret;
264 } 263 }
265 264
266 list_move_tail(&cb->list, &dev->read_list.list); 265 list_move_tail(&cb->list, &cl->rd_pending);
267 266
268 return 0; 267 return 0;
269} 268}
@@ -301,7 +300,7 @@ static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
301 if (ret) { 300 if (ret) {
302 cl->status = ret; 301 cl->status = ret;
303 cb->buf_idx = 0; 302 cb->buf_idx = 0;
304 list_del(&cb->list); 303 list_del_init(&cb->list);
305 return ret; 304 return ret;
306 } 305 }
307 306
@@ -378,25 +377,13 @@ int mei_irq_read_handler(struct mei_device *dev,
378 goto end; 377 goto end;
379 } 378 }
380 379
381 if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && 380 if (cl == &dev->iamthif_cl) {
382 MEI_FILE_CONNECTED == dev->iamthif_cl.state && 381 ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
383 dev->iamthif_state == MEI_IAMTHIF_READING) {
384
385 ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
386 if (ret) {
387 dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
388 ret);
389 goto end;
390 }
391 } else { 382 } else {
392 ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); 383 ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
393 if (ret) {
394 dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
395 ret);
396 goto end;
397 }
398 } 384 }
399 385
386
400reset_slots: 387reset_slots:
401 /* reset the number of slots and header */ 388 /* reset the number of slots and header */
402 *slots = mei_count_full_read_slots(dev); 389 *slots = mei_count_full_read_slots(dev);
@@ -449,21 +436,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
449 cl = cb->cl; 436 cl = cb->cl;
450 437
451 cl->status = 0; 438 cl->status = 0;
452 list_del(&cb->list); 439 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
453 if (cb->fop_type == MEI_FOP_WRITE && 440 cl->writing_state = MEI_WRITE_COMPLETE;
454 cl != &dev->iamthif_cl) { 441 list_move_tail(&cb->list, &cmpl_list->list);
455 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
456 cl->writing_state = MEI_WRITE_COMPLETE;
457 list_add_tail(&cb->list, &cmpl_list->list);
458 }
459 if (cl == &dev->iamthif_cl) {
460 cl_dbg(dev, cl, "check iamthif flow control.\n");
461 if (dev->iamthif_flow_control_pending) {
462 ret = mei_amthif_irq_read(dev, &slots);
463 if (ret)
464 return ret;
465 }
466 }
467 } 442 }
468 443
469 if (dev->wd_state == MEI_WD_STOPPING) { 444 if (dev->wd_state == MEI_WD_STOPPING) {
@@ -587,10 +562,7 @@ void mei_timer(struct work_struct *work)
587 if (--dev->iamthif_stall_timer == 0) { 562 if (--dev->iamthif_stall_timer == 0) {
588 dev_err(dev->dev, "timer: amthif hanged.\n"); 563 dev_err(dev->dev, "timer: amthif hanged.\n");
589 mei_reset(dev); 564 mei_reset(dev);
590 dev->iamthif_msg_buf_size = 0;
591 dev->iamthif_msg_buf_index = 0;
592 dev->iamthif_canceled = false; 565 dev->iamthif_canceled = false;
593 dev->iamthif_ioctl = true;
594 dev->iamthif_state = MEI_IAMTHIF_IDLE; 566 dev->iamthif_state = MEI_IAMTHIF_IDLE;
595 dev->iamthif_timer = 0; 567 dev->iamthif_timer = 0;
596 568
@@ -636,4 +608,3 @@ out:
636 schedule_delayed_work(&dev->timer_work, 2 * HZ); 608 schedule_delayed_work(&dev->timer_work, 2 * HZ);
637 mutex_unlock(&dev->device_lock); 609 mutex_unlock(&dev->device_lock);
638} 610}
639
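The rewritten receive path above grows the callback buffer with krealloc() when a fragment would overflow it, appends at buf_idx, and only moves the callback to the completion list once msg_complete is set, recording failures in cb->status rather than returning them. A user-space sketch of that grow-and-append flow (realloc() standing in for krealloc(), with an invented struct cb and rx_fragment helper) follows.

/* User-space sketch of the grow-and-append receive path above:
 * realloc() stands in for krealloc(), msg_complete ends the message. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cb {
        unsigned char *data;
        size_t size;            /* allocated */
        size_t idx;             /* bytes accumulated so far */
        int status;
        int complete;
};

/* Always returns 0; failures are recorded in cb->status, as in the driver. */
static int rx_fragment(struct cb *cb, const void *frag, size_t len, int msg_complete)
{
        if (cb->size < cb->idx + len) {
                unsigned char *buf = realloc(cb->data, cb->idx + len);

                if (!buf) {
                        cb->status = -1;        /* models cb->status = -ENOMEM */
                        return 0;
                }
                cb->data = buf;
                cb->size = cb->idx + len;
        }
        memcpy(cb->data + cb->idx, frag, len);
        cb->idx += len;
        if (msg_complete)
                cb->complete = 1;       /* would move the cb to the completion list */
        return 0;
}

int main(void)
{
        struct cb cb = { 0 };

        rx_fragment(&cb, "hello ", 6, 0);
        rx_fragment(&cb, "world", 5, 1);
        printf("%.*s (complete=%d)\n", (int)cb.idx, cb.data, cb.complete);
        free(cb.data);
        return 0;
}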
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 47680c84801c..3e2968159506 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -58,24 +58,18 @@ static int mei_open(struct inode *inode, struct file *file)
58 58
59 mutex_lock(&dev->device_lock); 59 mutex_lock(&dev->device_lock);
60 60
61 cl = NULL;
62
63 err = -ENODEV;
64 if (dev->dev_state != MEI_DEV_ENABLED) { 61 if (dev->dev_state != MEI_DEV_ENABLED) {
65 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", 62 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
66 mei_dev_state_str(dev->dev_state)); 63 mei_dev_state_str(dev->dev_state));
64 err = -ENODEV;
67 goto err_unlock; 65 goto err_unlock;
68 } 66 }
69 67
70 err = -ENOMEM; 68 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
71 cl = mei_cl_allocate(dev); 69 if (IS_ERR(cl)) {
72 if (!cl) 70 err = PTR_ERR(cl);
73 goto err_unlock;
74
75 /* open_handle_count check is handled in the mei_cl_link */
76 err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
77 if (err)
78 goto err_unlock; 71 goto err_unlock;
72 }
79 73
80 file->private_data = cl; 74 file->private_data = cl;
81 75
@@ -85,7 +79,6 @@ static int mei_open(struct inode *inode, struct file *file)
85 79
86err_unlock: 80err_unlock:
87 mutex_unlock(&dev->device_lock); 81 mutex_unlock(&dev->device_lock);
88 kfree(cl);
89 return err; 82 return err;
90} 83}
91 84
@@ -100,7 +93,6 @@ err_unlock:
100static int mei_release(struct inode *inode, struct file *file) 93static int mei_release(struct inode *inode, struct file *file)
101{ 94{
102 struct mei_cl *cl = file->private_data; 95 struct mei_cl *cl = file->private_data;
103 struct mei_cl_cb *cb;
104 struct mei_device *dev; 96 struct mei_device *dev;
105 int rets = 0; 97 int rets = 0;
106 98
@@ -114,33 +106,18 @@ static int mei_release(struct inode *inode, struct file *file)
114 rets = mei_amthif_release(dev, file); 106 rets = mei_amthif_release(dev, file);
115 goto out; 107 goto out;
116 } 108 }
117 if (cl->state == MEI_FILE_CONNECTED) { 109 if (mei_cl_is_connected(cl)) {
118 cl->state = MEI_FILE_DISCONNECTING; 110 cl->state = MEI_FILE_DISCONNECTING;
119 cl_dbg(dev, cl, "disconnecting\n"); 111 cl_dbg(dev, cl, "disconnecting\n");
120 rets = mei_cl_disconnect(cl); 112 rets = mei_cl_disconnect(cl);
121 } 113 }
122 mei_cl_flush_queues(cl); 114 mei_cl_flush_queues(cl, file);
123 cl_dbg(dev, cl, "removing\n"); 115 cl_dbg(dev, cl, "removing\n");
124 116
125 mei_cl_unlink(cl); 117 mei_cl_unlink(cl);
126 118
127
128 /* free read cb */
129 cb = NULL;
130 if (cl->read_cb) {
131 cb = mei_cl_find_read_cb(cl);
132 /* Remove entry from read list */
133 if (cb)
134 list_del(&cb->list);
135
136 cb = cl->read_cb;
137 cl->read_cb = NULL;
138 }
139
140 file->private_data = NULL; 119 file->private_data = NULL;
141 120
142 mei_io_cb_free(cb);
143
144 kfree(cl); 121 kfree(cl);
145out: 122out:
146 mutex_unlock(&dev->device_lock); 123 mutex_unlock(&dev->device_lock);
@@ -162,9 +139,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
162 size_t length, loff_t *offset) 139 size_t length, loff_t *offset)
163{ 140{
164 struct mei_cl *cl = file->private_data; 141 struct mei_cl *cl = file->private_data;
165 struct mei_cl_cb *cb_pos = NULL;
166 struct mei_cl_cb *cb = NULL;
167 struct mei_device *dev; 142 struct mei_device *dev;
143 struct mei_cl_cb *cb = NULL;
168 int rets; 144 int rets;
169 int err; 145 int err;
170 146
@@ -191,8 +167,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
191 goto out; 167 goto out;
192 } 168 }
193 169
194 if (cl->read_cb) { 170 cb = mei_cl_read_cb(cl, file);
195 cb = cl->read_cb; 171 if (cb) {
196 /* read what left */ 172 /* read what left */
197 if (cb->buf_idx > *offset) 173 if (cb->buf_idx > *offset)
198 goto copy_buffer; 174 goto copy_buffer;
@@ -208,7 +184,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
208 *offset = 0; 184 *offset = 0;
209 } 185 }
210 186
211 err = mei_cl_read_start(cl, length); 187 err = mei_cl_read_start(cl, length, file);
212 if (err && err != -EBUSY) { 188 if (err && err != -EBUSY) {
213 dev_dbg(dev->dev, 189 dev_dbg(dev->dev,
214 "mei start read failure with status = %d\n", err); 190 "mei start read failure with status = %d\n", err);
@@ -216,8 +192,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
216 goto out; 192 goto out;
217 } 193 }
218 194
219 if (MEI_READ_COMPLETE != cl->reading_state && 195 if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
220 !waitqueue_active(&cl->rx_wait)) {
221 if (file->f_flags & O_NONBLOCK) { 196 if (file->f_flags & O_NONBLOCK) {
222 rets = -EAGAIN; 197 rets = -EAGAIN;
223 goto out; 198 goto out;
@@ -226,8 +201,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
226 mutex_unlock(&dev->device_lock); 201 mutex_unlock(&dev->device_lock);
227 202
228 if (wait_event_interruptible(cl->rx_wait, 203 if (wait_event_interruptible(cl->rx_wait,
229 MEI_READ_COMPLETE == cl->reading_state || 204 (!list_empty(&cl->rd_completed)) ||
230 mei_cl_is_transitioning(cl))) { 205 (!mei_cl_is_connected(cl)))) {
231 206
232 if (signal_pending(current)) 207 if (signal_pending(current))
233 return -EINTR; 208 return -EINTR;
@@ -235,26 +210,28 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
235 } 210 }
236 211
237 mutex_lock(&dev->device_lock); 212 mutex_lock(&dev->device_lock);
238 if (mei_cl_is_transitioning(cl)) { 213 if (!mei_cl_is_connected(cl)) {
239 rets = -EBUSY; 214 rets = -EBUSY;
240 goto out; 215 goto out;
241 } 216 }
242 } 217 }
243 218
244 cb = cl->read_cb; 219 cb = mei_cl_read_cb(cl, file);
245
246 if (!cb) { 220 if (!cb) {
247 rets = -ENODEV;
248 goto out;
249 }
250 if (cl->reading_state != MEI_READ_COMPLETE) {
251 rets = 0; 221 rets = 0;
252 goto out; 222 goto out;
253 } 223 }
254 /* now copy the data to user space */ 224
255copy_buffer: 225copy_buffer:
226 /* now copy the data to user space */
227 if (cb->status) {
228 rets = cb->status;
229 dev_dbg(dev->dev, "read operation failed %d\n", rets);
230 goto free;
231 }
232
256 dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n", 233 dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
257 cb->response_buffer.size, cb->buf_idx); 234 cb->buf.size, cb->buf_idx);
258 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { 235 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
259 rets = -EMSGSIZE; 236 rets = -EMSGSIZE;
260 goto free; 237 goto free;
@@ -264,7 +241,7 @@ copy_buffer:
264 * however buf_idx may point beyond that */ 241 * however buf_idx may point beyond that */
265 length = min_t(size_t, length, cb->buf_idx - *offset); 242 length = min_t(size_t, length, cb->buf_idx - *offset);
266 243
267 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { 244 if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
268 dev_dbg(dev->dev, "failed to copy data to userland\n"); 245 dev_dbg(dev->dev, "failed to copy data to userland\n");
269 rets = -EFAULT; 246 rets = -EFAULT;
270 goto free; 247 goto free;
@@ -276,13 +253,8 @@ copy_buffer:
276 goto out; 253 goto out;
277 254
278free: 255free:
279 cb_pos = mei_cl_find_read_cb(cl);
280 /* Remove entry from read list */
281 if (cb_pos)
282 list_del(&cb_pos->list);
283 mei_io_cb_free(cb); 256 mei_io_cb_free(cb);
284 cl->reading_state = MEI_IDLE; 257
285 cl->read_cb = NULL;
286out: 258out:
287 dev_dbg(dev->dev, "end mei read rets= %d\n", rets); 259 dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
288 mutex_unlock(&dev->device_lock); 260 mutex_unlock(&dev->device_lock);
@@ -336,9 +308,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
336 goto out; 308 goto out;
337 } 309 }
338 310
339 if (cl->state != MEI_FILE_CONNECTED) { 311 if (!mei_cl_is_connected(cl)) {
340 dev_err(dev->dev, "host client = %d, is not connected to ME client = %d", 312 cl_err(dev, cl, "is not connected");
341 cl->host_client_id, cl->me_client_id);
342 rets = -ENODEV; 313 rets = -ENODEV;
343 goto out; 314 goto out;
344 } 315 }
@@ -349,41 +320,22 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
349 timeout = write_cb->read_time + 320 timeout = write_cb->read_time +
350 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); 321 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
351 322
352 if (time_after(jiffies, timeout) || 323 if (time_after(jiffies, timeout)) {
353 cl->reading_state == MEI_READ_COMPLETE) {
354 *offset = 0; 324 *offset = 0;
355 list_del(&write_cb->list);
356 mei_io_cb_free(write_cb); 325 mei_io_cb_free(write_cb);
357 write_cb = NULL; 326 write_cb = NULL;
358 } 327 }
359 } 328 }
360 } 329 }
361 330
362 /* free entry used in read */ 331 *offset = 0;
363 if (cl->reading_state == MEI_READ_COMPLETE) { 332 write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
364 *offset = 0;
365 write_cb = mei_cl_find_read_cb(cl);
366 if (write_cb) {
367 list_del(&write_cb->list);
368 mei_io_cb_free(write_cb);
369 write_cb = NULL;
370 cl->reading_state = MEI_IDLE;
371 cl->read_cb = NULL;
372 }
373 } else if (cl->reading_state == MEI_IDLE)
374 *offset = 0;
375
376
377 write_cb = mei_io_cb_init(cl, file);
378 if (!write_cb) { 333 if (!write_cb) {
379 rets = -ENOMEM; 334 rets = -ENOMEM;
380 goto out; 335 goto out;
381 } 336 }
382 rets = mei_io_cb_alloc_req_buf(write_cb, length);
383 if (rets)
384 goto out;
385 337
386 rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); 338 rets = copy_from_user(write_cb->buf.data, ubuf, length);
387 if (rets) { 339 if (rets) {
388 dev_dbg(dev->dev, "failed to copy data from userland\n"); 340 dev_dbg(dev->dev, "failed to copy data from userland\n");
389 rets = -EFAULT; 341 rets = -EFAULT;
@@ -391,7 +343,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
391 } 343 }
392 344
393 if (cl == &dev->iamthif_cl) { 345 if (cl == &dev->iamthif_cl) {
394 rets = mei_amthif_write(dev, write_cb); 346 rets = mei_amthif_write(cl, write_cb);
395 347
396 if (rets) { 348 if (rets) {
397 dev_err(dev->dev, 349 dev_err(dev->dev,
@@ -464,7 +416,7 @@ static int mei_ioctl_connect_client(struct file *file,
464 */ 416 */
465 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { 417 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
466 dev_dbg(dev->dev, "FW Client is amthi\n"); 418 dev_dbg(dev->dev, "FW Client is amthi\n");
467 if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { 419 if (!mei_cl_is_connected(&dev->iamthif_cl)) {
468 rets = -ENODEV; 420 rets = -ENODEV;
469 goto end; 421 goto end;
470 } 422 }
@@ -588,6 +540,7 @@ static long mei_compat_ioctl(struct file *file,
588 */ 540 */
589static unsigned int mei_poll(struct file *file, poll_table *wait) 541static unsigned int mei_poll(struct file *file, poll_table *wait)
590{ 542{
543 unsigned long req_events = poll_requested_events(wait);
591 struct mei_cl *cl = file->private_data; 544 struct mei_cl *cl = file->private_data;
592 struct mei_device *dev; 545 struct mei_device *dev;
593 unsigned int mask = 0; 546 unsigned int mask = 0;
@@ -599,27 +552,26 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
599 552
600 mutex_lock(&dev->device_lock); 553 mutex_lock(&dev->device_lock);
601 554
602 if (!mei_cl_is_connected(cl)) { 555
556 if (dev->dev_state != MEI_DEV_ENABLED ||
557 !mei_cl_is_connected(cl)) {
603 mask = POLLERR; 558 mask = POLLERR;
604 goto out; 559 goto out;
605 } 560 }
606 561
607 mutex_unlock(&dev->device_lock); 562 if (cl == &dev->iamthif_cl) {
608 563 mask = mei_amthif_poll(dev, file, wait);
609
610 if (cl == &dev->iamthif_cl)
611 return mei_amthif_poll(dev, file, wait);
612
613 poll_wait(file, &cl->tx_wait, wait);
614
615 mutex_lock(&dev->device_lock);
616
617 if (!mei_cl_is_connected(cl)) {
618 mask = POLLERR;
619 goto out; 564 goto out;
620 } 565 }
621 566
622 mask |= (POLLIN | POLLRDNORM); 567 if (req_events & (POLLIN | POLLRDNORM)) {
568 poll_wait(file, &cl->rx_wait, wait);
569
570 if (!list_empty(&cl->rd_completed))
571 mask |= POLLIN | POLLRDNORM;
572 else
573 mei_cl_read_start(cl, 0, file);
574 }
623 575
624out: 576out:
625 mutex_unlock(&dev->device_lock); 577 mutex_unlock(&dev->device_lock);
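The mei_open() hunk above replaces the open-coded allocate-then-link sequence (and the kfree(cl) on its error path) with a single mei_cl_alloc_linked() call that reports failure through ERR_PTR(). The helper itself is defined in client.c, which is not part of this hunk; the sketch below is only a plausible shape for it, inferred from the call site and reusing the mei_cl_allocate()/mei_cl_link() names visible in the removed lines.

#include <linux/err.h>
#include <linux/slab.h>

/* Plausible shape of the combined helper used by mei_open() above:
 * allocate a host client and link it, returning ERR_PTR() on failure so
 * the caller no longer needs its own kfree() on the error path. */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl, id);	/* open_handle_count check handled here */
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);			/* kfree(NULL) is a no-op */
	return ERR_PTR(ret);
}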
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
new file mode 100644
index 000000000000..388efb519138
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.c
@@ -0,0 +1,25 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2015, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16#include <linux/module.h>
17
18/* sparse doesn't like tracepoint macros */
19#ifndef __CHECKER__
20#define CREATE_TRACE_POINTS
21#include "mei-trace.h"
22
23EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
24EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
25#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
new file mode 100644
index 000000000000..47e1bc6551d4
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.h
@@ -0,0 +1,74 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2015, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
18#define _MEI_TRACE_H_
19
20#include <linux/stringify.h>
21#include <linux/types.h>
22#include <linux/tracepoint.h>
23
24#include <linux/device.h>
25
26#undef TRACE_SYSTEM
27#define TRACE_SYSTEM mei
28
29TRACE_EVENT(mei_reg_read,
30 TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
31 TP_ARGS(dev, reg, offs, val),
32 TP_STRUCT__entry(
33 __string(dev, dev_name(dev))
34 __field(const char *, reg)
35 __field(u32, offs)
36 __field(u32, val)
37 ),
38 TP_fast_assign(
39 __assign_str(dev, dev_name(dev))
40 __entry->reg = reg;
41 __entry->offs = offs;
42 __entry->val = val;
43 ),
44 TP_printk("[%s] read %s:[%#x] = %#x",
45 __get_str(dev), __entry->reg, __entry->offs, __entry->val)
46);
47
48TRACE_EVENT(mei_reg_write,
49 TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
50 TP_ARGS(dev, reg, offs, val),
51 TP_STRUCT__entry(
52 __string(dev, dev_name(dev))
53 __field(const char *, reg)
54 __field(u32, offs)
55 __field(u32, val)
56 ),
57 TP_fast_assign(
58 __assign_str(dev, dev_name(dev))
59 __entry->reg = reg;
60 __entry->offs = offs;
61 __entry->val = val;
62 ),
63 TP_printk("[%s] write %s[%#x] = %#x)",
64 __get_str(dev), __entry->reg, __entry->offs, __entry->val)
65);
66
67#endif /* _MEI_TRACE_H_ */
68
69/* This part must be outside protection */
70#undef TRACE_INCLUDE_PATH
71#undef TRACE_INCLUDE_FILE
72#define TRACE_INCLUDE_PATH .
73#define TRACE_INCLUDE_FILE mei-trace
74#include <trace/define_trace.h>
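mei-trace.h generates trace_mei_reg_read()/trace_mei_reg_write() from the two TRACE_EVENT() definitions (the actual instantiation happens in mei-trace.c via CREATE_TRACE_POINTS). A register accessor in hw-me.c can then emit one event per MMIO access; the wrapper below is an illustrative sketch, not code from this patch, and the function name and "MMIO" register label are ours.

#include <linux/device.h>
#include <linux/io.h>
#include "mei-trace.h"

/* Illustrative sketch: read a 32-bit register and emit the new tracepoint.
 * The call matches TP_PROTO(const struct device *dev, const char *reg,
 * u32 offs, u32 val) above. */
static inline u32 mei_reg_read_traced(const struct device *dev,
				      void __iomem *base, u32 offs)
{
	u32 val = ioread32(base + offs);

	trace_mei_reg_read(dev, "MMIO", offs, val);
	return val;
}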
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 6c6ce9381535..f066ecd71939 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -194,23 +194,25 @@ struct mei_cl;
194 * @list: link in callback queue 194 * @list: link in callback queue
195 * @cl: file client who is running this operation 195 * @cl: file client who is running this operation
196 * @fop_type: file operation type 196 * @fop_type: file operation type
197 * @request_buffer: buffer to store request data 197 * @buf: buffer for data associated with the callback
198 * @response_buffer: buffer to store response data
199 * @buf_idx: last read index 198 * @buf_idx: last read index
200 * @read_time: last read operation time stamp (iamthif) 199 * @read_time: last read operation time stamp (iamthif)
201 * @file_object: pointer to file structure 200 * @file_object: pointer to file structure
201 * @status: io status of the cb
202 * @internal: communication between driver and FW flag 202 * @internal: communication between driver and FW flag
203 * @completed: the transfer or reception has completed
203 */ 204 */
204struct mei_cl_cb { 205struct mei_cl_cb {
205 struct list_head list; 206 struct list_head list;
206 struct mei_cl *cl; 207 struct mei_cl *cl;
207 enum mei_cb_file_ops fop_type; 208 enum mei_cb_file_ops fop_type;
208 struct mei_msg_data request_buffer; 209 struct mei_msg_data buf;
209 struct mei_msg_data response_buffer;
210 unsigned long buf_idx; 210 unsigned long buf_idx;
211 unsigned long read_time; 211 unsigned long read_time;
212 struct file *file_object; 212 struct file *file_object;
213 int status;
213 u32 internal:1; 214 u32 internal:1;
215 u32 completed:1;
214}; 216};
215 217
216/** 218/**
@@ -229,9 +231,9 @@ struct mei_cl_cb {
229 * @me_client_id: me/fw id 231 * @me_client_id: me/fw id
230 * @mei_flow_ctrl_creds: transmit flow credentials 232 * @mei_flow_ctrl_creds: transmit flow credentials
231 * @timer_count: watchdog timer for operation completion 233 * @timer_count: watchdog timer for operation completion
232 * @reading_state: state of the rx
233 * @writing_state: state of the tx 234 * @writing_state: state of the tx
234 * @read_cb: current pending reading callback 235 * @rd_pending: pending read credits
236 * @rd_completed: completed read
235 * 237 *
236 * @device: device on the mei client bus 238 * @device: device on the mei client bus
237 * @device_link: link to bus clients 239 * @device_link: link to bus clients
@@ -249,9 +251,9 @@ struct mei_cl {
249 u8 me_client_id; 251 u8 me_client_id;
250 u8 mei_flow_ctrl_creds; 252 u8 mei_flow_ctrl_creds;
251 u8 timer_count; 253 u8 timer_count;
252 enum mei_file_transaction_states reading_state;
253 enum mei_file_transaction_states writing_state; 254 enum mei_file_transaction_states writing_state;
254 struct mei_cl_cb *read_cb; 255 struct list_head rd_pending;
256 struct list_head rd_completed;
255 257
256 /* MEI CL bus data */ 258 /* MEI CL bus data */
257 struct mei_cl_device *device; 259 struct mei_cl_device *device;
@@ -423,7 +425,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
423 * @cdev : character device 425 * @cdev : character device
424 * @minor : minor number allocated for device 426 * @minor : minor number allocated for device
425 * 427 *
426 * @read_list : read completion list
427 * @write_list : write pending list 428 * @write_list : write pending list
428 * @write_waiting_list : write completion list 429 * @write_waiting_list : write completion list
429 * @ctrl_wr_list : pending control write list 430 * @ctrl_wr_list : pending control write list
@@ -460,6 +461,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
460 * @version : HBM protocol version in use 461 * @version : HBM protocol version in use
461 * @hbm_f_pg_supported : hbm feature pgi protocol 462 * @hbm_f_pg_supported : hbm feature pgi protocol
462 * 463 *
464 * @me_clients_rwsem: rw lock over me_clients list
463 * @me_clients : list of FW clients 465 * @me_clients : list of FW clients
464 * @me_clients_map : FW clients bit map 466 * @me_clients_map : FW clients bit map
465 * @host_clients_map : host clients id pool 467 * @host_clients_map : host clients id pool
@@ -480,12 +482,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
480 * @iamthif_mtu : amthif client max message length 482 * @iamthif_mtu : amthif client max message length
481 * @iamthif_timer : time stamp of current amthif command completion 483 * @iamthif_timer : time stamp of current amthif command completion
482 * @iamthif_stall_timer : timer to detect amthif hang 484 * @iamthif_stall_timer : timer to detect amthif hang
483 * @iamthif_msg_buf : amthif current message buffer
484 * @iamthif_msg_buf_size : size of current amthif message request buffer
485 * @iamthif_msg_buf_index : current index in amthif message request buffer
486 * @iamthif_state : amthif processor state 485 * @iamthif_state : amthif processor state
487 * @iamthif_flow_control_pending: amthif waits for flow control
488 * @iamthif_ioctl : wait for completion if amthif control message
489 * @iamthif_canceled : current amthif command is canceled 486 * @iamthif_canceled : current amthif command is canceled
490 * 487 *
491 * @init_work : work item for the device init 488 * @init_work : work item for the device init
@@ -503,7 +500,6 @@ struct mei_device {
503 struct cdev cdev; 500 struct cdev cdev;
504 int minor; 501 int minor;
505 502
506 struct mei_cl_cb read_list;
507 struct mei_cl_cb write_list; 503 struct mei_cl_cb write_list;
508 struct mei_cl_cb write_waiting_list; 504 struct mei_cl_cb write_waiting_list;
509 struct mei_cl_cb ctrl_wr_list; 505 struct mei_cl_cb ctrl_wr_list;
@@ -556,6 +552,7 @@ struct mei_device {
556 struct hbm_version version; 552 struct hbm_version version;
557 unsigned int hbm_f_pg_supported:1; 553 unsigned int hbm_f_pg_supported:1;
558 554
555 struct rw_semaphore me_clients_rwsem;
559 struct list_head me_clients; 556 struct list_head me_clients;
560 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 557 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
561 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 558 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
@@ -579,12 +576,7 @@ struct mei_device {
579 int iamthif_mtu; 576 int iamthif_mtu;
580 unsigned long iamthif_timer; 577 unsigned long iamthif_timer;
581 u32 iamthif_stall_timer; 578 u32 iamthif_stall_timer;
582 unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */
583 u32 iamthif_msg_buf_size;
584 u32 iamthif_msg_buf_index;
585 enum iamthif_states iamthif_state; 579 enum iamthif_states iamthif_state;
586 bool iamthif_flow_control_pending;
587 bool iamthif_ioctl;
588 bool iamthif_canceled; 580 bool iamthif_canceled;
589 581
590 struct work_struct init_work; 582 struct work_struct init_work;
@@ -662,8 +654,6 @@ void mei_amthif_reset_params(struct mei_device *dev);
662 654
663int mei_amthif_host_init(struct mei_device *dev); 655int mei_amthif_host_init(struct mei_device *dev);
664 656
665int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
666
667int mei_amthif_read(struct mei_device *dev, struct file *file, 657int mei_amthif_read(struct mei_device *dev, struct file *file,
668 char __user *ubuf, size_t length, loff_t *offset); 658 char __user *ubuf, size_t length, loff_t *offset);
669 659
@@ -675,13 +665,13 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
675struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, 665struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
676 struct file *file); 666 struct file *file);
677 667
678void mei_amthif_run_next_cmd(struct mei_device *dev); 668int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
679 669int mei_amthif_run_next_cmd(struct mei_device *dev);
680int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 670int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
681 struct mei_cl_cb *cmpl_list); 671 struct mei_cl_cb *cmpl_list);
682 672
683void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); 673void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
684int mei_amthif_irq_read_msg(struct mei_device *dev, 674int mei_amthif_irq_read_msg(struct mei_cl *cl,
685 struct mei_msg_hdr *mei_hdr, 675 struct mei_msg_hdr *mei_hdr,
686 struct mei_cl_cb *complete_list); 676 struct mei_cl_cb *complete_list);
687int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); 677int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
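The mei_dev.h hunks replace the single per-client read_cb/reading_state pair with two lists, rd_pending and rd_completed, and move the per-request outcome into cb->status and cb->completed. The receive path (interrupt.c/client.c, not shown in this section) is then expected to move a finished callback onto rd_completed and wake the reader; the snippet below is only an illustrative sketch of that flow, built from the fields added here.

#include <linux/list.h>
#include <linux/wait.h>

/* Illustrative sketch of the rx completion flow implied by the new fields:
 * the cb sits on cl->rd_pending while data is arriving, then moves to
 * cl->rd_completed where mei_read()/mei_poll() pick it up. */
static void mei_cl_rx_complete_sketch(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	cb->completed = 1;				/* new mei_cl_cb bit */
	list_move_tail(&cb->list, &cl->rd_completed);	/* off rd_pending */
	wake_up_interruptible(&cl->rx_wait);		/* unblock waiters */
}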
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index bb61a119b8bb..c3bcb63686d7 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -482,8 +482,8 @@ err:
482int mei_nfc_host_init(struct mei_device *dev) 482int mei_nfc_host_init(struct mei_device *dev)
483{ 483{
484 struct mei_nfc_dev *ndev; 484 struct mei_nfc_dev *ndev;
485 struct mei_cl *cl_info, *cl = NULL; 485 struct mei_cl *cl_info, *cl;
486 struct mei_me_client *me_cl; 486 struct mei_me_client *me_cl = NULL;
487 int ret; 487 int ret;
488 488
489 489
@@ -500,17 +500,6 @@ int mei_nfc_host_init(struct mei_device *dev)
500 goto err; 500 goto err;
501 } 501 }
502 502
503 ndev->cl_info = mei_cl_allocate(dev);
504 ndev->cl = mei_cl_allocate(dev);
505
506 cl = ndev->cl;
507 cl_info = ndev->cl_info;
508
509 if (!cl || !cl_info) {
510 ret = -ENOMEM;
511 goto err;
512 }
513
514 /* check for valid client id */ 503 /* check for valid client id */
515 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); 504 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
516 if (!me_cl) { 505 if (!me_cl) {
@@ -519,17 +508,21 @@ int mei_nfc_host_init(struct mei_device *dev)
519 goto err; 508 goto err;
520 } 509 }
521 510
511 cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
512 if (IS_ERR(cl_info)) {
513 ret = PTR_ERR(cl_info);
514 goto err;
515 }
516
522 cl_info->me_client_id = me_cl->client_id; 517 cl_info->me_client_id = me_cl->client_id;
523 cl_info->cl_uuid = me_cl->props.protocol_name; 518 cl_info->cl_uuid = me_cl->props.protocol_name;
524 mei_me_cl_put(me_cl); 519 mei_me_cl_put(me_cl);
525 520 me_cl = NULL;
526 ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY);
527 if (ret)
528 goto err;
529
530 521
531 list_add_tail(&cl_info->device_link, &dev->device_list); 522 list_add_tail(&cl_info->device_link, &dev->device_list);
532 523
524 ndev->cl_info = cl_info;
525
533 /* check for valid client id */ 526 /* check for valid client id */
534 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid); 527 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
535 if (!me_cl) { 528 if (!me_cl) {
@@ -538,16 +531,21 @@ int mei_nfc_host_init(struct mei_device *dev)
538 goto err; 531 goto err;
539 } 532 }
540 533
534 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
535 if (IS_ERR(cl)) {
536 ret = PTR_ERR(cl);
537 goto err;
538 }
539
541 cl->me_client_id = me_cl->client_id; 540 cl->me_client_id = me_cl->client_id;
542 cl->cl_uuid = me_cl->props.protocol_name; 541 cl->cl_uuid = me_cl->props.protocol_name;
543 mei_me_cl_put(me_cl); 542 mei_me_cl_put(me_cl);
544 543 me_cl = NULL;
545 ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
546 if (ret)
547 goto err;
548 544
549 list_add_tail(&cl->device_link, &dev->device_list); 545 list_add_tail(&cl->device_link, &dev->device_list);
550 546
547 ndev->cl = cl;
548
551 ndev->req_id = 1; 549 ndev->req_id = 1;
552 550
553 INIT_WORK(&ndev->init_work, mei_nfc_init); 551 INIT_WORK(&ndev->init_work, mei_nfc_init);
@@ -557,6 +555,7 @@ int mei_nfc_host_init(struct mei_device *dev)
557 return 0; 555 return 0;
558 556
559err: 557err:
558 mei_me_cl_put(me_cl);
560 mei_nfc_free(ndev); 559 mei_nfc_free(ndev);
561 560
562 return ret; 561 return ret;
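The nfc.c error handling now funnels every failure through a single err: label that does an unconditional mei_me_cl_put(). That only works because each reference is dropped and the pointer cleared (me_cl = NULL) as soon as it is no longer needed, and because the put helper must tolerate a NULL argument. The sketch below condenses the convention into one lookup/bind step; the function name and errno are illustrative, the other identifiers come from the hunk above.

#include <linux/err.h>

/* Illustrative sketch of the reference-handling convention adopted above. */
static int nfc_bind_info_client_sketch(struct mei_device *dev, struct mei_cl **out)
{
	struct mei_me_client *me_cl = NULL;
	struct mei_cl *cl_info;
	int ret;

	me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
	if (!me_cl)
		return -ENODEV;			/* errno illustrative */

	cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
	if (IS_ERR(cl_info)) {
		ret = PTR_ERR(cl_info);
		goto err;			/* still holding the me_cl reference */
	}

	cl_info->me_client_id = me_cl->client_id;
	cl_info->cl_uuid = me_cl->props.protocol_name;
	mei_me_cl_put(me_cl);
	me_cl = NULL;				/* reference dropped; avoid a double put */

	*out = cl_info;
	return 0;

err:
	mei_me_cl_put(me_cl);			/* NULL-safe by the convention above */
	return ret;
}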
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index af44ee26075d..23f71f5ce4fb 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -388,7 +388,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
388 mutex_lock(&dev->device_lock); 388 mutex_lock(&dev->device_lock);
389 389
390 if (mei_write_is_idle(dev)) 390 if (mei_write_is_idle(dev))
391 ret = mei_me_pg_set_sync(dev); 391 ret = mei_me_pg_enter_sync(dev);
392 else 392 else
393 ret = -EAGAIN; 393 ret = -EAGAIN;
394 394
@@ -413,7 +413,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
413 413
414 mutex_lock(&dev->device_lock); 414 mutex_lock(&dev->device_lock);
415 415
416 ret = mei_me_pg_unset_sync(dev); 416 ret = mei_me_pg_exit_sync(dev);
417 417
418 mutex_unlock(&dev->device_lock); 418 mutex_unlock(&dev->device_lock);
419 419
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c86e2ddbe30a..dcfcba44b6f7 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -63,7 +63,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
63 } 63 }
64} 64}
65/** 65/**
66 * mei_probe - Device Initialization Routine 66 * mei_txe_probe - Device Initialization Routine
67 * 67 *
68 * @pdev: PCI device structure 68 * @pdev: PCI device structure
69 * @ent: entry in mei_txe_pci_tbl 69 * @ent: entry in mei_txe_pci_tbl
@@ -193,7 +193,7 @@ end:
193} 193}
194 194
195/** 195/**
196 * mei_remove - Device Removal Routine 196 * mei_txe_remove - Device Removal Routine
197 * 197 *
198 * @pdev: PCI device structure 198 * @pdev: PCI device structure
199 * 199 *
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 475f1dea45bf..2725f865c3d6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -160,9 +160,10 @@ int mei_wd_send(struct mei_device *dev)
160 */ 160 */
161int mei_wd_stop(struct mei_device *dev) 161int mei_wd_stop(struct mei_device *dev)
162{ 162{
163 struct mei_cl *cl = &dev->wd_cl;
163 int ret; 164 int ret;
164 165
165 if (dev->wd_cl.state != MEI_FILE_CONNECTED || 166 if (!mei_cl_is_connected(cl) ||
166 dev->wd_state != MEI_WD_RUNNING) 167 dev->wd_state != MEI_WD_RUNNING)
167 return 0; 168 return 0;
168 169
@@ -170,7 +171,7 @@ int mei_wd_stop(struct mei_device *dev)
170 171
171 dev->wd_state = MEI_WD_STOPPING; 172 dev->wd_state = MEI_WD_STOPPING;
172 173
173 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); 174 ret = mei_cl_flow_ctrl_creds(cl);
174 if (ret < 0) 175 if (ret < 0)
175 goto err; 176 goto err;
176 177
@@ -202,22 +203,25 @@ err:
202 return ret; 203 return ret;
203} 204}
204 205
205/* 206/**
206 * mei_wd_ops_start - wd start command from the watchdog core. 207 * mei_wd_ops_start - wd start command from the watchdog core.
207 * 208 *
208 * @wd_dev - watchdog device struct 209 * @wd_dev: watchdog device struct
209 * 210 *
210 * Return: 0 if success, negative errno code for failure 211 * Return: 0 if success, negative errno code for failure
211 */ 212 */
212static int mei_wd_ops_start(struct watchdog_device *wd_dev) 213static int mei_wd_ops_start(struct watchdog_device *wd_dev)
213{ 214{
214 int err = -ENODEV;
215 struct mei_device *dev; 215 struct mei_device *dev;
216 struct mei_cl *cl;
217 int err = -ENODEV;
216 218
217 dev = watchdog_get_drvdata(wd_dev); 219 dev = watchdog_get_drvdata(wd_dev);
218 if (!dev) 220 if (!dev)
219 return -ENODEV; 221 return -ENODEV;
220 222
223 cl = &dev->wd_cl;
224
221 mutex_lock(&dev->device_lock); 225 mutex_lock(&dev->device_lock);
222 226
223 if (dev->dev_state != MEI_DEV_ENABLED) { 227 if (dev->dev_state != MEI_DEV_ENABLED) {
@@ -226,8 +230,8 @@ static int mei_wd_ops_start(struct watchdog_device *wd_dev)
226 goto end_unlock; 230 goto end_unlock;
227 } 231 }
228 232
229 if (dev->wd_cl.state != MEI_FILE_CONNECTED) { 233 if (!mei_cl_is_connected(cl)) {
230 dev_dbg(dev->dev, "MEI Driver is not connected to Watchdog Client\n"); 234 cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
231 goto end_unlock; 235 goto end_unlock;
232 } 236 }
233 237
@@ -239,10 +243,10 @@ end_unlock:
239 return err; 243 return err;
240} 244}
241 245
242/* 246/**
243 * mei_wd_ops_stop - wd stop command from the watchdog core. 247 * mei_wd_ops_stop - wd stop command from the watchdog core.
244 * 248 *
245 * @wd_dev - watchdog device struct 249 * @wd_dev: watchdog device struct
246 * 250 *
247 * Return: 0 if success, negative errno code for failure 251 * Return: 0 if success, negative errno code for failure
248 */ 252 */
@@ -261,10 +265,10 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
261 return 0; 265 return 0;
262} 266}
263 267
264/* 268/**
265 * mei_wd_ops_ping - wd ping command from the watchdog core. 269 * mei_wd_ops_ping - wd ping command from the watchdog core.
266 * 270 *
267 * @wd_dev - watchdog device struct 271 * @wd_dev: watchdog device struct
268 * 272 *
269 * Return: 0 if success, negative errno code for failure 273 * Return: 0 if success, negative errno code for failure
270 */ 274 */
@@ -282,8 +286,8 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
282 286
283 mutex_lock(&dev->device_lock); 287 mutex_lock(&dev->device_lock);
284 288
285 if (cl->state != MEI_FILE_CONNECTED) { 289 if (!mei_cl_is_connected(cl)) {
286 dev_err(dev->dev, "wd: not connected.\n"); 290 cl_err(dev, cl, "wd: not connected.\n");
287 ret = -ENODEV; 291 ret = -ENODEV;
288 goto end; 292 goto end;
289 } 293 }
@@ -311,11 +315,11 @@ end:
311 return ret; 315 return ret;
312} 316}
313 317
314/* 318/**
315 * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. 319 * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
316 * 320 *
317 * @wd_dev - watchdog device struct 321 * @wd_dev: watchdog device struct
318 * @timeout - timeout value to set 322 * @timeout: timeout value to set
319 * 323 *
320 * Return: 0 if success, negative errno code for failure 324 * Return: 0 if success, negative errno code for failure
321 */ 325 */
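Besides the kernel-doc fixes (/** openers and "@param:" colons), the wd.c hunks replace open-coded cl->state != MEI_FILE_CONNECTED tests with mei_cl_is_connected(). The helper lives in client.h, outside this diff; since mei_poll() above now checks dev->dev_state separately, the predicate presumably reduces to the client's own state, roughly:

/* Presumed shape of the predicate used throughout these hunks (defined in
 * client.h, not shown here); the separate dev_state test added to mei_poll()
 * suggests it checks only the client's own connection state. */
static inline bool mei_cl_is_connected(struct mei_cl *cl)
{
	return cl->state == MEI_FILE_CONNECTED;
}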
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index ff2b0fb1a6be..d9fa609da061 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -309,7 +309,7 @@ void mic_complete_resume(struct mic_device *mdev)
309 */ 309 */
310void mic_prepare_suspend(struct mic_device *mdev) 310void mic_prepare_suspend(struct mic_device *mdev)
311{ 311{
312 int rc; 312 unsigned long timeout;
313 313
314#define MIC_SUSPEND_TIMEOUT (60 * HZ) 314#define MIC_SUSPEND_TIMEOUT (60 * HZ)
315 315
@@ -331,10 +331,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
331 */ 331 */
332 mic_set_state(mdev, MIC_SUSPENDING); 332 mic_set_state(mdev, MIC_SUSPENDING);
333 mutex_unlock(&mdev->mic_mutex); 333 mutex_unlock(&mdev->mic_mutex);
334 rc = wait_for_completion_timeout(&mdev->reset_wait, 334 timeout = wait_for_completion_timeout(&mdev->reset_wait,
335 MIC_SUSPEND_TIMEOUT); 335 MIC_SUSPEND_TIMEOUT);
336 /* Force reset the card if the shutdown completion timed out */ 336 /* Force reset the card if the shutdown completion timed out */
337 if (!rc) { 337 if (!timeout) {
338 mutex_lock(&mdev->mic_mutex); 338 mutex_lock(&mdev->mic_mutex);
339 mic_set_state(mdev, MIC_SUSPENDED); 339 mic_set_state(mdev, MIC_SUSPENDED);
340 mutex_unlock(&mdev->mic_mutex); 340 mutex_unlock(&mdev->mic_mutex);
@@ -348,10 +348,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
348 */ 348 */
349 mic_set_state(mdev, MIC_SUSPENDED); 349 mic_set_state(mdev, MIC_SUSPENDED);
350 mutex_unlock(&mdev->mic_mutex); 350 mutex_unlock(&mdev->mic_mutex);
351 rc = wait_for_completion_timeout(&mdev->reset_wait, 351 timeout = wait_for_completion_timeout(&mdev->reset_wait,
352 MIC_SUSPEND_TIMEOUT); 352 MIC_SUSPEND_TIMEOUT);
353 /* Force reset the card if the shutdown completion timed out */ 353 /* Force reset the card if the shutdown completion timed out */
354 if (!rc) 354 if (!timeout)
355 mic_stop(mdev, true); 355 mic_stop(mdev, true);
356 break; 356 break;
357 default: 357 default:
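The mic_boot.c change is purely about types: wait_for_completion_timeout() returns the remaining time in jiffies as an unsigned long (0 on timeout), so storing it in an int named rc both truncates it and reads like an error code. A minimal sketch of the corrected pattern, with a hypothetical caller:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical helper showing the corrected wait_for_completion_timeout()
 * usage: keep the unsigned long return value and treat 0 as a timeout. */
static int wait_for_reset_sketch(struct completion *reset_wait)
{
	unsigned long left;

	left = wait_for_completion_timeout(reset_wait, 60 * HZ);
	if (!left)
		return -ETIMEDOUT;	/* timed out: caller forces a reset */
	return 0;
}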
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index d686f2846ac7..b4ca6c884d19 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -363,8 +363,6 @@ static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
363{ 363{
364 int rc; 364 int rc;
365 365
366 pci_msi_off(pdev);
367
368 /* Enable intx */ 366 /* Enable intx */
369 pci_intx(pdev, 1); 367 pci_intx(pdev, 1);
370 rc = mic_setup_callbacks(mdev); 368 rc = mic_setup_callbacks(mdev);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 21181fa243df..eeaaf5fca105 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -69,12 +69,23 @@ static int sram_probe(struct platform_device *pdev)
69 INIT_LIST_HEAD(&reserve_list); 69 INIT_LIST_HEAD(&reserve_list);
70 70
71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
72 virt_base = devm_ioremap_resource(&pdev->dev, res); 72 if (!res) {
73 if (IS_ERR(virt_base)) 73 dev_err(&pdev->dev, "found no memory resource\n");
74 return PTR_ERR(virt_base); 74 return -EINVAL;
75 }
75 76
76 size = resource_size(res); 77 size = resource_size(res);
77 78
79 if (!devm_request_mem_region(&pdev->dev,
80 res->start, size, pdev->name)) {
81 dev_err(&pdev->dev, "could not request region for resource\n");
82 return -EBUSY;
83 }
84
85 virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
86 if (IS_ERR(virt_base))
87 return PTR_ERR(virt_base);
88
78 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL); 89 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
79 if (!sram) 90 if (!sram)
80 return -ENOMEM; 91 return -ENOMEM;
@@ -205,7 +216,7 @@ static int sram_remove(struct platform_device *pdev)
205} 216}
206 217
207#ifdef CONFIG_OF 218#ifdef CONFIG_OF
208static struct of_device_id sram_dt_ids[] = { 219static const struct of_device_id sram_dt_ids[] = {
209 { .compatible = "mmio-sram" }, 220 { .compatible = "mmio-sram" },
210 {} 221 {}
211}; 222};
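The sram.c hunk stops using devm_ioremap_resource() and instead requests the region explicitly and maps it with devm_ioremap_wc(), presumably to get a write-combined mapping of the SRAM. One detail worth noting: devm_ioremap_wc() returns NULL on failure rather than an ERR_PTR value, so the minimal open-coded sequence looks roughly like the sketch below ("res", "pdev" and "virt_base" as in the probe function above).

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;		/* someone else claimed the region */

	virt_base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	if (!virt_base)
		return -ENOMEM;		/* returns NULL on failure, not ERR_PTR */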
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index a606c8901e18..a37a42f67088 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -236,6 +236,7 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
236{ 236{
237 struct tifm_adapter *fm = pci_get_drvdata(dev); 237 struct tifm_adapter *fm = pci_get_drvdata(dev);
238 int rc; 238 int rc;
239 unsigned long timeout;
239 unsigned int good_sockets = 0, bad_sockets = 0; 240 unsigned int good_sockets = 0, bad_sockets = 0;
240 unsigned long flags; 241 unsigned long flags;
241 unsigned char new_ids[fm->num_sockets]; 242 unsigned char new_ids[fm->num_sockets];
@@ -272,8 +273,8 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
272 if (good_sockets) { 273 if (good_sockets) {
273 fm->finish_me = &finish_resume; 274 fm->finish_me = &finish_resume;
274 spin_unlock_irqrestore(&fm->lock, flags); 275 spin_unlock_irqrestore(&fm->lock, flags);
275 rc = wait_for_completion_timeout(&finish_resume, HZ); 276 timeout = wait_for_completion_timeout(&finish_resume, HZ);
276 dev_dbg(&dev->dev, "wait returned %d\n", rc); 277 dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
277 writel(TIFM_IRQ_FIFOMASK(good_sockets) 278 writel(TIFM_IRQ_FIFOMASK(good_sockets)
278 | TIFM_IRQ_CARDMASK(good_sockets), 279 | TIFM_IRQ_CARDMASK(good_sockets),
279 fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 280 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 032d35cf93ca..b823f9a6e464 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
113 113
114MODULE_AUTHOR("VMware, Inc."); 114MODULE_AUTHOR("VMware, Inc.");
115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); 115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
116MODULE_VERSION("1.1.1.0-k"); 116MODULE_VERSION("1.1.3.0-k");
117MODULE_LICENSE("GPL v2"); 117MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 66fc9921fc85..a721b5d8a9da 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -395,6 +395,12 @@ static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
395 return -EFAULT; 395 return -EFAULT;
396 } 396 }
397 397
398 if (VMCI_DG_SIZE(dg) != send_info.len) {
399 vmci_ioctl_err("datagram size mismatch\n");
400 kfree(dg);
401 return -EINVAL;
402 }
403
398 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", 404 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
399 dg->dst.context, dg->dst.resource, 405 dg->dst.context, dg->dst.resource,
400 dg->src.context, dg->src.resource, 406 dg->src.context, dg->src.resource,
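The vmci_host.c addition closes a validation gap: the datagram header copied from user space declares its own size, and nothing previously checked that it matched the number of bytes the ioctl actually copied. The general shape of the check, with only VMCI_DG_SIZE(), dg and send_info taken from the hunk and everything else illustrative, is:

	/* after copying send_info.len bytes from user space into dg ... */
	if (VMCI_DG_SIZE(dg) != send_info.len) {	/* self-declared size must match */
		kfree(dg);
		return -EINVAL;
	}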
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 35f19a683822..f42d9c4e4561 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -295,12 +295,20 @@ static void *qp_alloc_queue(u64 size, u32 flags)
295{ 295{
296 u64 i; 296 u64 i;
297 struct vmci_queue *queue; 297 struct vmci_queue *queue;
298 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 298 size_t pas_size;
299 const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); 299 size_t vas_size;
300 const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
301 const size_t queue_size = 301 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
302 sizeof(*queue) + sizeof(*queue->kernel_if) + 302
303 pas_size + vas_size; 303 if (num_pages >
304 (SIZE_MAX - queue_size) /
305 (sizeof(*queue->kernel_if->u.g.pas) +
306 sizeof(*queue->kernel_if->u.g.vas)))
307 return NULL;
308
309 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
310 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
311 queue_size += pas_size + vas_size;
304 312
305 queue = vmalloc(queue_size); 313 queue = vmalloc(queue_size);
306 if (!queue) 314 if (!queue)
@@ -615,10 +623,15 @@ static int qp_memcpy_from_queue_iov(void *dest,
615static struct vmci_queue *qp_host_alloc_queue(u64 size) 623static struct vmci_queue *qp_host_alloc_queue(u64 size)
616{ 624{
617 struct vmci_queue *queue; 625 struct vmci_queue *queue;
618 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 626 size_t queue_page_size;
627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
619 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
620 const size_t queue_page_size = 629
621 num_pages * sizeof(*queue->kernel_if->u.h.page); 630 if (num_pages > (SIZE_MAX - queue_size) /
631 sizeof(*queue->kernel_if->u.h.page))
632 return NULL;
633
634 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
622 635
623 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); 636 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
624 if (queue) { 637 if (queue) {
@@ -737,7 +750,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
737 produce_q->kernel_if->num_pages, 1, 750 produce_q->kernel_if->num_pages, 1,
738 produce_q->kernel_if->u.h.header_page); 751 produce_q->kernel_if->u.h.header_page);
739 if (retval < produce_q->kernel_if->num_pages) { 752 if (retval < produce_q->kernel_if->num_pages) {
740 pr_warn("get_user_pages(produce) failed (retval=%d)", retval); 753 pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
754 retval);
741 qp_release_pages(produce_q->kernel_if->u.h.header_page, 755 qp_release_pages(produce_q->kernel_if->u.h.header_page,
742 retval, false); 756 retval, false);
743 err = VMCI_ERROR_NO_MEM; 757 err = VMCI_ERROR_NO_MEM;
@@ -748,7 +762,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
748 consume_q->kernel_if->num_pages, 1, 762 consume_q->kernel_if->num_pages, 1,
749 consume_q->kernel_if->u.h.header_page); 763 consume_q->kernel_if->u.h.header_page);
750 if (retval < consume_q->kernel_if->num_pages) { 764 if (retval < consume_q->kernel_if->num_pages) {
751 pr_warn("get_user_pages(consume) failed (retval=%d)", retval); 765 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
766 retval);
752 qp_release_pages(consume_q->kernel_if->u.h.header_page, 767 qp_release_pages(consume_q->kernel_if->u.h.header_page,
753 retval, false); 768 retval, false);
754 qp_release_pages(produce_q->kernel_if->u.h.header_page, 769 qp_release_pages(produce_q->kernel_if->u.h.header_page,
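Both vmci_queue_pair.c hunks guard the allocation-size arithmetic: num_pages is derived from a 64-bit, externally supplied queue size, so each multiplication that feeds vmalloc()/kzalloc() is first bounded against SIZE_MAX. The idiom can be distilled into a tiny helper; the name below is ours, not the driver's.

#include <linux/kernel.h>	/* SIZE_MAX */

/* Minimal illustration of the overflow guard used in both hunks above:
 * accept a count only if base + count * elem_size cannot exceed SIZE_MAX. */
static inline bool alloc_size_fits(size_t base, u64 count, size_t elem_size)
{
	return count <= (SIZE_MAX - base) / elem_size;
}

With such a helper, qp_alloc_queue() would reject oversized requests with "if (!alloc_size_fits(queue_size, num_pages, per_page)) return NULL;" before computing pas_size and vas_size, which is exactly what the open-coded comparisons above do.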