author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 19:13:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 19:13:21 -0400
commit     675c354a95d5375153b8bb80a0448cab916c7991 (patch)
tree       88cbc5a5a31dd1c1016271006a8d56cfe0abf7bd /drivers
parent     c70929147a10fa4538886cb23b934b509c4c0e49 (diff)
parent     1b3fa22e0234d613df967445cd34807e10fa54fa (diff)
Merge tag 'char-misc-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver patches from Greg KH:
 "Here's the big char/misc driver updates for 3.15-rc1. Lots of various
  things here, including the new mcb driver subsystem.

  All of these have been in linux-next for a while"

* tag 'char-misc-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (118 commits)
  extcon: Move OF helper function to extcon core and change function name
  extcon: of: Remove unnecessary function call by using the name of device_node
  extcon: gpio: Use SIMPLE_DEV_PM_OPS macro
  extcon: palmas: Use SIMPLE_DEV_PM_OPS macro
  mei: don't use deprecated DEFINE_PCI_DEVICE_TABLE macro
  mei: amthif: fix checkpatch error
  mei: client.h fix checkpatch errors
  mei: use cl_dbg where appropriate
  mei: fix Unnecessary space after function pointer name
  mei: report consistently copy_from/to_user failures
  mei: drop pr_fmt macros
  mei: make me hw headers private to me hw.
  mei: fix memory leak of pending write cb objects
  mei: me: do not reset when less than expected data is received
  drivers: mcb: Fix build error discovered by 0-day bot
  cs5535-mfgpt: Simplify dependencies
  spmi: pm: drop bus-level PM suspend/resume routines
  spmi: pmic_arb: make selectable on ARCH_QCOM
  Drivers: hv: vmbus: Increase the limit on the number of pfns we can handle
  pch_phub: Report error writing MAC back to user
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 4
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 228
-rw-r--r--  drivers/char/agp/frontend.c | 1
-rw-r--r--  drivers/char/agp/generic.c | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 1
-rw-r--r--  drivers/char/agp/sgi-agp.c | 1
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 1
-rw-r--r--  drivers/char/hw_random/core.c | 1
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 1
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 1
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 1
-rw-r--r--  drivers/char/hw_random/octeon-rng.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 1
-rw-r--r--  drivers/char/mem.c | 6
-rw-r--r--  drivers/char/mwave/3780i.c | 1
-rw-r--r--  drivers/char/tile-srom.c | 1
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 1
-rw-r--r--  drivers/char/tpm/tpm_i2c_stm_st33.c | 1
-rw-r--r--  drivers/connector/cn_proc.c | 18
-rw-r--r--  drivers/connector/connector.c | 20
-rw-r--r--  drivers/extcon/Kconfig | 4
-rw-r--r--  drivers/extcon/Makefile | 2
-rw-r--r--  drivers/extcon/extcon-class.c | 42
-rw-r--r--  drivers/extcon/extcon-gpio.c | 4
-rw-r--r--  drivers/extcon/extcon-palmas.c | 5
-rw-r--r--  drivers/extcon/of_extcon.c | 64
-rw-r--r--  drivers/fmc/fmc-core.c | 22
-rw-r--r--  drivers/fmc/fmc-sdb.c | 41
-rw-r--r--  drivers/hv/Makefile | 2
-rw-r--r--  drivers/hv/channel.c | 42
-rw-r--r--  drivers/hv/hv_balloon.c | 3
-rw-r--r--  drivers/hv/hv_fcopy.c | 414
-rw-r--r--  drivers/hv/hv_kvp.c | 4
-rw-r--r--  drivers/hv/hv_snapshot.c | 2
-rw-r--r--  drivers/hv/hv_util.c | 11
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 8
-rw-r--r--  drivers/hv/ring_buffer.c | 17
-rw-r--r--  drivers/hv/vmbus_drv.c | 51
-rw-r--r--  drivers/iio/adc/Kconfig | 10
-rw-r--r--  drivers/iio/adc/Makefile | 1
-rw-r--r--  drivers/iio/adc/men_z188_adc.c | 172
-rw-r--r--  drivers/mcb/Kconfig | 31
-rw-r--r--  drivers/mcb/Makefile | 7
-rw-r--r--  drivers/mcb/mcb-core.c | 414
-rw-r--r--  drivers/mcb/mcb-internal.h | 118
-rw-r--r--  drivers/mcb/mcb-parse.c | 159
-rw-r--r--  drivers/mcb/mcb-pci.c | 114
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 2
-rw-r--r--  drivers/memory/Kconfig | 15
-rw-r--r--  drivers/memory/Makefile | 2
-rw-r--r--  drivers/memory/fsl_ifc.c | 309
-rw-r--r--  drivers/memory/ti-aemif.c | 427
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/ad525x_dpot.c | 1
-rw-r--r--  drivers/misc/apds9802als.c | 1
-rw-r--r--  drivers/misc/bmp085.c | 1
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 1
-rw-r--r--  drivers/misc/ds1682.c | 1
-rw-r--r--  drivers/misc/eeprom/at25.c | 1
-rw-r--r--  drivers/misc/eeprom/eeprom.c | 1
-rw-r--r--  drivers/misc/eeprom/eeprom_93xx46.c | 1
-rw-r--r--  drivers/misc/eeprom/max6875.c | 1
-rw-r--r--  drivers/misc/eeprom/sunxi_sid.c | 3
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 1
-rw-r--r--  drivers/misc/hmc6352.c | 1
-rw-r--r--  drivers/misc/isl29003.c | 1
-rw-r--r--  drivers/misc/isl29020.c | 1
-rw-r--r--  drivers/misc/lattice-ecp3-config.c | 1
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 1
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_i2c.c | 1
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_spi.c | 1
-rw-r--r--  drivers/misc/lkdtm.c | 74
-rw-r--r--  drivers/misc/mei/Kconfig | 9
-rw-r--r--  drivers/misc/mei/Makefile | 6
-rw-r--r--  drivers/misc/mei/amthif.c | 72
-rw-r--r--  drivers/misc/mei/bus.c | 21
-rw-r--r--  drivers/misc/mei/client.c | 300
-rw-r--r--  drivers/misc/mei/client.h | 30
-rw-r--r--  drivers/misc/mei/debugfs.c | 54
-rw-r--r--  drivers/misc/mei/hbm.c | 281
-rw-r--r--  drivers/misc/mei/hbm.h | 1
-rw-r--r--  drivers/misc/mei/hw-me.c | 30
-rw-r--r--  drivers/misc/mei/hw-txe-regs.h | 294
-rw-r--r--  drivers/misc/mei/hw-txe.c | 1107
-rw-r--r--  drivers/misc/mei/hw-txe.h | 74
-rw-r--r--  drivers/misc/mei/hw.h | 20
-rw-r--r--  drivers/misc/mei/init.c | 43
-rw-r--r--  drivers/misc/mei/interrupt.c | 159
-rw-r--r--  drivers/misc/mei/main.c | 23
-rw-r--r--  drivers/misc/mei/mei_dev.h | 54
-rw-r--r--  drivers/misc/mei/nfc.c | 14
-rw-r--r--  drivers/misc/mei/pci-me.c | 14
-rw-r--r--  drivers/misc/mei/pci-txe.c | 293
-rw-r--r--  drivers/misc/mei/wd.c | 136
-rw-r--r--  drivers/misc/mic/host/mic_intr.c | 2
-rw-r--r--  drivers/misc/pch_phub.c | 5
-rw-r--r--  drivers/misc/sram.c | 127
-rw-r--r--  drivers/misc/ti-st/st_core.c | 1
-rw-r--r--  drivers/misc/ti_dac7512.c | 1
-rw-r--r--  drivers/misc/tsl2550.c | 1
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 7
-rw-r--r--  drivers/mtd/nand/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 2
-rw-r--r--  drivers/parport/share.c | 3
-rw-r--r--  drivers/spmi/Kconfig | 27
-rw-r--r--  drivers/spmi/Makefile | 6
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 778
-rw-r--r--  drivers/spmi/spmi.c | 574
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 3
-rw-r--r--  drivers/video/hyperv_fb.c | 88
-rw-r--r--  drivers/video/uvesafb.c | 4
-rw-r--r--  drivers/vme/bridges/vme_ca91cx42.c | 29
-rw-r--r--  drivers/vme/bridges/vme_tsi148.c | 18
-rw-r--r--  drivers/w1/masters/Kconfig | 3
-rw-r--r--  drivers/w1/masters/ds2490.c | 155
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 43
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 19
-rw-r--r--  drivers/w1/slaves/Kconfig | 5
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 21
-rw-r--r--  drivers/w1/w1.c | 269
-rw-r--r--  drivers/w1/w1.h | 186
-rw-r--r--  drivers/w1/w1_family.c | 8
-rw-r--r--  drivers/w1/w1_family.h | 13
-rw-r--r--  drivers/w1/w1_int.c | 25
-rw-r--r--  drivers/w1/w1_io.c | 102
-rw-r--r--  drivers/w1/w1_netlink.c | 359
-rw-r--r--  drivers/w1/w1_netlink.h | 33
128 files changed, 7665 insertions, 1196 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b3138fbb46a4..0a0a90f52d26 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
52 52
53source "drivers/spi/Kconfig" 53source "drivers/spi/Kconfig"
54 54
55source "drivers/spmi/Kconfig"
56
55source "drivers/hsi/Kconfig" 57source "drivers/hsi/Kconfig"
56 58
57source "drivers/pps/Kconfig" 59source "drivers/pps/Kconfig"
@@ -170,4 +172,6 @@ source "drivers/phy/Kconfig"
170 172
171source "drivers/powercap/Kconfig" 173source "drivers/powercap/Kconfig"
172 174
175source "drivers/mcb/Kconfig"
176
173endmenu 177endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 8e3b8b06c0b2..e3ced91b1784 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_ATA) += ata/
66obj-$(CONFIG_TARGET_CORE) += target/ 66obj-$(CONFIG_TARGET_CORE) += target/
67obj-$(CONFIG_MTD) += mtd/ 67obj-$(CONFIG_MTD) += mtd/
68obj-$(CONFIG_SPI) += spi/ 68obj-$(CONFIG_SPI) += spi/
69obj-$(CONFIG_SPMI) += spmi/
69obj-y += hsi/ 70obj-y += hsi/
70obj-y += net/ 71obj-y += net/
71obj-$(CONFIG_ATM) += atm/ 72obj-$(CONFIG_ATM) += atm/
@@ -155,3 +156,4 @@ obj-$(CONFIG_IPACK_BUS) += ipack/
155obj-$(CONFIG_NTB) += ntb/ 156obj-$(CONFIG_NTB) += ntb/
156obj-$(CONFIG_FMC) += fmc/ 157obj-$(CONFIG_FMC) += fmc/
157obj-$(CONFIG_POWERCAP) += powercap/ 158obj-$(CONFIG_POWERCAP) += powercap/
159obj-$(CONFIG_MCB) += mcb/
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index ac2391013db1..d7026dc33388 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -22,69 +22,235 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/init.h> 23#include <linux/init.h>
24 24
25static int regmap_spmi_read(void *context, 25static int regmap_spmi_base_read(void *context,
26 const void *reg, size_t reg_size, 26 const void *reg, size_t reg_size,
27 void *val, size_t val_size) 27 void *val, size_t val_size)
28{ 28{
29 u8 addr = *(u8 *)reg;
30 int err = 0;
31
32 BUG_ON(reg_size != 1);
33
34 while (val_size-- && !err)
35 err = spmi_register_read(context, addr++, val++);
36
37 return err;
38}
39
40static int regmap_spmi_base_gather_write(void *context,
41 const void *reg, size_t reg_size,
42 const void *val, size_t val_size)
43{
44 const u8 *data = val;
45 u8 addr = *(u8 *)reg;
46 int err = 0;
47
48 BUG_ON(reg_size != 1);
49
50 /*
51 * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence,
52 * use it when possible.
53 */
54 if (addr == 0 && val_size) {
55 err = spmi_register_zero_write(context, *data);
56 if (err)
57 goto err_out;
58
59 data++;
60 addr++;
61 val_size--;
62 }
63
64 while (val_size) {
65 err = spmi_register_write(context, addr, *data);
66 if (err)
67 goto err_out;
68
69 data++;
70 addr++;
71 val_size--;
72 }
73
74err_out:
75 return err;
76}
77
78static int regmap_spmi_base_write(void *context, const void *data,
79 size_t count)
80{
81 BUG_ON(count < 1);
82 return regmap_spmi_base_gather_write(context, data, 1, data + 1,
83 count - 1);
84}
85
86static struct regmap_bus regmap_spmi_base = {
87 .read = regmap_spmi_base_read,
88 .write = regmap_spmi_base_write,
89 .gather_write = regmap_spmi_base_gather_write,
90 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
91 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
92};
93
94/**
95 * regmap_init_spmi_base(): Create regmap for the Base register space
96 * @sdev: SPMI device that will be interacted with
97 * @config: Configuration for register map
98 *
99 * The return value will be an ERR_PTR() on error or a valid pointer to
100 * a struct regmap.
101 */
102struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
103 const struct regmap_config *config)
104{
105 return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
106}
107EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
108
109/**
110 * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
111 * @sdev: SPMI device that will be interacted with
112 * @config: Configuration for register map
113 *
114 * The return value will be an ERR_PTR() on error or a valid pointer
115 * to a struct regmap. The regmap will be automatically freed by the
116 * device management code.
117 */
118struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
119 const struct regmap_config *config)
120{
121 return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
122}
123EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
124
125static int regmap_spmi_ext_read(void *context,
126 const void *reg, size_t reg_size,
127 void *val, size_t val_size)
128{
129 int err = 0;
130 size_t len;
131 u16 addr;
132
29 BUG_ON(reg_size != 2); 133 BUG_ON(reg_size != 2);
30 return spmi_ext_register_readl(context, *(u16 *)reg, 134
31 val, val_size); 135 addr = *(u16 *)reg;
136
137 /*
138 * Split accesses into two to take advantage of the more
139 * bandwidth-efficient 'Extended Register Read' command when possible
140 */
141 while (addr <= 0xFF && val_size) {
142 len = min_t(size_t, val_size, 16);
143
144 err = spmi_ext_register_read(context, addr, val, len);
145 if (err)
146 goto err_out;
147
148 addr += len;
149 val += len;
150 val_size -= len;
151 }
152
153 while (val_size) {
154 len = min_t(size_t, val_size, 8);
155
156 err = spmi_ext_register_readl(context, addr, val, val_size);
157 if (err)
158 goto err_out;
159
160 addr += len;
161 val += len;
162 val_size -= len;
163 }
164
165err_out:
166 return err;
32} 167}
33 168
34static int regmap_spmi_gather_write(void *context, 169static int regmap_spmi_ext_gather_write(void *context,
35 const void *reg, size_t reg_size, 170 const void *reg, size_t reg_size,
36 const void *val, size_t val_size) 171 const void *val, size_t val_size)
37{ 172{
173 int err = 0;
174 size_t len;
175 u16 addr;
176
38 BUG_ON(reg_size != 2); 177 BUG_ON(reg_size != 2);
39 return spmi_ext_register_writel(context, *(u16 *)reg, val, val_size); 178
179 addr = *(u16 *)reg;
180
181 while (addr <= 0xFF && val_size) {
182 len = min_t(size_t, val_size, 16);
183
184 err = spmi_ext_register_write(context, addr, val, len);
185 if (err)
186 goto err_out;
187
188 addr += len;
189 val += len;
190 val_size -= len;
191 }
192
193 while (val_size) {
194 len = min_t(size_t, val_size, 8);
195
196 err = spmi_ext_register_writel(context, addr, val, len);
197 if (err)
198 goto err_out;
199
200 addr += len;
201 val += len;
202 val_size -= len;
203 }
204
205err_out:
206 return err;
40} 207}
41 208
42static int regmap_spmi_write(void *context, const void *data, 209static int regmap_spmi_ext_write(void *context, const void *data,
43 size_t count) 210 size_t count)
44{ 211{
45 BUG_ON(count < 2); 212 BUG_ON(count < 2);
46 return regmap_spmi_gather_write(context, data, 2, data + 2, count - 2); 213 return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
214 count - 2);
47} 215}
48 216
49static struct regmap_bus regmap_spmi = { 217static struct regmap_bus regmap_spmi_ext = {
50 .read = regmap_spmi_read, 218 .read = regmap_spmi_ext_read,
51 .write = regmap_spmi_write, 219 .write = regmap_spmi_ext_write,
52 .gather_write = regmap_spmi_gather_write, 220 .gather_write = regmap_spmi_ext_gather_write,
53 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, 221 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
54 .val_format_endian_default = REGMAP_ENDIAN_NATIVE, 222 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
55}; 223};
56 224
57/** 225/**
58 * regmap_init_spmi(): Initialize register map 226 * regmap_init_spmi_ext(): Create regmap for Ext register space
59 * 227 * @sdev: Device that will be interacted with
60 * @sdev: Device that will be interacted with 228 * @config: Configuration for register map
61 * @config: Configuration for register map
62 * 229 *
63 * The return value will be an ERR_PTR() on error or a valid pointer to 230 * The return value will be an ERR_PTR() on error or a valid pointer to
64 * a struct regmap. 231 * a struct regmap.
65 */ 232 */
66struct regmap *regmap_init_spmi(struct spmi_device *sdev, 233struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
67 const struct regmap_config *config) 234 const struct regmap_config *config)
68{ 235{
69 return regmap_init(&sdev->dev, &regmap_spmi, sdev, config); 236 return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
70} 237}
71EXPORT_SYMBOL_GPL(regmap_init_spmi); 238EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
72 239
73/** 240/**
74 * devm_regmap_init_spmi(): Initialise managed register map 241 * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
75 * 242 * @sdev: SPMI device that will be interacted with
76 * @sdev: Device that will be interacted with 243 * @config: Configuration for register map
77 * @config: Configuration for register map
78 * 244 *
79 * The return value will be an ERR_PTR() on error or a valid pointer 245 * The return value will be an ERR_PTR() on error or a valid pointer
80 * to a struct regmap. The regmap will be automatically freed by the 246 * to a struct regmap. The regmap will be automatically freed by the
81 * device management code. 247 * device management code.
82 */ 248 */
83struct regmap *devm_regmap_init_spmi(struct spmi_device *sdev, 249struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
84 const struct regmap_config *config) 250 const struct regmap_config *config)
85{ 251{
86 return devm_regmap_init(&sdev->dev, &regmap_spmi, sdev, config); 252 return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
87} 253}
88EXPORT_SYMBOL_GPL(devm_regmap_init_spmi); 254EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
89 255
90MODULE_LICENSE("GPL"); 256MODULE_LICENSE("GPL");
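
The split into Base and Extended regmap buses matters to SPMI client drivers: the Base space uses 8-bit register addresses, while the Extended space uses 16-bit addresses. A minimal sketch, assuming a hypothetical PMIC driver and a made-up register layout, of how the new devm_regmap_init_spmi_ext() helper might be called from a client probe:

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/regmap.h>
	#include <linux/spmi.h>

	/* Assumed values, for illustration only */
	static const struct regmap_config pmic_regmap_config = {
		.reg_bits = 16,		/* Extended register space: 16-bit addresses */
		.val_bits = 8,
		.max_register = 0xffff,
	};

	static int pmic_probe(struct spmi_device *sdev)
	{
		struct regmap *map;
		unsigned int rev;
		int ret;

		/* Managed variant: the regmap is released automatically on unbind */
		map = devm_regmap_init_spmi_ext(sdev, &pmic_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* 0x0100 is a hypothetical revision register */
		ret = regmap_read(map, 0x0100, &rev);
		if (ret)
			return ret;

		dev_info(&sdev->dev, "revision %#x\n", rev);
		return 0;
	}
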
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 1b192395a90c..8121b4c70ede 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -31,7 +31,6 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/mman.h> 32#include <linux/mman.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
36#include <linux/agp_backend.h> 35#include <linux/agp_backend.h>
37#include <linux/agpgart.h> 36#include <linux/agpgart.h>
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index f39437addb58..0fbccce1cee9 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -29,7 +29,6 @@
29 */ 29 */
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/pagemap.h> 32#include <linux/pagemap.h>
34#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
35#include <linux/pm.h> 34#include <linux/pm.h>
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5c85350f4c3d..9a024f899dd4 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/kernel.h> 20#include <linux/kernel.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/agp_backend.h> 22#include <linux/agp_backend.h>
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 05b8d0241bde..3051c73bc383 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -15,7 +15,6 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/agp_backend.h> 18#include <linux/agp_backend.h>
20#include <asm/sn/addrs.h> 19#include <asm/sn/addrs.h>
21#include <asm/sn/io.h> 20#include <asm/sn/io.h>
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 43577ca780e3..8c3b255e629a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/hw_random.h> 10#include <linux/hw_random.h>
11#include <linux/init.h>
12#include <linux/io.h> 11#include <linux/io.h>
13#include <linux/kernel.h> 12#include <linux/kernel.h>
14#include <linux/module.h> 13#include <linux/module.h>
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a0f7724852eb..b9495a8c05c6 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -37,7 +37,6 @@
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/init.h>
41#include <linux/miscdevice.h> 40#include <linux/miscdevice.h>
42#include <linux/delay.h> 41#include <linux/delay.h>
43#include <linux/slab.h> 42#include <linux/slab.h>
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 402ccfb625c5..9f8277cc44b4 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -22,7 +22,6 @@
22#include <linux/hw_random.h> 22#include <linux/hw_random.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/io.h> 25#include <linux/io.h>
27#include <linux/platform_device.h> 26#include <linux/platform_device.h>
28#include <linux/clk.h> 27#include <linux/clk.h>
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index f9beed54d0c8..432232eefe05 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -7,7 +7,6 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/init.h>
11#include <linux/slab.h> 10#include <linux/slab.h>
12#include <linux/workqueue.h> 11#include <linux/workqueue.h>
13#include <linux/preempt.h> 12#include <linux/preempt.h>
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 232b87fb5fc9..00e9d2d46634 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -10,7 +10,6 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/amba/bus.h> 14#include <linux/amba/bus.h>
16#include <linux/hw_random.h> 15#include <linux/hw_random.h>
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index f2885dbe1849..b5cc3420c659 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/platform_device.h> 13#include <linux/platform_device.h>
15#include <linux/device.h> 14#include <linux/device.h>
16#include <linux/hw_random.h> 15#include <linux/hw_random.h>
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 03f41896d090..b7efd3c1a882 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,7 +61,6 @@
61#include <linux/ipmi_smi.h> 61#include <linux/ipmi_smi.h>
62#include <asm/io.h> 62#include <asm/io.h>
63#include "ipmi_si_sm.h" 63#include "ipmi_si_sm.h"
64#include <linux/init.h>
65#include <linux/dmi.h> 64#include <linux/dmi.h>
66#include <linux/string.h> 65#include <linux/string.h>
67#include <linux/ctype.h> 66#include <linux/ctype.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 92c5937f80c3..917403fe10da 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -99,6 +99,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
99 ssize_t read, sz; 99 ssize_t read, sz;
100 char *ptr; 100 char *ptr;
101 101
102 if (p != *ppos)
103 return 0;
104
102 if (!valid_phys_addr_range(p, count)) 105 if (!valid_phys_addr_range(p, count))
103 return -EFAULT; 106 return -EFAULT;
104 read = 0; 107 read = 0;
@@ -157,6 +160,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
157 unsigned long copied; 160 unsigned long copied;
158 void *ptr; 161 void *ptr;
159 162
163 if (p != *ppos)
164 return -EFBIG;
165
160 if (!valid_phys_addr_range(p, count)) 166 if (!valid_phys_addr_range(p, count))
161 return -EFAULT; 167 return -EFAULT;
162 168
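
The new guards make the offset handling explicit: *ppos is a 64-bit loff_t, but it is assigned to a phys_addr_t, which may be narrower, so an offset that does not round-trip through that conversion is now rejected (EOF on read, -EFBIG on write). A sketch of that check in isolation, purely for illustration:

	#include <linux/types.h>

	/* Illustration only: mirrors the new guard in read_mem()/write_mem() */
	static bool mem_offset_fits(loff_t ppos)
	{
		phys_addr_t p = ppos;	/* may truncate when phys_addr_t is 32-bit */

		return p == ppos;	/* false: read_mem() returns 0, write_mem() -EFBIG */
	}
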
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 881c9e595939..28740046bc83 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -50,7 +50,6 @@
50#include <linux/unistd.h> 50#include <linux/unistd.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/ioport.h> 52#include <linux/ioport.h>
53#include <linux/init.h>
54#include <linux/bitops.h> 53#include <linux/bitops.h>
55#include <linux/sched.h> /* cond_resched() */ 54#include <linux/sched.h> /* cond_resched() */
56 55
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 0e506bad1986..bd377472dcfb 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -20,7 +20,6 @@
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
23#include <linux/init.h>
24#include <linux/kernel.h> /* printk() */ 23#include <linux/kernel.h> /* printk() */
25#include <linux/slab.h> /* kmalloc() */ 24#include <linux/slab.h> /* kmalloc() */
26#include <linux/fs.h> /* everything... */ 25#include <linux/fs.h> /* everything... */
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 52b9b2b2f300..472af4bb1b61 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -21,7 +21,6 @@
21 * 21 *
22 * 22 *
23 */ 23 */
24#include <linux/init.h>
25#include <linux/i2c.h> 24#include <linux/i2c.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/wait.h> 26#include <linux/wait.h>
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 5b0dd8ef74c0..3b7bf2162898 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -38,7 +38,6 @@
38#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
39#include <linux/kernel.h> 39#include <linux/kernel.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/init.h>
42#include <linux/wait.h> 41#include <linux/wait.h>
43#include <linux/string.h> 42#include <linux/string.h>
44#include <linux/interrupt.h> 43#include <linux/interrupt.h>
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 18c5b9b16645..148d707a1d43 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -95,7 +95,7 @@ void proc_fork_connector(struct task_struct *task)
95 msg->len = sizeof(*ev); 95 msg->len = sizeof(*ev);
96 msg->flags = 0; /* not used */ 96 msg->flags = 0; /* not used */
97 /* If cn_netlink_send() failed, the data is not sent */ 97 /* If cn_netlink_send() failed, the data is not sent */
98 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 98 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
99} 99}
100 100
101void proc_exec_connector(struct task_struct *task) 101void proc_exec_connector(struct task_struct *task)
@@ -122,7 +122,7 @@ void proc_exec_connector(struct task_struct *task)
122 msg->ack = 0; /* not used */ 122 msg->ack = 0; /* not used */
123 msg->len = sizeof(*ev); 123 msg->len = sizeof(*ev);
124 msg->flags = 0; /* not used */ 124 msg->flags = 0; /* not used */
125 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 125 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
126} 126}
127 127
128void proc_id_connector(struct task_struct *task, int which_id) 128void proc_id_connector(struct task_struct *task, int which_id)
@@ -163,7 +163,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
163 msg->ack = 0; /* not used */ 163 msg->ack = 0; /* not used */
164 msg->len = sizeof(*ev); 164 msg->len = sizeof(*ev);
165 msg->flags = 0; /* not used */ 165 msg->flags = 0; /* not used */
166 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 166 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
167} 167}
168 168
169void proc_sid_connector(struct task_struct *task) 169void proc_sid_connector(struct task_struct *task)
@@ -190,7 +190,7 @@ void proc_sid_connector(struct task_struct *task)
190 msg->ack = 0; /* not used */ 190 msg->ack = 0; /* not used */
191 msg->len = sizeof(*ev); 191 msg->len = sizeof(*ev);
192 msg->flags = 0; /* not used */ 192 msg->flags = 0; /* not used */
193 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 193 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
194} 194}
195 195
196void proc_ptrace_connector(struct task_struct *task, int ptrace_id) 196void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -225,7 +225,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
225 msg->ack = 0; /* not used */ 225 msg->ack = 0; /* not used */
226 msg->len = sizeof(*ev); 226 msg->len = sizeof(*ev);
227 msg->flags = 0; /* not used */ 227 msg->flags = 0; /* not used */
228 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 228 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
229} 229}
230 230
231void proc_comm_connector(struct task_struct *task) 231void proc_comm_connector(struct task_struct *task)
@@ -253,7 +253,7 @@ void proc_comm_connector(struct task_struct *task)
253 msg->ack = 0; /* not used */ 253 msg->ack = 0; /* not used */
254 msg->len = sizeof(*ev); 254 msg->len = sizeof(*ev);
255 msg->flags = 0; /* not used */ 255 msg->flags = 0; /* not used */
256 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 256 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
257} 257}
258 258
259void proc_coredump_connector(struct task_struct *task) 259void proc_coredump_connector(struct task_struct *task)
@@ -280,7 +280,7 @@ void proc_coredump_connector(struct task_struct *task)
280 msg->ack = 0; /* not used */ 280 msg->ack = 0; /* not used */
281 msg->len = sizeof(*ev); 281 msg->len = sizeof(*ev);
282 msg->flags = 0; /* not used */ 282 msg->flags = 0; /* not used */
283 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 283 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
284} 284}
285 285
286void proc_exit_connector(struct task_struct *task) 286void proc_exit_connector(struct task_struct *task)
@@ -309,7 +309,7 @@ void proc_exit_connector(struct task_struct *task)
309 msg->ack = 0; /* not used */ 309 msg->ack = 0; /* not used */
310 msg->len = sizeof(*ev); 310 msg->len = sizeof(*ev);
311 msg->flags = 0; /* not used */ 311 msg->flags = 0; /* not used */
312 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 312 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
313} 313}
314 314
315/* 315/*
@@ -343,7 +343,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
343 msg->ack = rcvd_ack + 1; 343 msg->ack = rcvd_ack + 1;
344 msg->len = sizeof(*ev); 344 msg->len = sizeof(*ev);
345 msg->flags = 0; /* not used */ 345 msg->flags = 0; /* not used */
346 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 346 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
347} 347}
348 348
349/** 349/**
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a36749f1e44a..77afe7487d34 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -50,7 +50,7 @@ static int cn_already_initialized;
50 * 50 *
51 * Sequence number is incremented with each message to be sent. 51 * Sequence number is incremented with each message to be sent.
52 * 52 *
53 * If we expect reply to our message then the sequence number in 53 * If we expect a reply to our message then the sequence number in
54 * received message MUST be the same as in original message, and 54 * received message MUST be the same as in original message, and
55 * acknowledge number MUST be the same + 1. 55 * acknowledge number MUST be the same + 1.
56 * 56 *
@@ -62,8 +62,11 @@ static int cn_already_initialized;
62 * the acknowledgement number in the original message + 1, then it is 62 * the acknowledgement number in the original message + 1, then it is
63 * a new message. 63 * a new message.
64 * 64 *
65 * The message is sent to, the portid if given, the group if given, both if
66 * both, or if both are zero then the group is looked up and sent there.
65 */ 67 */
66int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) 68int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
69 gfp_t gfp_mask)
67{ 70{
68 struct cn_callback_entry *__cbq; 71 struct cn_callback_entry *__cbq;
69 unsigned int size; 72 unsigned int size;
@@ -74,7 +77,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
74 u32 group = 0; 77 u32 group = 0;
75 int found = 0; 78 int found = 0;
76 79
77 if (!__group) { 80 if (portid || __group) {
81 group = __group;
82 } else {
78 spin_lock_bh(&dev->cbdev->queue_lock); 83 spin_lock_bh(&dev->cbdev->queue_lock);
79 list_for_each_entry(__cbq, &dev->cbdev->queue_list, 84 list_for_each_entry(__cbq, &dev->cbdev->queue_list,
80 callback_entry) { 85 callback_entry) {
@@ -88,11 +93,9 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
88 93
89 if (!found) 94 if (!found)
90 return -ENODEV; 95 return -ENODEV;
91 } else {
92 group = __group;
93 } 96 }
94 97
95 if (!netlink_has_listeners(dev->nls, group)) 98 if (!portid && !netlink_has_listeners(dev->nls, group))
96 return -ESRCH; 99 return -ESRCH;
97 100
98 size = sizeof(*msg) + msg->len; 101 size = sizeof(*msg) + msg->len;
@@ -113,7 +116,10 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
113 116
114 NETLINK_CB(skb).dst_group = group; 117 NETLINK_CB(skb).dst_group = group;
115 118
116 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask); 119 if (group)
120 return netlink_broadcast(dev->nls, skb, portid, group,
121 gfp_mask);
122 return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
117} 123}
118EXPORT_SYMBOL_GPL(cn_netlink_send); 124EXPORT_SYMBOL_GPL(cn_netlink_send);
119 125
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index bdb5a00f1dfa..be56e8ac95e6 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -14,10 +14,6 @@ if EXTCON
14 14
15comment "Extcon Device Drivers" 15comment "Extcon Device Drivers"
16 16
17config OF_EXTCON
18 def_tristate y
19 depends on OF
20
21config EXTCON_GPIO 17config EXTCON_GPIO
22 tristate "GPIO extcon support" 18 tristate "GPIO extcon support"
23 depends on GPIOLIB 19 depends on GPIOLIB
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 43eccc0e3448..bf7861ec0906 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,8 +2,6 @@
2# Makefile for external connector class (extcon) devices 2# Makefile for external connector class (extcon) devices
3# 3#
4 4
5obj-$(CONFIG_OF_EXTCON) += of_extcon.o
6
7obj-$(CONFIG_EXTCON) += extcon-class.o 5obj-$(CONFIG_EXTCON) += extcon-class.o
8obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o 6obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
9obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o 7obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 76322330cbd7..7ab21aa6eaa1 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -31,6 +31,7 @@
31#include <linux/extcon.h> 31#include <linux/extcon.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/sysfs.h> 33#include <linux/sysfs.h>
34#include <linux/of.h>
34 35
35/* 36/*
36 * extcon_cable_name suggests the standard cable names for commonly used 37 * extcon_cable_name suggests the standard cable names for commonly used
@@ -818,6 +819,47 @@ void extcon_dev_unregister(struct extcon_dev *edev)
818} 819}
819EXPORT_SYMBOL_GPL(extcon_dev_unregister); 820EXPORT_SYMBOL_GPL(extcon_dev_unregister);
820 821
822#ifdef CONFIG_OF
823/*
824 * extcon_get_edev_by_phandle - Get the extcon device from devicetree
825 * @dev - instance to the given device
826 * @index - index into list of extcon_dev
827 *
828 * return the instance of extcon device
829 */
830struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
831{
832 struct device_node *node;
833 struct extcon_dev *edev;
834
835 if (!dev->of_node) {
836 dev_err(dev, "device does not have a device node entry\n");
837 return ERR_PTR(-EINVAL);
838 }
839
840 node = of_parse_phandle(dev->of_node, "extcon", index);
841 if (!node) {
842 dev_err(dev, "failed to get phandle in %s node\n",
843 dev->of_node->full_name);
844 return ERR_PTR(-ENODEV);
845 }
846
847 edev = extcon_get_extcon_dev(node->name);
848 if (!edev) {
849 dev_err(dev, "unable to get extcon device : %s\n", node->name);
850 return ERR_PTR(-ENODEV);
851 }
852
853 return edev;
854}
855#else
856struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
857{
858 return ERR_PTR(-ENOSYS);
859}
860#endif /* CONFIG_OF */
861EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
862
821static int __init extcon_class_init(void) 863static int __init extcon_class_init(void)
822{ 864{
823 return create_extcon_class(); 865 return create_extcon_class();
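
Consumers now obtain the extcon device directly from the extcon core rather than via the removed OF helper. A sketch, assuming a hypothetical consumer platform driver whose device-tree node carries an "extcon" phandle, of how extcon_get_edev_by_phandle() might be used:

	#include <linux/err.h>
	#include <linux/extcon.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int consumer_probe(struct platform_device *pdev)
	{
		struct extcon_dev *edev;

		if (of_property_read_bool(pdev->dev.of_node, "extcon")) {
			/* index 0: first phandle in the "extcon" property */
			edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
			if (IS_ERR(edev))
				return PTR_ERR(edev);
			/* register interest in cable state changes, etc. */
		}
		return 0;
	}
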
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index a63a6b21c9ad..13d522255d81 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -176,9 +176,7 @@ static int gpio_extcon_resume(struct device *dev)
176} 176}
177#endif 177#endif
178 178
179static const struct dev_pm_ops gpio_extcon_pm_ops = { 179static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
180 SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
181};
182 180
183static struct platform_driver gpio_extcon_driver = { 181static struct platform_driver gpio_extcon_driver = {
184 .probe = gpio_extcon_probe, 182 .probe = gpio_extcon_probe,
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 2aea4bcdd7f3..ddff2b72f0a8 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -271,10 +271,7 @@ static int palmas_usb_resume(struct device *dev)
271}; 271};
272#endif 272#endif
273 273
274static const struct dev_pm_ops palmas_pm_ops = { 274static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
275 SET_SYSTEM_SLEEP_PM_OPS(palmas_usb_suspend,
276 palmas_usb_resume)
277};
278 275
279static struct of_device_id of_palmas_match_tbl[] = { 276static struct of_device_id of_palmas_match_tbl[] = {
280 { .compatible = "ti,palmas-usb", }, 277 { .compatible = "ti,palmas-usb", },
diff --git a/drivers/extcon/of_extcon.c b/drivers/extcon/of_extcon.c
deleted file mode 100644
index 72173ecbb311..000000000000
--- a/drivers/extcon/of_extcon.c
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * OF helpers for External connector (extcon) framework
3 *
4 * Copyright (C) 2013 Texas Instruments, Inc.
5 * Kishon Vijay Abraham I <kishon@ti.com>
6 *
7 * Copyright (C) 2013 Samsung Electronics
8 * Chanwoo Choi <cw00.choi@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/err.h>
19#include <linux/extcon.h>
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/extcon/of_extcon.h>
23
24/*
25 * of_extcon_get_extcon_dev - Get the name of extcon device from devicetree
26 * @dev - instance to the given device
27 * @index - index into list of extcon_dev
28 *
29 * return the instance of extcon device
30 */
31struct extcon_dev *of_extcon_get_extcon_dev(struct device *dev, int index)
32{
33 struct device_node *node;
34 struct extcon_dev *edev;
35 struct platform_device *extcon_parent_dev;
36
37 if (!dev->of_node) {
38 dev_dbg(dev, "device does not have a device node entry\n");
39 return ERR_PTR(-EINVAL);
40 }
41
42 node = of_parse_phandle(dev->of_node, "extcon", index);
43 if (!node) {
44 dev_dbg(dev, "failed to get phandle in %s node\n",
45 dev->of_node->full_name);
46 return ERR_PTR(-ENODEV);
47 }
48
49 extcon_parent_dev = of_find_device_by_node(node);
50 if (!extcon_parent_dev) {
51 dev_dbg(dev, "unable to find device by node\n");
52 return ERR_PTR(-EPROBE_DEFER);
53 }
54
55 edev = extcon_get_extcon_dev(dev_name(&extcon_parent_dev->dev));
56 if (!edev) {
57 dev_dbg(dev, "unable to get extcon device : %s\n",
58 dev_name(&extcon_parent_dev->dev));
59 return ERR_PTR(-ENODEV);
60 }
61
62 return edev;
63}
64EXPORT_SYMBOL_GPL(of_extcon_get_extcon_dev);
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 24d52497524d..353fc546fb08 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -99,10 +99,23 @@ static ssize_t fmc_read_eeprom(struct file *file, struct kobject *kobj,
99 return count; 99 return count;
100} 100}
101 101
102static ssize_t fmc_write_eeprom(struct file *file, struct kobject *kobj,
103 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count)
105{
106 struct device *dev;
107 struct fmc_device *fmc;
108
109 dev = container_of(kobj, struct device, kobj);
110 fmc = container_of(dev, struct fmc_device, dev);
111 return fmc->op->write_ee(fmc, off, buf, count);
112}
113
102static struct bin_attribute fmc_eeprom_attr = { 114static struct bin_attribute fmc_eeprom_attr = {
103 .attr = { .name = "eeprom", .mode = S_IRUGO, }, 115 .attr = { .name = "eeprom", .mode = S_IRUGO | S_IWUSR, },
104 .size = 8192, /* more or less standard */ 116 .size = 8192, /* more or less standard */
105 .read = fmc_read_eeprom, 117 .read = fmc_read_eeprom,
118 .write = fmc_write_eeprom,
106}; 119};
107 120
108/* 121/*
@@ -154,7 +167,7 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
154 ret = -EINVAL; 167 ret = -EINVAL;
155 break; 168 break;
156 } 169 }
157 if (fmc->flags == FMC_DEVICE_NO_MEZZANINE) { 170 if (fmc->flags & FMC_DEVICE_NO_MEZZANINE) {
158 dev_info(fmc->hwdev, "absent mezzanine in slot %d\n", 171 dev_info(fmc->hwdev, "absent mezzanine in slot %d\n",
159 fmc->slot_id); 172 fmc->slot_id);
160 continue; 173 continue;
@@ -189,9 +202,6 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
189 for (i = 0; i < n; i++) { 202 for (i = 0; i < n; i++) {
190 fmc = devarray[i]; 203 fmc = devarray[i];
191 204
192 if (fmc->flags == FMC_DEVICE_NO_MEZZANINE)
193 continue; /* dev_info already done above */
194
195 fmc->nr_slots = n; /* each slot must know how many are there */ 205 fmc->nr_slots = n; /* each slot must know how many are there */
196 fmc->devarray = devarray; 206 fmc->devarray = devarray;
197 207
@@ -263,8 +273,6 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
263 kfree(devs[0]->devarray); 273 kfree(devs[0]->devarray);
264 274
265 for (i = 0; i < n; i++) { 275 for (i = 0; i < n; i++) {
266 if (devs[i]->flags == FMC_DEVICE_NO_MEZZANINE)
267 continue;
268 sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr); 276 sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
269 device_del(&devs[i]->dev); 277 device_del(&devs[i]->dev);
270 fmc_free_id_info(devs[i]); 278 fmc_free_id_info(devs[i]);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 79adc39221ea..4603fdb74465 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -150,23 +150,36 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
150} 150}
151EXPORT_SYMBOL(fmc_reprogram); 151EXPORT_SYMBOL(fmc_reprogram);
152 152
153static char *__strip_trailing_space(char *buf, char *str, int len)
154{
155 int i = len - 1;
156
157 memcpy(buf, str, len);
158 while(i >= 0 && buf[i] == ' ')
159 buf[i--] = '\0';
160 return buf;
161}
162
163#define __sdb_string(buf, field) ({ \
164 BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
165 __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
166 })
167
153static void __fmc_show_sdb_tree(const struct fmc_device *fmc, 168static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
154 const struct sdb_array *arr) 169 const struct sdb_array *arr)
155{ 170{
171 unsigned long base = arr->baseaddr;
156 int i, j, n = arr->len, level = arr->level; 172 int i, j, n = arr->len, level = arr->level;
157 const struct sdb_array *ap; 173 char buf[64];
158 174
159 for (i = 0; i < n; i++) { 175 for (i = 0; i < n; i++) {
160 unsigned long base;
161 union sdb_record *r; 176 union sdb_record *r;
162 struct sdb_product *p; 177 struct sdb_product *p;
163 struct sdb_component *c; 178 struct sdb_component *c;
164 r = &arr->record[i]; 179 r = &arr->record[i];
165 c = &r->dev.sdb_component; 180 c = &r->dev.sdb_component;
166 p = &c->product; 181 p = &c->product;
167 base = 0; 182
168 for (ap = arr; ap; ap = ap->parent)
169 base += ap->baseaddr;
170 dev_info(&fmc->dev, "SDB: "); 183 dev_info(&fmc->dev, "SDB: ");
171 184
172 for (j = 0; j < level; j++) 185 for (j = 0; j < level; j++)
@@ -193,8 +206,8 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
193 p->name, 206 p->name,
194 __be64_to_cpu(c->addr_first) + base); 207 __be64_to_cpu(c->addr_first) + base);
195 if (IS_ERR(arr->subtree[i])) { 208 if (IS_ERR(arr->subtree[i])) {
196 printk(KERN_CONT "(bridge error %li)\n", 209 dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
197 PTR_ERR(arr->subtree[i])); 210 PTR_ERR(arr->subtree[i]));
198 break; 211 break;
199 } 212 }
200 __fmc_show_sdb_tree(fmc, arr->subtree[i]); 213 __fmc_show_sdb_tree(fmc, arr->subtree[i]);
@@ -203,10 +216,20 @@ static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
203 printk(KERN_CONT "integration\n"); 216 printk(KERN_CONT "integration\n");
204 break; 217 break;
205 case sdb_type_repo_url: 218 case sdb_type_repo_url:
206 printk(KERN_CONT "repo-url\n"); 219 printk(KERN_CONT "Synthesis repository: %s\n",
220 __sdb_string(buf, r->repo_url.repo_url));
207 break; 221 break;
208 case sdb_type_synthesis: 222 case sdb_type_synthesis:
209 printk(KERN_CONT "synthesis-info\n"); 223 printk(KERN_CONT "Bitstream '%s' ",
224 __sdb_string(buf, r->synthesis.syn_name));
225 printk(KERN_CONT "synthesized %08x by %s ",
226 __be32_to_cpu(r->synthesis.date),
227 __sdb_string(buf, r->synthesis.user_name));
228 printk(KERN_CONT "(%s version %x), ",
229 __sdb_string(buf, r->synthesis.tool_name),
230 __be32_to_cpu(r->synthesis.tool_version));
231 printk(KERN_CONT "commit %pm\n",
232 r->synthesis.commit_id);
210 break; 233 break;
211 case sdb_type_empty: 234 case sdb_type_empty:
212 printk(KERN_CONT "empty\n"); 235 printk(KERN_CONT "empty\n");
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 0a74b5661186..5e4dfa4cfe22 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
5hv_vmbus-y := vmbus_drv.o \ 5hv_vmbus-y := vmbus_drv.o \
6 hv.o connection.o channel.o \ 6 hv.o connection.o channel.o \
7 channel_mgmt.o ring_buffer.o 7 channel_mgmt.o ring_buffer.o
8hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o 8hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 69ea36f07b4d..602ca86a6488 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30#include <linux/uio.h>
30 31
31#include "hyperv_vmbus.h" 32#include "hyperv_vmbus.h"
32 33
@@ -554,14 +555,14 @@ EXPORT_SYMBOL_GPL(vmbus_close);
554 * 555 *
555 * Mainly used by Hyper-V drivers. 556 * Mainly used by Hyper-V drivers.
556 */ 557 */
557int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer, 558int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
558 u32 bufferlen, u64 requestid, 559 u32 bufferlen, u64 requestid,
559 enum vmbus_packet_type type, u32 flags) 560 enum vmbus_packet_type type, u32 flags)
560{ 561{
561 struct vmpacket_descriptor desc; 562 struct vmpacket_descriptor desc;
562 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; 563 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
563 u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); 564 u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
564 struct scatterlist bufferlist[3]; 565 struct kvec bufferlist[3];
565 u64 aligned_data = 0; 566 u64 aligned_data = 0;
566 int ret; 567 int ret;
567 bool signal = false; 568 bool signal = false;
@@ -575,11 +576,12 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
575 desc.len8 = (u16)(packetlen_aligned >> 3); 576 desc.len8 = (u16)(packetlen_aligned >> 3);
576 desc.trans_id = requestid; 577 desc.trans_id = requestid;
577 578
578 sg_init_table(bufferlist, 3); 579 bufferlist[0].iov_base = &desc;
579 sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor)); 580 bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
580 sg_set_buf(&bufferlist[1], buffer, bufferlen); 581 bufferlist[1].iov_base = buffer;
581 sg_set_buf(&bufferlist[2], &aligned_data, 582 bufferlist[1].iov_len = bufferlen;
582 packetlen_aligned - packetlen); 583 bufferlist[2].iov_base = &aligned_data;
584 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
583 585
584 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 586 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
585 587
@@ -605,7 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
605 u32 descsize; 607 u32 descsize;
606 u32 packetlen; 608 u32 packetlen;
607 u32 packetlen_aligned; 609 u32 packetlen_aligned;
608 struct scatterlist bufferlist[3]; 610 struct kvec bufferlist[3];
609 u64 aligned_data = 0; 611 u64 aligned_data = 0;
610 bool signal = false; 612 bool signal = false;
611 613
@@ -637,11 +639,12 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
637 desc.range[i].pfn = pagebuffers[i].pfn; 639 desc.range[i].pfn = pagebuffers[i].pfn;
638 } 640 }
639 641
640 sg_init_table(bufferlist, 3); 642 bufferlist[0].iov_base = &desc;
641 sg_set_buf(&bufferlist[0], &desc, descsize); 643 bufferlist[0].iov_len = descsize;
642 sg_set_buf(&bufferlist[1], buffer, bufferlen); 644 bufferlist[1].iov_base = buffer;
643 sg_set_buf(&bufferlist[2], &aligned_data, 645 bufferlist[1].iov_len = bufferlen;
644 packetlen_aligned - packetlen); 646 bufferlist[2].iov_base = &aligned_data;
647 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
645 648
646 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 649 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
647 650
@@ -665,7 +668,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
665 u32 descsize; 668 u32 descsize;
666 u32 packetlen; 669 u32 packetlen;
667 u32 packetlen_aligned; 670 u32 packetlen_aligned;
668 struct scatterlist bufferlist[3]; 671 struct kvec bufferlist[3];
669 u64 aligned_data = 0; 672 u64 aligned_data = 0;
670 bool signal = false; 673 bool signal = false;
671 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, 674 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
@@ -700,11 +703,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
700 memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array, 703 memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
701 pfncount * sizeof(u64)); 704 pfncount * sizeof(u64));
702 705
703 sg_init_table(bufferlist, 3); 706 bufferlist[0].iov_base = &desc;
704 sg_set_buf(&bufferlist[0], &desc, descsize); 707 bufferlist[0].iov_len = descsize;
705 sg_set_buf(&bufferlist[1], buffer, bufferlen); 708 bufferlist[1].iov_base = buffer;
706 sg_set_buf(&bufferlist[2], &aligned_data, 709 bufferlist[1].iov_len = bufferlen;
707 packetlen_aligned - packetlen); 710 bufferlist[2].iov_base = &aligned_data;
711 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
708 712
709 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 713 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
710 714
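
The switch from scatterlist to kvec reflects what the ring-buffer writer actually needs: plain kernel virtual address/length pairs, which also lets on-stack data such as the packet descriptor and the alignment padding be described directly. Illustration only, a kvec (from linux/uio.h) is just a {pointer, length} pair:

	#include <linux/uio.h>

	/* Sketch: total payload length described by a kvec array */
	static size_t bufferlist_len(const struct kvec *vec, int count)
	{
		size_t len = 0;
		int i;

		for (i = 0; i < count; i++)
			len += vec[i].iov_len;	/* iov_base is a kernel virtual address */
		return len;
	}
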
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 7e17a5495e02..7e6d78dc9437 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1171,7 +1171,8 @@ static int dm_thread_func(void *dm_dev)
1171 int t; 1171 int t;
1172 1172
1173 while (!kthread_should_stop()) { 1173 while (!kthread_should_stop()) {
1174 t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ); 1174 t = wait_for_completion_interruptible_timeout(
1175 &dm_device.config_event, 1*HZ);
1175 /* 1176 /*
1176 * The host expects us to post information on the memory 1177 * The host expects us to post information on the memory
1177 * pressure every second. 1178 * pressure every second.
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
new file mode 100644
index 000000000000..eaaa3d843b80
--- /dev/null
+++ b/drivers/hv/hv_fcopy.c
@@ -0,0 +1,414 @@
1/*
2 * An implementation of file copy service.
3 *
4 * Copyright (C) 2014, Microsoft, Inc.
5 *
6 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
15 * NON INFRINGEMENT. See the GNU General Public License for more
16 * details.
17 *
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/semaphore.h>
23#include <linux/fs.h>
24#include <linux/nls.h>
25#include <linux/workqueue.h>
26#include <linux/cdev.h>
27#include <linux/hyperv.h>
28#include <linux/sched.h>
29#include <linux/uaccess.h>
30#include <linux/miscdevice.h>
31
32#include "hyperv_vmbus.h"
33
34#define WIN8_SRV_MAJOR 1
35#define WIN8_SRV_MINOR 1
36#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
37
38/*
39 * Global state maintained for transaction that is being processed.
40 * For a class of integration services, including the "file copy service",
41 * the specified protocol is a "request/response" protocol which means that
42 * there can only be single outstanding transaction from the host at any
43 * given point in time. We use this to simplify memory management in this
44 * driver - we cache and process only one message at a time.
45 *
46 * While the request/response protocol is guaranteed by the host, we further
47 * ensure this by serializing packet processing in this driver - we do not
48 * read additional packets from the VMBUs until the current packet is fully
49 * handled.
50 *
51 * The transaction "active" state is set when we receive a request from the
52 * host and we cleanup this state when the transaction is completed - when we
53 * respond to the host with our response. When the transaction active state is
54 * set, we defer handling incoming packets.
55 */
56
57static struct {
58 bool active; /* transaction status - active or not */
59 int recv_len; /* number of bytes received. */
60 struct hv_fcopy_hdr *fcopy_msg; /* current message */
61 struct hv_start_fcopy message; /* sent to daemon */
62 struct vmbus_channel *recv_channel; /* chn we got the request */
63 u64 recv_req_id; /* request ID. */
64 void *fcopy_context; /* for the channel callback */
65 struct semaphore read_sema;
66} fcopy_transaction;
67
68static bool opened; /* currently device opened */
69
70/*
71 * Before we can accept copy messages from the host, we need
72 * to handshake with the user level daemon. This state tracks
73 * if we are in the handshake phase.
74 */
75static bool in_hand_shake = true;
76static void fcopy_send_data(void);
77static void fcopy_respond_to_host(int error);
78static void fcopy_work_func(struct work_struct *dummy);
79static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func);
80static u8 *recv_buffer;
81
82static void fcopy_work_func(struct work_struct *dummy)
83{
84 /*
85 * If the timer fires, the user-mode component has not responded;
86 * process the pending transaction.
87 */
88 fcopy_respond_to_host(HV_E_FAIL);
89}
90
91static int fcopy_handle_handshake(u32 version)
92{
93 switch (version) {
94 case FCOPY_CURRENT_VERSION:
95 break;
96 default:
97 /*
98 * For now we will fail the registration.
99 * If and when we have multiple versions to
100 * deal with, we will be backward compatible.
101 * We will add this code when needed.
102 */
103 return -EINVAL;
104 }
105 pr_info("FCP: user-mode registering done. Daemon version: %d\n",
106 version);
107 fcopy_transaction.active = false;
108 if (fcopy_transaction.fcopy_context)
109 hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
110 in_hand_shake = false;
111 return 0;
112}
113
114static void fcopy_send_data(void)
115{
116 struct hv_start_fcopy *smsg_out = &fcopy_transaction.message;
117 int operation = fcopy_transaction.fcopy_msg->operation;
118 struct hv_start_fcopy *smsg_in;
119
120 /*
121 * The strings sent from the host are encoded in
122 * in utf16; convert it to utf8 strings.
123 * The host assures us that the utf16 strings will not exceed
124 * the max lengths specified. We will however, reserve room
125 * for the string terminating character - in the utf16s_utf8s()
126 * function we limit the size of the buffer where the converted
127 * string is placed to W_MAX_PATH -1 to guarantee
128 * that the strings can be properly terminated!
129 */
130
131 switch (operation) {
132 case START_FILE_COPY:
133 memset(smsg_out, 0, sizeof(struct hv_start_fcopy));
134 smsg_out->hdr.operation = operation;
135 smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
136
137 utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
138 UTF16_LITTLE_ENDIAN,
139 (__u8 *)smsg_out->file_name, W_MAX_PATH - 1);
140
141 utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
142 UTF16_LITTLE_ENDIAN,
143 (__u8 *)smsg_out->path_name, W_MAX_PATH - 1);
144
145 smsg_out->copy_flags = smsg_in->copy_flags;
146 smsg_out->file_size = smsg_in->file_size;
147 break;
148
149 default:
150 break;
151 }
152 up(&fcopy_transaction.read_sema);
153 return;
154}
155
156/*
157 * Send a response back to the host.
158 */
159
160static void
161fcopy_respond_to_host(int error)
162{
163 struct icmsg_hdr *icmsghdr;
164 u32 buf_len;
165 struct vmbus_channel *channel;
166 u64 req_id;
167
168 /*
169 * Copy the global state needed to complete the transaction. Note that
170 * only one transaction can be active at a time; this is guaranteed by
171 * the file copy protocol implemented by the host and is further
172 * enforced by the "transaction active" state maintained by this
173 * driver.
174 */
175
176 buf_len = fcopy_transaction.recv_len;
177 channel = fcopy_transaction.recv_channel;
178 req_id = fcopy_transaction.recv_req_id;
179
180 fcopy_transaction.active = false;
181
182 icmsghdr = (struct icmsg_hdr *)
183 &recv_buffer[sizeof(struct vmbuspipe_hdr)];
184
185 if (channel->onchannel_callback == NULL)
186 /*
187 * We have raced with util driver being unloaded;
188 * silently return.
189 */
190 return;
191
192 icmsghdr->status = error;
193 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
194 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
195 VM_PKT_DATA_INBAND, 0);
196}
197
198void hv_fcopy_onchannelcallback(void *context)
199{
200 struct vmbus_channel *channel = context;
201 u32 recvlen;
202 u64 requestid;
203 struct hv_fcopy_hdr *fcopy_msg;
204 struct icmsg_hdr *icmsghdr;
205 struct icmsg_negotiate *negop = NULL;
206 int util_fw_version;
207 int fcopy_srv_version;
208
209 if (fcopy_transaction.active) {
210 /*
211 * Defer processing this callback until the
212 * current transaction is complete.
213 */
214 fcopy_transaction.fcopy_context = context;
215 return;
216 }
217
218 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
219 &requestid);
220 if (recvlen <= 0)
221 return;
222
223 icmsghdr = (struct icmsg_hdr *)&recv_buffer[
224 sizeof(struct vmbuspipe_hdr)];
225 if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
226 util_fw_version = UTIL_FW_VERSION;
227 fcopy_srv_version = WIN8_SRV_VERSION;
228 vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
229 util_fw_version, fcopy_srv_version);
230 } else {
231 fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
232 sizeof(struct vmbuspipe_hdr) +
233 sizeof(struct icmsg_hdr)];
234
235 /*
236 * Stash away this global state for completing the
237 * transaction; note transactions are serialized.
238 */
239
240 fcopy_transaction.active = true;
241 fcopy_transaction.recv_len = recvlen;
242 fcopy_transaction.recv_channel = channel;
243 fcopy_transaction.recv_req_id = requestid;
244 fcopy_transaction.fcopy_msg = fcopy_msg;
245
246 /*
247 * Send the information to the user-level daemon.
248 */
249 fcopy_send_data();
250 schedule_delayed_work(&fcopy_work, 5*HZ);
251 return;
252 }
253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
254 vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
255 VM_PKT_DATA_INBAND, 0);
256}
257
258/*
259 * Create a char device that can support read/write for passing
260 * the payload.
261 */
262
263static ssize_t fcopy_read(struct file *file, char __user *buf,
264 size_t count, loff_t *ppos)
265{
266 void *src;
267 size_t copy_size;
268 int operation;
269
270 /*
271 * Wait until there is something to be read.
272 */
273 if (down_interruptible(&fcopy_transaction.read_sema))
274 return -EINTR;
275
276 /*
277 * The channel may be rescinded; in that case the thread blocked on
278 * the semaphore is woken up and the "opened" state is used to
279 * handle this case correctly.
280 */
281 if (!opened)
282 return -ENODEV;
283
284 operation = fcopy_transaction.fcopy_msg->operation;
285
286 if (operation == START_FILE_COPY) {
287 src = &fcopy_transaction.message;
288 copy_size = sizeof(struct hv_start_fcopy);
289 if (count < copy_size)
290 return 0;
291 } else {
292 src = fcopy_transaction.fcopy_msg;
293 copy_size = sizeof(struct hv_do_fcopy);
294 if (count < copy_size)
295 return 0;
296 }
297 if (copy_to_user(buf, src, copy_size))
298 return -EFAULT;
299
300 return copy_size;
301}
302
303static ssize_t fcopy_write(struct file *file, const char __user *buf,
304 size_t count, loff_t *ppos)
305{
306 int response = 0;
307
308 if (count != sizeof(int))
309 return -EINVAL;
310
311 if (copy_from_user(&response, buf, sizeof(int)))
312 return -EFAULT;
313
314 if (in_hand_shake) {
315 if (fcopy_handle_handshake(response))
316 return -EINVAL;
317 return sizeof(int);
318 }
319
320 /*
321 * Complete the transaction by forwarding the result
322 * to the host. But first, cancel the timeout.
323 */
324 if (cancel_delayed_work_sync(&fcopy_work))
325 fcopy_respond_to_host(response);
326
327 return sizeof(int);
328}
329
330static int fcopy_open(struct inode *inode, struct file *f)
331{
332 /*
333 * The user-level daemon that will open this device is
334 * really an extension of this driver. We can have only
335 * one active open at a time.
336 */
337 if (opened)
338 return -EBUSY;
339
340 /*
341 * The daemon is alive; set up the state.
342 */
343 opened = true;
344 return 0;
345}
346
347static int fcopy_release(struct inode *inode, struct file *f)
348{
349 /*
350 * The daemon has exited; reset the state.
351 */
352 in_hand_shake = true;
353 opened = false;
354 return 0;
355}
356
357
358static const struct file_operations fcopy_fops = {
359 .read = fcopy_read,
360 .write = fcopy_write,
361 .release = fcopy_release,
362 .open = fcopy_open,
363};
364
365static struct miscdevice fcopy_misc = {
366 .minor = MISC_DYNAMIC_MINOR,
367 .name = "vmbus/hv_fcopy",
368 .fops = &fcopy_fops,
369};
370
371static int fcopy_dev_init(void)
372{
373 return misc_register(&fcopy_misc);
374}
375
376static void fcopy_dev_deinit(void)
377{
378
379 /*
380 * The device is going away - perhaps because the
381 * host has rescinded the channel. Set up the state so that the
382 * user-level daemon can gracefully exit if it is blocked
383 * on the read semaphore.
384 */
385 opened = false;
386 /*
387 * Signal the semaphore as the device is
388 * going away.
389 */
390 up(&fcopy_transaction.read_sema);
391 misc_deregister(&fcopy_misc);
392}
393
394int hv_fcopy_init(struct hv_util_service *srv)
395{
396 recv_buffer = srv->recv_buffer;
397
398 /*
399 * When this driver loads, the user level daemon that
400 * processes the host requests may not yet be running.
401 * Defer processing channel callbacks until the daemon
402 * has registered.
403 */
404 fcopy_transaction.active = true;
405 sema_init(&fcopy_transaction.read_sema, 0);
406
407 return fcopy_dev_init();
408}
409
410void hv_fcopy_deinit(void)
411{
412 cancel_delayed_work_sync(&fcopy_work);
413 fcopy_dev_deinit();
414}
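For context on the interface implemented above, here is a minimal sketch of the user-space side: the daemon opens the misc device, writes its protocol version once for the handshake, then loops reading requests and writing back an int status. This is illustrative only; the message layout (struct hv_start_fcopy and friends, FCOPY_CURRENT_VERSION) is assumed to be exported through the hyperv uapi header, and the daemon shipped in tools/hv is the authoritative reference.

/* Illustrative user-space sketch -- not part of the kernel patch above. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/hyperv.h>	/* assumed location of the fcopy message layout */

int main(void)
{
	int fd, version = FCOPY_CURRENT_VERSION;
	char buf[8192];		/* large enough for the largest fcopy message */

	fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
	if (fd < 0)
		return 1;

	/* Handshake: the first write carries the daemon's protocol version. */
	if (write(fd, &version, sizeof(version)) != sizeof(version))
		return 1;

	for (;;) {
		int error = 0;	/* 0 on success; reported back to the host */

		/* Blocks on the driver's read_sema until a request arrives. */
		if (read(fd, buf, sizeof(buf)) <= 0)
			break;

		/* ... perform the copy operation described in buf ... */

		/* Complete the transaction before the 5 second timeout fires. */
		if (write(fd, &error, sizeof(error)) != sizeof(error))
			break;
	}
	return 0;
}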
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 09988b289622..ea852537307e 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -113,7 +113,7 @@ kvp_register(int reg_value)
 		kvp_msg->kvp_hdr.operation = reg_value;
 		strcpy(version, HV_DRV_VERSION);
 		msg->len = sizeof(struct hv_kvp_msg);
-		cn_netlink_send(msg, 0, GFP_ATOMIC);
+		cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
 		kfree(msg);
 	}
 }
@@ -435,7 +435,7 @@ kvp_send_key(struct work_struct *dummy)
 	}
 
 	msg->len = sizeof(struct hv_kvp_msg);
-	cn_netlink_send(msg, 0, GFP_ATOMIC);
+	cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
 	kfree(msg);
 
 	return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 0c3546224376..34f14fddb666 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -98,7 +98,7 @@ static void vss_send_op(struct work_struct *dummy)
 	vss_msg->vss_hdr.operation = op;
 	msg->len = sizeof(struct hv_vss_msg);
 
-	cn_netlink_send(msg, 0, GFP_ATOMIC);
+	cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
 	kfree(msg);
 
 	return;
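The cn_netlink_send() changes above (and the dm-log-userspace one further down) are mechanical fallout of a connector API change pulled in this cycle: the function gained an extra parameter, so existing callers pass an additional 0. A hedged sketch of the updated call, assuming the new argument order is (msg, portid, group, gfp_mask) as the four-argument form suggests:

/* Sketch of a connector caller after the API change (argument names assumed). */
#include <linux/connector.h>
#include <linux/slab.h>

static void example_notify(struct cn_msg *msg)
{
	/*
	 * Assumed meaning: second argument is a unicast port id (0 = none),
	 * third is the netlink multicast group (0 = derive from msg->id).
	 */
	cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
	kfree(msg);
}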
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 62dfd246b948..dd761806f0e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,6 +28,7 @@
 #include <linux/reboot.h>
 #include <linux/hyperv.h>
 
+#include "hyperv_vmbus.h"
 
 #define SD_MAJOR 3
 #define SD_MINOR 0
@@ -82,6 +83,12 @@ static struct hv_util_service util_vss = {
 	.util_deinit = hv_vss_deinit,
 };
 
+static struct hv_util_service util_fcopy = {
+	.util_cb = hv_fcopy_onchannelcallback,
+	.util_init = hv_fcopy_init,
+	.util_deinit = hv_fcopy_deinit,
+};
+
 static void perform_shutdown(struct work_struct *dummy)
 {
 	orderly_poweroff(true);
@@ -401,6 +408,10 @@ static const struct hv_vmbus_device_id id_table[] = {
 	{ HV_VSS_GUID,
 	  .driver_data = (unsigned long)&util_vss
 	},
+	/* File copy GUID */
+	{ HV_FCOPY_GUID,
+	  .driver_data = (unsigned long)&util_fcopy
+	},
 	{ },
 };
 
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index e05517616a06..860134da8039 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -559,8 +559,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
-		    struct scatterlist *sglist,
-		    u32 sgcount, bool *signal);
+		    struct kvec *kv_list,
+		    u32 kv_count, bool *signal);
 
 int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 		   u32 buflen);
@@ -669,5 +669,9 @@ int vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
 
+int hv_fcopy_init(struct hv_util_service *);
+void hv_fcopy_deinit(void);
+void hv_fcopy_onchannelcallback(void *);
+
 
 #endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 26c93cf9f6be..15db66b74141 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/hyperv.h>
+#include <linux/uio.h>
 
 #include "hyperv_vmbus.h"
 
@@ -387,23 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount, bool *signal)
+		    struct kvec *kv_list, u32 kv_count, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 totalbytes_towrite = 0;
 
-	struct scatterlist *sg;
 	u32 next_write_location;
 	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
-	for_each_sg(sglist, sg, sgcount, i)
-	{
-		totalbytes_towrite += sg->length;
-	}
+	for (i = 0; i < kv_count; i++)
+		totalbytes_towrite += kv_list[i].iov_len;
 
 	totalbytes_towrite += sizeof(u64);
 
@@ -427,12 +425,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
 	old_write = next_write_location;
 
-	for_each_sg(sglist, sg, sgcount, i)
-	{
+	for (i = 0; i < kv_count; i++) {
 		next_write_location = hv_copyto_ringbuffer(outring_info,
 						     next_write_location,
-						     sg_virt(sg),
-						     sg->length);
+						     kv_list[i].iov_base,
+						     kv_list[i].iov_len);
 	}
 
 	/* Set previous packet start */
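With the switch from scatterlists to kvecs above, callers of hv_ringbuffer_write() now describe their buffers with plain iov_base/iov_len pairs from <linux/uio.h>. A rough sketch of the calling pattern (the helper name and the header/payload split are illustrative; see drivers/hv/channel.c for the real callers):

#include <linux/uio.h>
#include "hyperv_vmbus.h"

/* Illustrative: hand a descriptor, a payload and the trailing u64 to the ring. */
static int example_ring_write(struct hv_ring_buffer_info *outring,
			      void *desc, u32 desclen,
			      void *payload, u32 payloadlen, bool *signal)
{
	struct kvec bufferlist[3];
	u64 aligned_data = 0;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len  = desclen;
	bufferlist[1].iov_base = payload;
	bufferlist[1].iov_len  = payloadlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len  = sizeof(u64);

	return hv_ringbuffer_write(outring, bufferlist, 3, signal);
}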
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 3f0a95290e14..8e53a3c2607e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -43,6 +43,12 @@ static struct tasklet_struct msg_dpc;
 static struct completion probe_event;
 static int irq;
 
+struct resource hyperv_mmio = {
+	.name  = "hyperv mmio",
+	.flags = IORESOURCE_MEM,
+};
+EXPORT_SYMBOL_GPL(hyperv_mmio);
+
 static int vmbus_exists(void)
 {
 	if (hv_acpi_dev == NULL)
@@ -843,18 +849,21 @@ void vmbus_device_unregister(struct hv_device *device_obj)
 
 
 /*
- * VMBUS is an acpi enumerated device. Get the the IRQ information
- * from DSDT.
+ * VMBUS is an acpi enumerated device. Get the information we
+ * need from DSDT.
  */
 
-static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 {
-
-	if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
-		struct acpi_resource_irq *irqp;
-		irqp = &res->data.irq;
-
-		*((unsigned int *)irq) = irqp->interrupts[0];
+	switch (res->type) {
+	case ACPI_RESOURCE_TYPE_IRQ:
+		irq = res->data.irq.interrupts[0];
+		break;
+
+	case ACPI_RESOURCE_TYPE_ADDRESS64:
+		hyperv_mmio.start = res->data.address64.minimum;
+		hyperv_mmio.end = res->data.address64.maximum;
+		break;
 	}
 
 	return AE_OK;
@@ -863,18 +872,34 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
 static int vmbus_acpi_add(struct acpi_device *device)
 {
 	acpi_status result;
+	int ret_val = -ENODEV;
 
 	hv_acpi_dev = device;
 
 	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
-					vmbus_walk_resources, &irq);
+					vmbus_walk_resources, NULL);
 
-	if (ACPI_FAILURE(result)) {
-		complete(&probe_event);
-		return -ENODEV;
+	if (ACPI_FAILURE(result))
+		goto acpi_walk_err;
+	/*
+	 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
+	 * has the mmio ranges. Get that.
+	 */
+	if (device->parent) {
+		result = acpi_walk_resources(device->parent->handle,
+					METHOD_NAME__CRS,
+					vmbus_walk_resources, NULL);
+
+		if (ACPI_FAILURE(result))
+			goto acpi_walk_err;
+		if (hyperv_mmio.start && hyperv_mmio.end)
+			request_resource(&iomem_resource, &hyperv_mmio);
 	}
+	ret_val = 0;
+
+acpi_walk_err:
 	complete(&probe_event);
-	return 0;
+	return ret_val;
 }
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
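The exported hyperv_mmio resource gives other VMBus drivers a root to carve child MMIO ranges out of the window discovered from the parent VMOD node. A hypothetical consumer could look roughly like this (the size, alignment and the consuming driver are assumptions; only hyperv_mmio itself comes from the patch above):

#include <linux/ioport.h>

extern struct resource hyperv_mmio;	/* exported by vmbus_drv.c above */

/* Hypothetical: reserve a 4 MiB child window inside the VMBus MMIO range. */
static int example_claim_mmio(struct resource *child)
{
	child->name = "example hyperv consumer";
	child->flags = IORESOURCE_MEM;

	return allocate_resource(&hyperv_mmio, child, 0x400000,
				 hyperv_mmio.start, hyperv_mmio.end,
				 0x400000, NULL, NULL);
}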
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 2209f28441e9..5c63f0918d12 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -155,6 +155,16 @@ config MCP3422
 	  This driver can also be built as a module. If so, the module will be
 	  called mcp3422.
 
+config MEN_Z188_ADC
+	tristate "MEN 16z188 ADC IP Core support"
+	depends on MCB
+	help
+	  Say yes here to enable support for the MEN 16z188 ADC IP-Core on an
+	  MCB carrier.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called men_z188_adc.
+
 config NAU7802
 	tristate "Nuvoton NAU7802 ADC driver"
 	depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ba9a10a24cd0..85a4a045f1f0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
 obj-$(CONFIG_MAX1363) += max1363.o
 obj-$(CONFIG_MCP320X) += mcp320x.o
 obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
 obj-$(CONFIG_NAU7802) += nau7802.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
new file mode 100644
index 000000000000..6989c16aec2b
--- /dev/null
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -0,0 +1,172 @@
1/*
2 * MEN 16z188 Analog to Digital Converter
3 *
4 * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
5 * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; version 2 of the License.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mcb.h>
15#include <linux/io.h>
16#include <linux/iio/iio.h>
17
18#define Z188_ADC_MAX_CHAN 8
19#define Z188_ADC_GAIN 0x0700000
20#define Z188_MODE_VOLTAGE BIT(27)
21#define Z188_CFG_AUTO 0x1
22#define Z188_CTRL_REG 0x40
23
24#define ADC_DATA(x) (((x) >> 2) & 0x7ffffc)
25#define ADC_OVR(x) ((x) & 0x1)
26
27struct z188_adc {
28 struct resource *mem;
29 void __iomem *base;
30};
31
32#define Z188_ADC_CHANNEL(idx) { \
33 .type = IIO_VOLTAGE, \
34 .indexed = 1, \
35 .channel = (idx), \
36 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
37}
38
39static const struct iio_chan_spec z188_adc_iio_channels[] = {
40 Z188_ADC_CHANNEL(0),
41 Z188_ADC_CHANNEL(1),
42 Z188_ADC_CHANNEL(2),
43 Z188_ADC_CHANNEL(3),
44 Z188_ADC_CHANNEL(4),
45 Z188_ADC_CHANNEL(5),
46 Z188_ADC_CHANNEL(6),
47 Z188_ADC_CHANNEL(7),
48};
49
50static int z188_iio_read_raw(struct iio_dev *iio_dev,
51 struct iio_chan_spec const *chan,
52 int *val,
53 int *val2,
54 long info)
55{
56 struct z188_adc *adc = iio_priv(iio_dev);
57 int ret;
58 u16 tmp;
59
60 switch (info) {
61 case IIO_CHAN_INFO_RAW:
62 tmp = readw(adc->base + chan->channel * 4);
63
64 if (ADC_OVR(tmp)) {
65 dev_info(&iio_dev->dev,
66 "Oversampling error on ADC channel %d\n",
67 chan->channel);
68 return -EIO;
69 }
70 *val = ADC_DATA(tmp);
71 ret = IIO_VAL_INT;
72 break;
73 default:
74 ret = -EINVAL;
75 break;
76 }
77
78 return ret;
79}
80
81static struct iio_info z188_adc_info = {
82 .read_raw = &z188_iio_read_raw,
83 .driver_module = THIS_MODULE,
84};
85
86static void men_z188_config_channels(void __iomem *addr)
87{
88 int i;
89 u32 cfg;
90 u32 ctl;
91
92 ctl = readl(addr + Z188_CTRL_REG);
93 ctl |= Z188_CFG_AUTO;
94 writel(ctl, addr + Z188_CTRL_REG);
95
96 for (i = 0; i < Z188_ADC_MAX_CHAN; i++) {
97 cfg = readl(addr + i);
98 cfg &= ~Z188_ADC_GAIN;
99 cfg |= Z188_MODE_VOLTAGE;
100 writel(cfg, addr + i);
101 }
102}
103
104static int men_z188_probe(struct mcb_device *dev,
105 const struct mcb_device_id *id)
106{
107 struct z188_adc *adc;
108 struct iio_dev *indio_dev;
109 struct resource *mem;
110
111 indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
112 if (!indio_dev)
113 return -ENOMEM;
114
115 adc = iio_priv(indio_dev);
116 indio_dev->name = "z188-adc";
117 indio_dev->dev.parent = &dev->dev;
118 indio_dev->info = &z188_adc_info;
119 indio_dev->modes = INDIO_DIRECT_MODE;
120 indio_dev->channels = z188_adc_iio_channels;
121 indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
122
123 mem = mcb_request_mem(dev, "z188-adc");
124 if (IS_ERR(mem))
125 return PTR_ERR(mem);
126
127 adc->base = ioremap(mem->start, resource_size(mem));
128 if (adc->base == NULL)
129 goto err;
130
131 men_z188_config_channels(adc->base);
132
133 adc->mem = mem;
134 mcb_set_drvdata(dev, indio_dev);
135
136 return iio_device_register(indio_dev);
137
138err:
139 mcb_release_mem(mem);
140 return -ENXIO;
141}
142
143static void men_z188_remove(struct mcb_device *dev)
144{
145 struct iio_dev *indio_dev = mcb_get_drvdata(dev);
146 struct z188_adc *adc = iio_priv(indio_dev);
147
148 iio_device_unregister(indio_dev);
149 iounmap(adc->base);
150 mcb_release_mem(adc->mem);
151}
152
153static const struct mcb_device_id men_z188_ids[] = {
154 { .device = 0xbc },
155};
156MODULE_DEVICE_TABLE(mcb, men_z188_ids);
157
158static struct mcb_driver men_z188_driver = {
159 .driver = {
160 .name = "z188-adc",
161 .owner = THIS_MODULE,
162 },
163 .probe = men_z188_probe,
164 .remove = men_z188_remove,
165 .id_table = men_z188_ids,
166};
167module_mcb_driver(men_z188_driver);
168
169MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
170MODULE_LICENSE("GPL");
171MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
172MODULE_ALIAS("mcb:16z188");
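Because the driver only implements IIO_CHAN_INFO_RAW for its eight voltage channels, samples are read through the standard IIO sysfs attributes rather than a character device. A small user-space sketch, assuming the core registers as iio:device0 (the index depends on what other IIO devices exist on the system):

/* Illustrative only; the iio:deviceN index is system-dependent. */
#include <stdio.h>

#define Z188_CH0 "/sys/bus/iio/devices/iio:device0/in_voltage0_raw"

int main(void)
{
	FILE *f = fopen(Z188_CH0, "r");
	int raw;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &raw) != 1) {
		fclose(f);
		return 1;
	}
	printf("channel 0 raw value: %d\n", raw);
	fclose(f);
	return 0;
}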
diff --git a/drivers/mcb/Kconfig b/drivers/mcb/Kconfig
new file mode 100644
index 000000000000..e9a6976e1010
--- /dev/null
+++ b/drivers/mcb/Kconfig
@@ -0,0 +1,31 @@
1#
2# MEN Chameleon Bus (MCB) support
3#
4
5menuconfig MCB
6 tristate "MCB support"
7 default n
8 depends on HAS_IOMEM
9 help
10
11 The MCB (MEN Chameleon Bus) is a bus specific to MEN Mikroelektronik
12 FPGA based devices. It is used to identify MCB based IP-Cores within
13 an FPGA and provide the necessary framework for instantiating drivers
14 for these devices.
15
16 If built as a module, the module is called mcb.ko
17
18if MCB
19config MCB_PCI
20 tristate "PCI based MCB carrier"
21 default n
22 depends on PCI
23 help
24
25 This is an MCB carrier on a PCI device. Both PCI-attached on-board
26 FPGAs as well as CompactPCI-attached MCB FPGAs are supported by
27 this driver.
28
29 If built as a module, the module is called mcb-pci.ko
30
31endif # MCB
diff --git a/drivers/mcb/Makefile b/drivers/mcb/Makefile
new file mode 100644
index 000000000000..1ae141311def
--- /dev/null
+++ b/drivers/mcb/Makefile
@@ -0,0 +1,7 @@
1
2obj-$(CONFIG_MCB) += mcb.o
3
4mcb-y += mcb-core.o
5mcb-y += mcb-parse.o
6
7obj-$(CONFIG_MCB_PCI) += mcb-pci.o
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
new file mode 100644
index 000000000000..bbe12932d404
--- /dev/null
+++ b/drivers/mcb/mcb-core.c
@@ -0,0 +1,414 @@
1/*
2 * MEN Chameleon Bus.
3 *
4 * Copyright (C) 2013 MEN Mikroelektronik GmbH (www.men.de)
5 * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; version 2 of the License.
10 */
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/types.h>
15#include <linux/idr.h>
16#include <linux/mcb.h>
17
18static DEFINE_IDA(mcb_ida);
19
20static const struct mcb_device_id *mcb_match_id(const struct mcb_device_id *ids,
21 struct mcb_device *dev)
22{
23 if (ids) {
24 while (ids->device) {
25 if (ids->device == dev->id)
26 return ids;
27 ids++;
28 }
29 }
30
31 return NULL;
32}
33
34static int mcb_match(struct device *dev, struct device_driver *drv)
35{
36 struct mcb_driver *mdrv = to_mcb_driver(drv);
37 struct mcb_device *mdev = to_mcb_device(dev);
38 const struct mcb_device_id *found_id;
39
40 found_id = mcb_match_id(mdrv->id_table, mdev);
41 if (found_id)
42 return 1;
43
44 return 0;
45}
46
47static int mcb_uevent(struct device *dev, struct kobj_uevent_env *env)
48{
49 struct mcb_device *mdev = to_mcb_device(dev);
50 int ret;
51
52 ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
53 if (ret)
54 return -ENOMEM;
55
56 return 0;
57}
58
59static int mcb_probe(struct device *dev)
60{
61 struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
62 struct mcb_device *mdev = to_mcb_device(dev);
63 const struct mcb_device_id *found_id;
64
65 found_id = mcb_match_id(mdrv->id_table, mdev);
66 if (!found_id)
67 return -ENODEV;
68
69 return mdrv->probe(mdev, found_id);
70}
71
72static int mcb_remove(struct device *dev)
73{
74 struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
75 struct mcb_device *mdev = to_mcb_device(dev);
76
77 mdrv->remove(mdev);
78
79 put_device(&mdev->dev);
80
81 return 0;
82}
83
84static void mcb_shutdown(struct device *dev)
85{
86 struct mcb_device *mdev = to_mcb_device(dev);
87 struct mcb_driver *mdrv = mdev->driver;
88
89 if (mdrv && mdrv->shutdown)
90 mdrv->shutdown(mdev);
91}
92
93static struct bus_type mcb_bus_type = {
94 .name = "mcb",
95 .match = mcb_match,
96 .uevent = mcb_uevent,
97 .probe = mcb_probe,
98 .remove = mcb_remove,
99 .shutdown = mcb_shutdown,
100};
101
102/**
103 * __mcb_register_driver() - Register a @mcb_driver with the system
104 * @drv: The @mcb_driver
105 * @owner: The @mcb_driver's module
106 * @mod_name: The name of the @mcb_driver's module
107 *
108 * Register a @mcb_driver with the system, after checking that its
109 * .probe and .remove methods are provided.
110 */
111int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
112 const char *mod_name)
113{
114 if (!drv->probe || !drv->remove)
115 return -EINVAL;
116
117 drv->driver.owner = owner;
118 drv->driver.bus = &mcb_bus_type;
119 drv->driver.mod_name = mod_name;
120
121 return driver_register(&drv->driver);
122}
123EXPORT_SYMBOL_GPL(__mcb_register_driver);
124
125/**
126 * mcb_unregister_driver() - Unregister a @mcb_driver from the system
127 * @drv: The @mcb_driver
128 *
129 * Unregister a @mcb_driver from the system.
130 */
131void mcb_unregister_driver(struct mcb_driver *drv)
132{
133 driver_unregister(&drv->driver);
134}
135EXPORT_SYMBOL_GPL(mcb_unregister_driver);
136
137static void mcb_release_dev(struct device *dev)
138{
139 struct mcb_device *mdev = to_mcb_device(dev);
140
141 mcb_bus_put(mdev->bus);
142 kfree(mdev);
143}
144
145/**
146 * mcb_device_register() - Register a mcb_device
147 * @bus: The @mcb_bus of the device
148 * @dev: The @mcb_device
149 *
150 * Register a specific @mcb_device at a @mcb_bus and the system itself.
151 */
152int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
153{
154 int ret;
155 int device_id;
156
157 device_initialize(&dev->dev);
158 dev->dev.bus = &mcb_bus_type;
159 dev->dev.parent = bus->dev.parent;
160 dev->dev.release = mcb_release_dev;
161
162 device_id = dev->id;
163 dev_set_name(&dev->dev, "mcb%d-16z%03d-%d:%d:%d",
164 bus->bus_nr, device_id, dev->inst, dev->group, dev->var);
165
166 ret = device_add(&dev->dev);
167 if (ret < 0) {
168 pr_err("Failed registering device 16z%03d on bus mcb%d (%d)\n",
169 device_id, bus->bus_nr, ret);
170 goto out;
171 }
172
173 return 0;
174
175out:
176
177 return ret;
178}
179EXPORT_SYMBOL_GPL(mcb_device_register);
180
181/**
182 * mcb_alloc_bus() - Allocate a new @mcb_bus
183 *
184 * Allocate a new @mcb_bus.
185 */
186struct mcb_bus *mcb_alloc_bus(void)
187{
188 struct mcb_bus *bus;
189 int bus_nr;
190
191 bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
192 if (!bus)
193 return NULL;
194
195 bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
196 if (bus_nr < 0) {
197 kfree(bus);
198 return ERR_PTR(bus_nr);
199 }
200
201 INIT_LIST_HEAD(&bus->children);
202 bus->bus_nr = bus_nr;
203
204 return bus;
205}
206EXPORT_SYMBOL_GPL(mcb_alloc_bus);
207
208static int __mcb_devices_unregister(struct device *dev, void *data)
209{
210 device_unregister(dev);
211 return 0;
212}
213
214static void mcb_devices_unregister(struct mcb_bus *bus)
215{
216 bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister);
217}
218/**
219 * mcb_release_bus() - Free a @mcb_bus
220 * @bus: The @mcb_bus to release
221 *
222 * Release an allocated @mcb_bus from the system.
223 */
224void mcb_release_bus(struct mcb_bus *bus)
225{
226 mcb_devices_unregister(bus);
227
228 ida_simple_remove(&mcb_ida, bus->bus_nr);
229
230 kfree(bus);
231}
232EXPORT_SYMBOL_GPL(mcb_release_bus);
233
234/**
235 * mcb_bus_get() - Increment refcnt
236 * @bus: The @mcb_bus
237 *
238 * Get a @mcb_bus' ref
239 */
240struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
241{
242 if (bus)
243 get_device(&bus->dev);
244
245 return bus;
246}
247EXPORT_SYMBOL_GPL(mcb_bus_get);
248
249/**
250 * mcb_bus_put() - Decrement refcnt
251 * @bus: The @mcb_bus
252 *
253 * Release a @mcb_bus' ref
254 */
255void mcb_bus_put(struct mcb_bus *bus)
256{
257 if (bus)
258 put_device(&bus->dev);
259}
260EXPORT_SYMBOL_GPL(mcb_bus_put);
261
262/**
263 * mcb_alloc_dev() - Allocate a device
264 * @bus: The @mcb_bus the device is part of
265 *
266 * Allocate a @mcb_device and attach it to @bus.
267 */
268struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
269{
270 struct mcb_device *dev;
271
272 dev = kzalloc(sizeof(struct mcb_device), GFP_KERNEL);
273 if (!dev)
274 return NULL;
275
276 INIT_LIST_HEAD(&dev->bus_list);
277 dev->bus = bus;
278
279 return dev;
280}
281EXPORT_SYMBOL_GPL(mcb_alloc_dev);
282
283/**
284 * mcb_free_dev() - Free @mcb_device
285 * @dev: The device to free
286 *
287 * Free a @mcb_device
288 */
289void mcb_free_dev(struct mcb_device *dev)
290{
291 kfree(dev);
292}
293EXPORT_SYMBOL_GPL(mcb_free_dev);
294
295static int __mcb_bus_add_devices(struct device *dev, void *data)
296{
297 struct mcb_device *mdev = to_mcb_device(dev);
298 int retval;
299
300 if (mdev->is_added)
301 return 0;
302
303 retval = device_attach(dev);
304 if (retval < 0)
305 dev_err(dev, "Error adding device (%d)\n", retval);
306
307 mdev->is_added = true;
308
309 return 0;
310}
311
312static int __mcb_bus_add_child(struct device *dev, void *data)
313{
314 struct mcb_device *mdev = to_mcb_device(dev);
315 struct mcb_bus *child;
316
317 BUG_ON(!mdev->is_added);
318 child = mdev->subordinate;
319
320 if (child)
321 mcb_bus_add_devices(child);
322
323 return 0;
324}
325
326/**
327 * mcb_bus_add_devices() - Add devices in the bus' internal device list
328 * @bus: The @mcb_bus whose devices should be added
329 *
330 * Add devices in the bus' internal device list to the system.
331 */
332void mcb_bus_add_devices(const struct mcb_bus *bus)
333{
334 bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
335 bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_child);
336
337}
338EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
339
340/**
341 * mcb_request_mem() - Request memory
342 * @dev: The @mcb_device the memory is for
343 * @name: The name for the memory reference.
344 *
345 * Request memory for a @mcb_device. If @name is NULL the driver name will
346 * be used.
347 */
348struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
349{
350 struct resource *mem;
351 u32 size;
352
353 if (!name)
354 name = dev->dev.driver->name;
355
356 size = resource_size(&dev->mem);
357
358 mem = request_mem_region(dev->mem.start, size, name);
359 if (!mem)
360 return ERR_PTR(-EBUSY);
361
362 return mem;
363}
364EXPORT_SYMBOL_GPL(mcb_request_mem);
365
366/**
367 * mcb_release_mem() - Release memory requested by device
368 * @mem: The memory resource to be released
369 *
370 * Release memory that was previously requested via mcb_request_mem().
371 */
372void mcb_release_mem(struct resource *mem)
373{
374 u32 size;
375
376 size = resource_size(mem);
377 release_mem_region(mem->start, size);
378}
379EXPORT_SYMBOL_GPL(mcb_release_mem);
380
381/**
382 * mcb_get_irq() - Get device's IRQ number
383 * @dev: The @mcb_device the IRQ is for
384 *
385 * Get the IRQ number of a given @mcb_device.
386 */
387int mcb_get_irq(struct mcb_device *dev)
388{
389 struct resource *irq = &dev->irq;
390
391 return irq->start;
392}
393EXPORT_SYMBOL_GPL(mcb_get_irq);
394
395static int mcb_init(void)
396{
397 return bus_register(&mcb_bus_type);
398}
399
400static void mcb_exit(void)
401{
402 bus_unregister(&mcb_bus_type);
403}
404
405/* mcb must be initialized after PCI but before the chameleon drivers.
406 * That means we must use some initcall between subsys_initcall and
407 * device_initcall.
408 */
409fs_initcall(mcb_init);
410module_exit(mcb_exit);
411
412MODULE_DESCRIPTION("MEN Chameleon Bus Driver");
413MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
414MODULE_LICENSE("GPL v2");
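Tying the bus API above together, a client driver for an MCB IP core is expected to look much like the men_z188_adc driver earlier in this series: match on a 16zXXX device id, claim the memory window with mcb_request_mem() (passing NULL falls back to the driver name), and register via module_mcb_driver(). A condensed, hypothetical skeleton (the device id 0x7d and all "example" names are invented for illustration):

/* Hypothetical MCB client driver skeleton -- illustration only. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/mcb.h>

static int example_probe(struct mcb_device *mdev,
			 const struct mcb_device_id *id)
{
	struct resource *mem;

	/* Claim the IP core's address window parsed from the chameleon table. */
	mem = mcb_request_mem(mdev, NULL);	/* NULL: fall back to the driver name */
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	dev_info(&mdev->dev, "16z%03d at %pR, irq %d\n",
		 mdev->id, mem, mcb_get_irq(mdev));

	mcb_set_drvdata(mdev, mem);
	return 0;
}

static void example_remove(struct mcb_device *mdev)
{
	mcb_release_mem(mcb_get_drvdata(mdev));
}

static const struct mcb_device_id example_ids[] = {
	{ .device = 0x7d },	/* invented device id for this example */
	{ }
};
MODULE_DEVICE_TABLE(mcb, example_ids);

static struct mcb_driver example_driver = {
	.driver = {
		.name  = "example-mcb",
		.owner = THIS_MODULE,
	},
	.probe    = example_probe,
	.remove   = example_remove,
	.id_table = example_ids,
};
module_mcb_driver(example_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative MCB client driver skeleton");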
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
new file mode 100644
index 000000000000..f956ef26c0ce
--- /dev/null
+++ b/drivers/mcb/mcb-internal.h
@@ -0,0 +1,118 @@
1#ifndef __MCB_INTERNAL
2#define __MCB_INTERNAL
3
4#include <linux/types.h>
5
6#define PCI_VENDOR_ID_MEN 0x1a88
7#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
8#define CHAMELEON_FILENAME_LEN 12
9#define CHAMELEONV2_MAGIC 0xabce
10
11enum chameleon_descriptor_type {
12 CHAMELEON_DTYPE_GENERAL = 0x0,
13 CHAMELEON_DTYPE_BRIDGE = 0x1,
14 CHAMELEON_DTYPE_CPU = 0x2,
15 CHAMELEON_DTYPE_BAR = 0x3,
16 CHAMELEON_DTYPE_END = 0xf,
17};
18
19enum chameleon_bus_type {
20 CHAMELEON_BUS_WISHBONE,
21 CHAMELEON_BUS_AVALON,
22 CHAMELEON_BUS_LPC,
23 CHAMELEON_BUS_ISA,
24};
25
26/**
27 * struct chameleon_fpga_header
28 *
29 * @revision: Revision of Chameleon table in FPGA
30 * @model: Chameleon table model ASCII char
31 * @minor: Revision minor
32 * @bus_type: Bus type (usually %CHAMELEON_BUS_WISHBONE)
33 * @magic: Chameleon header magic number (0xabce for version 2)
34 * @reserved: Reserved
35 * @filename: Filename of FPGA bitstream
36 */
37struct chameleon_fpga_header {
38 u8 revision;
39 char model;
40 u8 minor;
41 u8 bus_type;
42 u16 magic;
43 u16 reserved;
44 /* This one has no '\0' at the end!!! */
45 char filename[CHAMELEON_FILENAME_LEN];
46} __packed;
47#define HEADER_MAGIC_OFFSET 0x4
48
49/**
50 * struct chameleon_gdd - Chameleon General Device Descriptor
51 *
52 * @irq: the position in the FPGA's IRQ controller vector
53 * @rev: the revision of the variant's implementation
54 * @var: the variant of the IP core
55 * @dev: the device the IP core is
56 * @dtype: device descriptor type
57 * @bar: BAR offset that must be added to module offset
58 * @inst: the instance number of the device, 0 is first instance
59 * @group: the group the device belongs to (0 = no group)
60 * @reserved: reserved
61 * @offset: beginning of the address window of desired module
62 * @size: size of the module's address window
63 */
64struct chameleon_gdd {
65 __le32 reg1;
66 __le32 reg2;
67 __le32 offset;
68 __le32 size;
69
70} __packed;
71
72/* GDD Register 1 fields */
73#define GDD_IRQ(x) ((x) & 0x1f)
74#define GDD_REV(x) (((x) >> 5) & 0x3f)
75#define GDD_VAR(x) (((x) >> 11) & 0x3f)
76#define GDD_DEV(x) (((x) >> 18) & 0x3ff)
77#define GDD_DTY(x) (((x) >> 28) & 0xf)
78
79/* GDD Register 2 fields */
80#define GDD_BAR(x) ((x) & 0x7)
81#define GDD_INS(x) (((x) >> 3) & 0x3f)
82#define GDD_GRP(x) (((x) >> 9) & 0x3f)
83
84/**
85 * struct chameleon_bdd - Chameleon Bridge Device Descriptor
86 *
87 * @irq: the position in the FPGA's IRQ controller vector
88 * @rev: the revision of the variant's implementation
89 * @var: the variant of the IP core
90 * @dev: the device the IP core is
91 * @dtype: device descriptor type
92 * @bar: BAR offset that must be added to module offset
93 * @inst: the instance number of the device, 0 is first instance
94 * @dbar: destination bar from the bus _behind_ the bridge
95 * @chamoff: offset within the BAR of the source bus
96 * @offset: beginning of the address window of the desired module
97 * @size: size of the module's address window
98 */
99struct chameleon_bdd {
100 unsigned int irq:6;
101 unsigned int rev:6;
102 unsigned int var:6;
103 unsigned int dev:10;
104 unsigned int dtype:4;
105 unsigned int bar:3;
106 unsigned int inst:6;
107 unsigned int dbar:3;
108 unsigned int group:6;
109 unsigned int reserved:14;
110 u32 chamoff;
111 u32 offset;
112 u32 size;
113} __packed;
114
115int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
116 void __iomem *base);
117
118#endif
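As a quick illustration of how the GDD register layout above decodes (the sample word is invented; 188 is the device number that the men_z188_adc driver earlier in this merge matches as 0xbc):

/* Stand-alone decoding example using the GDD register 1 field macros. */
#include <stdio.h>
#include <stdint.h>

#define GDD_IRQ(x) ((x) & 0x1f)
#define GDD_REV(x) (((x) >> 5) & 0x3f)
#define GDD_VAR(x) (((x) >> 11) & 0x3f)
#define GDD_DEV(x) (((x) >> 18) & 0x3ff)
#define GDD_DTY(x) (((x) >> 28) & 0xf)

int main(void)
{
	/* Invented reg1 value: dtype=0 (general), dev=188, var=0, rev=1, irq=3 */
	uint32_t reg1 = (0u << 28) | (188u << 18) | (0u << 11) | (1u << 5) | 3u;

	/* Prints: 16z188 rev 1 var 0 irq 3 dtype 0 */
	printf("16z%03u rev %u var %u irq %u dtype %u\n",
	       GDD_DEV(reg1), GDD_REV(reg1), GDD_VAR(reg1),
	       GDD_IRQ(reg1), GDD_DTY(reg1));
	return 0;
}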
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
new file mode 100644
index 000000000000..d1278b5f3028
--- /dev/null
+++ b/drivers/mcb/mcb-parse.c
@@ -0,0 +1,159 @@
1#include <linux/types.h>
2#include <linux/ioport.h>
3#include <linux/slab.h>
4#include <linux/export.h>
5#include <linux/io.h>
6#include <linux/mcb.h>
7
8#include "mcb-internal.h"
9
10struct mcb_parse_priv {
11 phys_addr_t mapbase;
12 void __iomem *base;
13};
14
15#define for_each_chameleon_cell(dtype, p) \
16 for ((dtype) = get_next_dtype((p)); \
17 (dtype) != CHAMELEON_DTYPE_END; \
18 (dtype) = get_next_dtype((p)))
19
20static inline uint32_t get_next_dtype(void __iomem *p)
21{
22 uint32_t dtype;
23
24 dtype = readl(p);
25 return dtype >> 28;
26}
27
28static int chameleon_parse_bdd(struct mcb_bus *bus,
29 phys_addr_t mapbase,
30 void __iomem *base)
31{
32 return 0;
33}
34
35static int chameleon_parse_gdd(struct mcb_bus *bus,
36 phys_addr_t mapbase,
37 void __iomem *base)
38{
39 struct chameleon_gdd __iomem *gdd =
40 (struct chameleon_gdd __iomem *) base;
41 struct mcb_device *mdev;
42 u32 offset;
43 u32 size;
44 int ret;
45 __le32 reg1;
46 __le32 reg2;
47
48 mdev = mcb_alloc_dev(bus);
49 if (!mdev)
50 return -ENOMEM;
51
52 reg1 = readl(&gdd->reg1);
53 reg2 = readl(&gdd->reg2);
54 offset = readl(&gdd->offset);
55 size = readl(&gdd->size);
56
57 mdev->id = GDD_DEV(reg1);
58 mdev->rev = GDD_REV(reg1);
59 mdev->var = GDD_VAR(reg1);
60 mdev->bar = GDD_BAR(reg1);
61 mdev->group = GDD_GRP(reg2);
62 mdev->inst = GDD_INS(reg2);
63
64 pr_debug("Found a 16z%03d\n", mdev->id);
65
66 mdev->irq.start = GDD_IRQ(reg1);
67 mdev->irq.end = GDD_IRQ(reg1);
68 mdev->irq.flags = IORESOURCE_IRQ;
69
70 mdev->mem.start = mapbase + offset;
71 mdev->mem.end = mdev->mem.start + size - 1;
72 mdev->mem.flags = IORESOURCE_MEM;
73
74 mdev->is_added = false;
75
76 ret = mcb_device_register(bus, mdev);
77 if (ret < 0)
78 goto err;
79
80 return 0;
81
82err:
83 mcb_free_dev(mdev);
84
85 return ret;
86}
87
88int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
89 void __iomem *base)
90{
91 char __iomem *p = base;
92 struct chameleon_fpga_header *header;
93 uint32_t dtype;
94 int num_cells = 0;
95 int ret = 0;
96 u32 hsize;
97
98 hsize = sizeof(struct chameleon_fpga_header);
99
100 header = kzalloc(hsize, GFP_KERNEL);
101 if (!header)
102 return -ENOMEM;
103
104 /* Extract header information */
105 memcpy_fromio(header, p, hsize);
106 /* We only support chameleon v2 at the moment */
107 header->magic = le16_to_cpu(header->magic);
108 if (header->magic != CHAMELEONV2_MAGIC) {
109 pr_err("Unsupported chameleon version 0x%x\n",
110 header->magic);
111 kfree(header);
112 return -ENODEV;
113 }
114 p += hsize;
115
116 pr_debug("header->revision = %d\n", header->revision);
117 pr_debug("header->model = 0x%x ('%c')\n", header->model,
118 header->model);
119 pr_debug("header->minor = %d\n", header->minor);
120 pr_debug("header->bus_type = 0x%x\n", header->bus_type);
121
122
123 pr_debug("header->magic = 0x%x\n", header->magic);
124 pr_debug("header->filename = \"%.*s\"\n", CHAMELEON_FILENAME_LEN,
125 header->filename);
126
127 for_each_chameleon_cell(dtype, p) {
128 switch (dtype) {
129 case CHAMELEON_DTYPE_GENERAL:
130 ret = chameleon_parse_gdd(bus, mapbase, p);
131 if (ret < 0)
132 goto out;
133 p += sizeof(struct chameleon_gdd);
134 break;
135 case CHAMELEON_DTYPE_BRIDGE:
136 chameleon_parse_bdd(bus, mapbase, p);
137 p += sizeof(struct chameleon_bdd);
138 break;
139 case CHAMELEON_DTYPE_END:
140 break;
141 default:
142 pr_err("Invalid chameleon descriptor type 0x%x\n",
143 dtype);
			ret = -EINVAL;
144			goto out;
145 }
146 num_cells++;
147 }
148
149 if (num_cells == 0)
150 num_cells = -EINVAL;
151
152 kfree(header);
153 return num_cells;
154
155out:
156 kfree(header);
157 return ret;
158}
159EXPORT_SYMBOL_GPL(chameleon_parse_cells);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
new file mode 100644
index 000000000000..99c742cbfb5b
--- /dev/null
+++ b/drivers/mcb/mcb-pci.c
@@ -0,0 +1,114 @@
1/*
2 * MEN Chameleon Bus.
3 *
4 * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
5 * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; version 2 of the License.
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/mcb.h>
15
16#include "mcb-internal.h"
17
18struct priv {
19 struct mcb_bus *bus;
20 void __iomem *base;
21};
22
23static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
24{
25 struct priv *priv;
26 phys_addr_t mapbase;
27 int ret;
28 int num_cells;
29 unsigned long flags;
30
31 priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
32 if (!priv)
33 return -ENOMEM;
34
35 ret = pci_enable_device(pdev);
36 if (ret) {
37 dev_err(&pdev->dev, "Failed to enable PCI device\n");
38 return -ENODEV;
39 }
40
41 mapbase = pci_resource_start(pdev, 0);
42 if (!mapbase) {
43 dev_err(&pdev->dev, "No PCI resource\n");
		ret = -ENODEV;
44 goto err_start;
45 }
46
47 ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
48 if (ret) {
49 dev_err(&pdev->dev, "Failed to request PCI BARs\n");
50 goto err_start;
51 }
52
53 priv->base = pci_iomap(pdev, 0, 0);
54 if (!priv->base) {
55 dev_err(&pdev->dev, "Cannot ioremap\n");
56 ret = -ENOMEM;
57 goto err_ioremap;
58 }
59
60 flags = pci_resource_flags(pdev, 0);
61 if (flags & IORESOURCE_IO) {
62 ret = -ENOTSUPP;
63 dev_err(&pdev->dev,
64 "IO mapped PCI devices are not supported\n");
65 goto err_ioremap;
66 }
67
68 pci_set_drvdata(pdev, priv);
69
70 priv->bus = mcb_alloc_bus();
71
72 ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
73 if (ret < 0)
74 goto err_drvdata;
75 num_cells = ret;
76
77 dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
78
79 mcb_bus_add_devices(priv->bus);
80
	return 0;

81err_drvdata:
82 pci_iounmap(pdev, priv->base);
83err_ioremap:
84 pci_release_region(pdev, 0);
85err_start:
86 pci_disable_device(pdev);
87 return ret;
88}
89
90static void mcb_pci_remove(struct pci_dev *pdev)
91{
92 struct priv *priv = pci_get_drvdata(pdev);
93
94 mcb_release_bus(priv->bus);
95}
96
97static const struct pci_device_id mcb_pci_tbl[] = {
98 { PCI_DEVICE(PCI_VENDOR_ID_MEN, PCI_DEVICE_ID_MEN_CHAMELEON) },
99 { 0 },
100};
101MODULE_DEVICE_TABLE(pci, mcb_pci_tbl);
102
103static struct pci_driver mcb_pci_driver = {
104 .name = "mcb-pci",
105 .id_table = mcb_pci_tbl,
106 .probe = mcb_pci_probe,
107 .remove = mcb_pci_remove,
108};
109
110module_pci_driver(mcb_pci_driver);
111
112MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
113MODULE_LICENSE("GPL");
114MODULE_DESCRIPTION("MCB over PCI support");
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 08d9a207259a..b428c0ae63d5 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
 	msg->seq = tfr->seq;
 	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
 
-	r = cn_netlink_send(msg, 0, gfp_any());
+	r = cn_netlink_send(msg, 0, 0, gfp_any());
 
 	return r;
 }
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 29a11db365bc..c59e9c96e86d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,17 @@ menuconfig MEMORY
 
 if MEMORY
 
+config TI_AEMIF
+	tristate "Texas Instruments AEMIF driver"
+	depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
+	help
+	  This driver is for the AEMIF module available in Texas Instruments
+	  SoCs. AEMIF stands for Asynchronous External Memory Interface and
+	  is intended to provide a glue-less interface to a variety of
+	  asynchronous memory devices like ASRAM, NOR and NAND memory. A total
+	  of 256M bytes of any of these memories can be accessed at a given
+	  time via four chip selects with 64M byte access per chip select.
+
 config TI_EMIF
 	tristate "Texas Instruments EMIF driver"
 	depends on ARCH_OMAP2PLUS
@@ -50,4 +61,8 @@ config TEGRA30_MC
 	  analysis, especially for IOMMU/SMMU(System Memory Management
 	  Unit) module.
 
+config FSL_IFC
+	bool
+	depends on FSL_SOC
+
 endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 969d923dad93..71160a2b7313 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,7 +5,9 @@
 ifeq ($(CONFIG_DDR),y)
 obj-$(CONFIG_OF)		+= of_memory.o
 endif
+obj-$(CONFIG_TI_AEMIF)		+= ti-aemif.o
 obj-$(CONFIG_TI_EMIF)		+= emif.o
+obj-$(CONFIG_FSL_IFC)		+= fsl_ifc.o
 obj-$(CONFIG_MVEBU_DEVBUS)	+= mvebu-devbus.o
 obj-$(CONFIG_TEGRA20_MC)	+= tegra20-mc.o
 obj-$(CONFIG_TEGRA30_MC)	+= tegra30-mc.o
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
new file mode 100644
index 000000000000..3d5d792d5cb2
--- /dev/null
+++ b/drivers/memory/fsl_ifc.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc
3 *
4 * Freescale Integrated Flash Controller
5 *
6 * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/compiler.h>
25#include <linux/spinlock.h>
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/io.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/platform_device.h>
32#include <linux/fsl_ifc.h>
33#include <asm/prom.h>
34
35struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
36EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
37
38/*
39 * convert_ifc_address - convert the base address
40 * @addr_base: base address of the memory bank
41 */
42unsigned int convert_ifc_address(phys_addr_t addr_base)
43{
44 return addr_base & CSPR_BA;
45}
46EXPORT_SYMBOL(convert_ifc_address);
47
48/*
49 * fsl_ifc_find - find IFC bank
50 * @addr_base: base address of the memory bank
51 *
52 * This function walks IFC banks comparing "Base address" field of the CSPR
53 * registers with the supplied addr_base argument. When bases match this
54 * function returns bank number (starting with 0), otherwise it returns
55 * appropriate errno value.
56 */
57int fsl_ifc_find(phys_addr_t addr_base)
58{
59 int i = 0;
60
61 if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
62 return -ENODEV;
63
64 for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
65 u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
66 if (cspr & CSPR_V && (cspr & CSPR_BA) ==
67 convert_ifc_address(addr_base))
68 return i;
69 }
70
71 return -ENOENT;
72}
73EXPORT_SYMBOL(fsl_ifc_find);
74
75static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
76{
77 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
78
79 /*
80 * Clear all the common status and event registers
81 */
82 if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
83 out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
84
85 /* enable all error and events */
86 out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
87
88 /* enable all error and event interrupts */
89 out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
90 out_be32(&ifc->cm_erattr0, 0x0);
91 out_be32(&ifc->cm_erattr1, 0x0);
92
93 return 0;
94}
95
96static int fsl_ifc_ctrl_remove(struct platform_device *dev)
97{
98 struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
99
100 free_irq(ctrl->nand_irq, ctrl);
101 free_irq(ctrl->irq, ctrl);
102
103 irq_dispose_mapping(ctrl->nand_irq);
104 irq_dispose_mapping(ctrl->irq);
105
106 iounmap(ctrl->regs);
107
108 dev_set_drvdata(&dev->dev, NULL);
109 kfree(ctrl);
110
111 return 0;
112}
113
114/*
115 * NAND events are split between an operational interrupt which only
116 * receives OPC, and an error interrupt that receives everything else,
117 * including non-NAND errors. Whichever interrupt gets to it first
118 * records the status and wakes the wait queue.
119 */
120static DEFINE_SPINLOCK(nand_irq_lock);
121
122static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
123{
124 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
125 unsigned long flags;
126 u32 stat;
127
128 spin_lock_irqsave(&nand_irq_lock, flags);
129
130 stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
131 if (stat) {
132 out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
133 ctrl->nand_stat = stat;
134 wake_up(&ctrl->nand_wait);
135 }
136
137 spin_unlock_irqrestore(&nand_irq_lock, flags);
138
139 return stat;
140}
141
142static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
143{
144 struct fsl_ifc_ctrl *ctrl = data;
145
146 if (check_nand_stat(ctrl))
147 return IRQ_HANDLED;
148
149 return IRQ_NONE;
150}
151
152/*
153 * NOTE: This interrupt is used to report ifc events of various kinds,
154 * such as transaction errors on the chipselects.
155 */
156static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
157{
158 struct fsl_ifc_ctrl *ctrl = data;
159 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
160 u32 err_axiid, err_srcid, status, cs_err, err_addr;
161 irqreturn_t ret = IRQ_NONE;
162
163 /* read for chip select error */
164 cs_err = in_be32(&ifc->cm_evter_stat);
165 if (cs_err) {
166 dev_err(ctrl->dev, "transaction sent to IFC is not mapped to "
167 "any memory bank 0x%08X\n", cs_err);
168 /* clear the chip select error */
169 out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
170
171 /* read error attribute registers print the error information */
172 status = in_be32(&ifc->cm_erattr0);
173 err_addr = in_be32(&ifc->cm_erattr1);
174
175 if (status & IFC_CM_ERATTR0_ERTYP_READ)
176 dev_err(ctrl->dev, "Read transaction error "
177 "CM_ERATTR0 0x%08X\n", status);
178 else
179 dev_err(ctrl->dev, "Write transaction error "
180 "CM_ERATTR0 0x%08X\n", status);
181
182 err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
183 IFC_CM_ERATTR0_ERAID_SHIFT;
184 dev_err(ctrl->dev, "AXI ID of the error "
185 "transaction 0x%08X\n", err_axiid);
186
187 err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
188 IFC_CM_ERATTR0_ESRCID_SHIFT;
189 dev_err(ctrl->dev, "SRC ID of the error "
190 "transaction 0x%08X\n", err_srcid);
191
192 dev_err(ctrl->dev, "Transaction Address corresponding to error "
193 "ERADDR 0x%08X\n", err_addr);
194
195 ret = IRQ_HANDLED;
196 }
197
198 if (check_nand_stat(ctrl))
199 ret = IRQ_HANDLED;
200
201 return ret;
202}
203
204/*
205 * fsl_ifc_ctrl_probe
206 *
207 * Called by the device layer when it finds a device matching
208 * one this driver can handle. This code allocates all of
209 * the resources needed for the controller only. The
210 * resources for the NAND banks themselves are allocated
211 * in the chip probe function.
212*/
213static int fsl_ifc_ctrl_probe(struct platform_device *dev)
214{
215 int ret = 0;
216
217
218 dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
219
220 fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
221 if (!fsl_ifc_ctrl_dev)
222 return -ENOMEM;
223
224 dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
225
226 /* IOMAP the entire IFC region */
227 fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
228 if (!fsl_ifc_ctrl_dev->regs) {
229 dev_err(&dev->dev, "failed to get memory region\n");
230 ret = -ENODEV;
231 goto err;
232 }
233
234 /* get the Controller level irq */
235 fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
236 if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
237 dev_err(&dev->dev, "failed to get irq resource "
238 "for IFC\n");
239 ret = -ENODEV;
240 goto err;
241 }
242
243 /* get the nand machine irq */
244 fsl_ifc_ctrl_dev->nand_irq =
245 irq_of_parse_and_map(dev->dev.of_node, 1);
246
247 fsl_ifc_ctrl_dev->dev = &dev->dev;
248
249 ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
250 if (ret < 0)
251 goto err;
252
253 init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
254
255 ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
256 "fsl-ifc", fsl_ifc_ctrl_dev);
257 if (ret != 0) {
258 dev_err(&dev->dev, "failed to install irq (%d)\n",
259 fsl_ifc_ctrl_dev->irq);
260 goto err_irq;
261 }
262
263 if (fsl_ifc_ctrl_dev->nand_irq) {
264 ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq,
265 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev);
266 if (ret != 0) {
267 dev_err(&dev->dev, "failed to install irq (%d)\n",
268 fsl_ifc_ctrl_dev->nand_irq);
269 goto err_nandirq;
270 }
271 }
272
273 return 0;
274
275err_nandirq:
276 free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
277 irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
278err_irq:
279 free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
280 irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
281err:
282 return ret;
283}
284
285static const struct of_device_id fsl_ifc_match[] = {
286 {
287 .compatible = "fsl,ifc",
288 },
289 {},
290};
291
292static struct platform_driver fsl_ifc_ctrl_driver = {
293 .driver = {
294 .name = "fsl-ifc",
295 .of_match_table = fsl_ifc_match,
296 },
297 .probe = fsl_ifc_ctrl_probe,
298 .remove = fsl_ifc_ctrl_remove,
299};
300
301static int __init fsl_ifc_init(void)
302{
303 return platform_driver_register(&fsl_ifc_ctrl_driver);
304}
305subsys_initcall(fsl_ifc_init);
306
307MODULE_LICENSE("GPL");
308MODULE_AUTHOR("Freescale Semiconductor");
309MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
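The exported fsl_ifc_ctrl_dev / fsl_ifc_find() pair is what the NAND and NOR drivers use to locate their chip-select and to wait for the events latched by check_nand_stat() above. A rough sketch of that usage (the wait pattern mirrors the wake_up()/nand_wait handshake in this file; the timeout value and helper name are assumptions):

#include <linux/fsl_ifc.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Illustrative: find our IFC bank and wait for a NAND operation to complete. */
static int example_ifc_wait(phys_addr_t bank_base)
{
	struct fsl_ifc_ctrl *ctrl = fsl_ifc_ctrl_dev;
	int bank;

	bank = fsl_ifc_find(bank_base);
	if (bank < 0)
		return bank;	/* -ENODEV or -ENOENT from fsl_ifc_find() */

	/* ... kick off a NAND operation on this bank ... */

	ctrl->nand_stat = 0;
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   msecs_to_jiffies(100));	/* timeout is illustrative */

	return ctrl->nand_stat ? 0 : -ETIMEDOUT;
}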
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
new file mode 100644
index 000000000000..d3df7602f406
--- /dev/null
+++ b/drivers/memory/ti-aemif.c
@@ -0,0 +1,427 @@
1/*
2 * TI AEMIF driver
3 *
4 * Copyright (C) 2010 - 2013 Texas Instruments Incorporated. http://www.ti.com/
5 *
6 * Authors:
7 * Murali Karicheri <m-karicheri2@ti.com>
8 * Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23
24#define TA_SHIFT 2
25#define RHOLD_SHIFT 4
26#define RSTROBE_SHIFT 7
27#define RSETUP_SHIFT 13
28#define WHOLD_SHIFT 17
29#define WSTROBE_SHIFT 20
30#define WSETUP_SHIFT 26
31#define EW_SHIFT 30
32#define SS_SHIFT 31
33
34#define TA(x) ((x) << TA_SHIFT)
35#define RHOLD(x) ((x) << RHOLD_SHIFT)
36#define RSTROBE(x) ((x) << RSTROBE_SHIFT)
37#define RSETUP(x) ((x) << RSETUP_SHIFT)
38#define WHOLD(x) ((x) << WHOLD_SHIFT)
39#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
40#define WSETUP(x) ((x) << WSETUP_SHIFT)
41#define EW(x) ((x) << EW_SHIFT)
42#define SS(x) ((x) << SS_SHIFT)
43
44#define ASIZE_MAX 0x1
45#define TA_MAX 0x3
46#define RHOLD_MAX 0x7
47#define RSTROBE_MAX 0x3f
48#define RSETUP_MAX 0xf
49#define WHOLD_MAX 0x7
50#define WSTROBE_MAX 0x3f
51#define WSETUP_MAX 0xf
52#define EW_MAX 0x1
53#define SS_MAX 0x1
54#define NUM_CS 4
55
56#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
57#define RHOLD_VAL(x) (((x) & RHOLD(RHOLD_MAX)) >> RHOLD_SHIFT)
58#define RSTROBE_VAL(x) (((x) & RSTROBE(RSTROBE_MAX)) >> RSTROBE_SHIFT)
59#define RSETUP_VAL(x) (((x) & RSETUP(RSETUP_MAX)) >> RSETUP_SHIFT)
60#define WHOLD_VAL(x) (((x) & WHOLD(WHOLD_MAX)) >> WHOLD_SHIFT)
61#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
62#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
63#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
64#define SS_VAL(x) (((x) & SS(SS_MAX)) >> SS_SHIFT)
65
66#define NRCSR_OFFSET 0x00
67#define AWCCR_OFFSET 0x04
68#define A1CR_OFFSET 0x10
69
70#define ACR_ASIZE_MASK 0x3
71#define ACR_EW_MASK BIT(30)
72#define ACR_SS_MASK BIT(31)
73#define ASIZE_16BIT 1
74
75#define CONFIG_MASK (TA(TA_MAX) | \
76 RHOLD(RHOLD_MAX) | \
77 RSTROBE(RSTROBE_MAX) | \
78 RSETUP(RSETUP_MAX) | \
79 WHOLD(WHOLD_MAX) | \
80 WSTROBE(WSTROBE_MAX) | \
81 WSETUP(WSETUP_MAX) | \
82 EW(EW_MAX) | SS(SS_MAX) | \
83 ASIZE_MAX)
84
85/**
86 * struct aemif_cs_data: structure to hold cs parameters
87 * @cs: chip-select number
88 * @wstrobe: write strobe width, ns
89 * @rstrobe: read strobe width, ns
90 * @wsetup: write setup width, ns
91 * @whold: write hold width, ns
92 * @rsetup: read setup width, ns
93 * @rhold: read hold width, ns
94 * @ta: minimum turn around time, ns
95 * @enable_ss: enable/disable select strobe mode
96 * @enable_ew: enable/disable extended wait mode
97 * @asize: width of the asynchronous device's data bus
98 */
99struct aemif_cs_data {
100 u8 cs;
101 u16 wstrobe;
102 u16 rstrobe;
103 u8 wsetup;
104 u8 whold;
105 u8 rsetup;
106 u8 rhold;
107 u8 ta;
108 u8 enable_ss;
109 u8 enable_ew;
110 u8 asize;
111};
112
113/**
114 * struct aemif_device: structure to hold device data
115 * @base: base address of AEMIF registers
116 * @clk: source clock
117 * @clk_rate: clock's rate in kHz
118 * @num_cs: number of assigned chip-selects
119 * @cs_offset: start number of cs nodes
120 * @cs_data: array of chip-select settings
121 */
122struct aemif_device {
123 void __iomem *base;
124 struct clk *clk;
125 unsigned long clk_rate;
126 u8 num_cs;
127 int cs_offset;
128 struct aemif_cs_data cs_data[NUM_CS];
129};
130
131/**
132 * aemif_calc_rate - calculate timing data.
133 * @pdev: platform device to calculate for
134 * @wanted: The cycle time needed in nanoseconds.
135 * @clk: The input clock rate in kHz.
136 * @max: The maximum divider value that can be programmed.
137 *
138 * On success, returns the calculated timing value minus 1 for easy
139 * programming into AEMIF timing registers, else negative errno.
140 */
141static int aemif_calc_rate(struct platform_device *pdev, int wanted,
142 unsigned long clk, int max)
143{
144 int result;
145
146 result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
147
148 dev_dbg(&pdev->dev, "%s: result %d from %ld, %d\n", __func__, result,
149 clk, wanted);
150
151 /* It is generally OK to have a more relaxed timing than requested... */
152 if (result < 0)
153 result = 0;
154
155 /* ... But configuring tighter timings is not an option. */
156 else if (result > max)
157 result = -EINVAL;
158
159 return result;
160}
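/*
 * Worked example (illustrative only, not part of the driver): clk is the
 * AEMIF clock in kHz and wanted is the cycle time in ns, so
 * wanted * clk / NSEC_PER_MSEC gives clock cycles. With a 100 MHz clock
 * (clk = 100000) and wanted = 30 ns:
 *
 *	DIV_ROUND_UP(30 * 100000, 1000000) - 1 = 3 - 1 = 2
 *
 * The field is programmed with 2, which the hardware counts as three
 * cycles, i.e. 30 ns. A wanted value of 0 clamps to 0 (a more relaxed
 * timing is acceptable), while a request longer than (max + 1) cycles
 * returns -EINVAL.
 */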
161
162/**
163 * aemif_config_abus - configure async bus parameters
164 * @pdev: platform device to configure for
165 * @csnum: aemif chip select number
166 *
167 * This function programs the given timing values (in nanoseconds) into the
168 * AEMIF registers, taking the AEMIF clock rate into account.
169 *
170 * This function does not use any locking while programming the AEMIF
171 * because it is expected that there is only one user of a given
172 * chip-select.
173 *
174 * Returns 0 on success, else negative errno.
175 */
176static int aemif_config_abus(struct platform_device *pdev, int csnum)
177{
178 struct aemif_device *aemif = platform_get_drvdata(pdev);
179 struct aemif_cs_data *data = &aemif->cs_data[csnum];
180 int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
181 unsigned long clk_rate = aemif->clk_rate;
182 unsigned offset;
183 u32 set, val;
184
185 offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
186
187 ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
188 rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
189 rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
190 rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
191 whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
192 wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
193 wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
194
195 if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
196 whold < 0 || wstrobe < 0 || wsetup < 0) {
197 dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
198 __func__);
199 return -EINVAL;
200 }
201
202 set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
203 WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
204
205 set |= (data->asize & ACR_ASIZE_MASK);
206 if (data->enable_ew)
207 set |= ACR_EW_MASK;
208 if (data->enable_ss)
209 set |= ACR_SS_MASK;
210
211 val = readl(aemif->base + offset);
212 val &= ~CONFIG_MASK;
213 val |= set;
214 writel(val, aemif->base + offset);
215
216 return 0;
217}
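/*
 * Illustration (not part of the driver): continuing the 100 MHz example,
 * a 40 ns read strobe becomes the field value 3 and is placed at bits
 * 7..12 by RSTROBE(3). The readl/writel pair above only rewrites the
 * bits covered by CONFIG_MASK (the timing, EW, SS and ASIZE fields), so
 * whatever was programmed earlier in the rest of the register, e.g. by a
 * boot loader, is preserved.
 */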
218
219static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
220{
221 return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
222}
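/*
 * Illustration: this is the inverse of aemif_calc_rate(). A field value
 * of 2 at clk_rate = 100000 kHz converts back to
 * (2 + 1) * 1000000 / 100000 = 30 ns, which lets aemif_get_hw_params()
 * below report the current hardware settings in nanoseconds.
 */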
223
224/**
225 * aemif_get_hw_params - function to read hw register values
226 * @pdev: platform device to read for
227 * @csnum: aemif chip select number
228 *
229 * This function reads the defaults from the registers and updates
230 * the timing values. Required for get/set commands and also for
231 * the case when the driver needs to use the defaults in hardware.
232 */
233static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
234{
235 struct aemif_device *aemif = platform_get_drvdata(pdev);
236 struct aemif_cs_data *data = &aemif->cs_data[csnum];
237 unsigned long clk_rate = aemif->clk_rate;
238 u32 val, offset;
239
240 offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
241 val = readl(aemif->base + offset);
242
243 data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
244 data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
245 data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
246 data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
247 data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
248 data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
249 data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
250 data->enable_ew = EW_VAL(val);
251 data->enable_ss = SS_VAL(val);
252 data->asize = val & ASIZE_MAX;
253}
254
255/**
256 * of_aemif_parse_abus_config - parse CS configuration from DT
257 * @pdev: platform device to parse for
258 * @np: device node ptr
259 *
260 * This function updates the EMIF async bus configuration based on the values
261 * configured in a cs device binding node.
262 */
263static int of_aemif_parse_abus_config(struct platform_device *pdev,
264 struct device_node *np)
265{
266 struct aemif_device *aemif = platform_get_drvdata(pdev);
267 struct aemif_cs_data *data;
268 u32 cs;
269 u32 val;
270
271 if (of_property_read_u32(np, "ti,cs-chipselect", &cs)) {
272 dev_dbg(&pdev->dev, "cs property is required");
273 return -EINVAL;
274 }
275
276 if (cs - aemif->cs_offset >= NUM_CS || cs < aemif->cs_offset) {
277 dev_dbg(&pdev->dev, "cs number is incorrect %d", cs);
278 return -EINVAL;
279 }
280
281 if (aemif->num_cs >= NUM_CS) {
282 dev_dbg(&pdev->dev, "cs count is more than %d", NUM_CS);
283 return -EINVAL;
284 }
285
286 data = &aemif->cs_data[aemif->num_cs];
287 data->cs = cs;
288
289 /* read the current value in the hw register */
290 aemif_get_hw_params(pdev, aemif->num_cs++);
291
292 /* override the values from device node */
293 if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
294 data->ta = val;
295
296 if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
297 data->rhold = val;
298
299 if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
300 data->rstrobe = val;
301
302 if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
303 data->rsetup = val;
304
305 if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
306 data->whold = val;
307
308 if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
309 data->wstrobe = val;
310
311 if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
312 data->wsetup = val;
313
314 if (!of_property_read_u32(np, "ti,cs-bus-width", &val))
315 if (val == 16)
316 data->asize = 1;
317 data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
318 data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
319 return 0;
320}
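/*
 * Hypothetical cs child node accepted by the parser above (the property
 * names come from the code; the node name and values are made up for
 * illustration, and every timing property is optional):
 *
 *	cs3 {
 *		ti,cs-chipselect = <3>;
 *		ti,cs-bus-width = <16>;
 *		ti,cs-read-strobe-ns = <40>;
 *		ti,cs-write-strobe-ns = <40>;
 *		ti,cs-min-turnaround-ns = <10>;
 *	};
 *
 * Properties that are absent keep the values read back from the hardware
 * by aemif_get_hw_params().
 */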
321
322static const struct of_device_id aemif_of_match[] = {
323 { .compatible = "ti,davinci-aemif", },
324 { .compatible = "ti,da850-aemif", },
325 {},
326};
327
328static int aemif_probe(struct platform_device *pdev)
329{
330 int i;
331 int ret = -ENODEV;
332 struct resource *res;
333 struct device *dev = &pdev->dev;
334 struct device_node *np = dev->of_node;
335 struct device_node *child_np;
336 struct aemif_device *aemif;
337
338 if (np == NULL)
339 return 0;
340
341 aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL);
342 if (!aemif)
343 return -ENOMEM;
344
345 platform_set_drvdata(pdev, aemif);
346
347 aemif->clk = devm_clk_get(dev, NULL);
348 if (IS_ERR(aemif->clk)) {
349 dev_err(dev, "cannot get clock 'aemif'\n");
350 return PTR_ERR(aemif->clk);
351 }
352
353 clk_prepare_enable(aemif->clk);
354 aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
355
356 if (of_device_is_compatible(np, "ti,da850-aemif"))
357 aemif->cs_offset = 2;
358
359 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
360 aemif->base = devm_ioremap_resource(dev, res);
361 if (IS_ERR(aemif->base)) {
362 ret = PTR_ERR(aemif->base);
363 goto error;
364 }
365
366 /*
367 * For every controller device node, there is a cs device node that
368	 * describes the bus configuration parameters. This function iterates
369	 * over these nodes and updates the cs data array.
370 */
371 for_each_available_child_of_node(np, child_np) {
372 ret = of_aemif_parse_abus_config(pdev, child_np);
373 if (ret < 0)
374 goto error;
375 }
376
377 for (i = 0; i < aemif->num_cs; i++) {
378 ret = aemif_config_abus(pdev, i);
379 if (ret < 0) {
380 dev_err(dev, "Error configuring chip select %d\n",
381 aemif->cs_data[i].cs);
382 goto error;
383 }
384 }
385
386 /*
387	 * Create the child devices explicitly here to
388	 * guarantee that they are probed after the AEMIF timing
389	 * parameters are set.
390 */
391 for_each_available_child_of_node(np, child_np) {
392 ret = of_platform_populate(child_np, NULL, NULL, dev);
393 if (ret < 0)
394 goto error;
395 }
396
397 return 0;
398error:
399 clk_disable_unprepare(aemif->clk);
400 return ret;
401}
402
403static int aemif_remove(struct platform_device *pdev)
404{
405 struct aemif_device *aemif = platform_get_drvdata(pdev);
406
407 clk_disable_unprepare(aemif->clk);
408 return 0;
409}
410
411static struct platform_driver aemif_driver = {
412 .probe = aemif_probe,
413 .remove = aemif_remove,
414 .driver = {
415 .name = KBUILD_MODNAME,
416 .owner = THIS_MODULE,
417 .of_match_table = of_match_ptr(aemif_of_match),
418 },
419};
420
421module_platform_driver(aemif_driver);
422
423MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
424MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
425MODULE_DESCRIPTION("Texas Instruments AEMIF driver");
426MODULE_LICENSE("GPL v2");
427MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6cb388e8fb7d..809afebe0dad 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -235,7 +235,7 @@ config SGI_XP
235 235
236config CS5535_MFGPT 236config CS5535_MFGPT
237 tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" 237 tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
238 depends on PCI && X86 && MFD_CS5535 238 depends on MFD_CS5535
239 default n 239 default n
240 help 240 help
241 This driver provides access to MFGPT functionality for other 241 This driver provides access to MFGPT functionality for other
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index d3eee113baeb..a43053daad0e 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -72,7 +72,6 @@
72#include <linux/module.h> 72#include <linux/module.h>
73#include <linux/device.h> 73#include <linux/device.h>
74#include <linux/kernel.h> 74#include <linux/kernel.h>
75#include <linux/init.h>
76#include <linux/delay.h> 75#include <linux/delay.h>
77#include <linux/slab.h> 76#include <linux/slab.h>
78 77
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 0c6e037153d2..c6cc3dc8ae1f 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/i2c.h> 26#include <linux/i2c.h>
28#include <linux/err.h> 27#include <linux/err.h>
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 820e53d0048f..9b313f7810f5 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -47,7 +47,6 @@
47 47
48#include <linux/module.h> 48#include <linux/module.h>
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/init.h>
51#include <linux/slab.h> 50#include <linux/slab.h>
52#include <linux/of.h> 51#include <linux/of.h>
53#include "bmp085.h" 52#include "bmp085.h"
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 9e2b985293fc..14d90eae605b 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -101,7 +101,6 @@
101#include <linux/kernel.h> 101#include <linux/kernel.h>
102#include <linux/module.h> 102#include <linux/module.h>
103#include <linux/poll.h> 103#include <linux/poll.h>
104#include <linux/init.h>
105#include <linux/slab.h> 104#include <linux/slab.h>
106#include <linux/kref.h> 105#include <linux/kref.h>
107#include <linux/io.h> 106#include <linux/io.h>
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 154b02e5094f..6a672f9ef522 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -32,7 +32,6 @@
32 */ 32 */
33 33
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/i2c.h> 35#include <linux/i2c.h>
37#include <linux/string.h> 36#include <linux/string.h>
38#include <linux/list.h> 37#include <linux/list.h>
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4f3bca1003a1..634f72929e12 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/delay.h> 15#include <linux/delay.h>
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index f0fa4e8ca124..33f8673d23a6 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/module.h> 20#include <linux/module.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 78e55b501c94..9ebeacdb8ec4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -11,7 +11,6 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/mutex.h> 15#include <linux/mutex.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index e36157d5d3ab..580ff9df5529 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/module.h> 30#include <linux/module.h>
32#include <linux/slab.h> 31#include <linux/slab.h>
33#include <linux/i2c.h> 32#include <linux/i2c.h>
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
index 9c34e5704304..3f2b625b2032 100644
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/init.h>
25#include <linux/io.h> 24#include <linux/io.h>
26#include <linux/kernel.h> 25#include <linux/kernel.h>
27#include <linux/kobject.h> 26#include <linux/kobject.h>
@@ -96,7 +95,7 @@ static int sunxi_sid_remove(struct platform_device *pdev)
96} 95}
97 96
98static const struct of_device_id sunxi_sid_of_match[] = { 97static const struct of_device_id sunxi_sid_of_match[] = {
99 { .compatible = "allwinner,sun4i-sid", .data = (void *)16}, 98 { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
100 { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512}, 99 { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
101 {/* sentinel */}, 100 {/* sentinel */},
102}; 101};
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 3bfdc07a7248..50d2096ea1c7 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/debugfs.h> 29#include <linux/debugfs.h>
31#include <linux/seq_file.h> 30#include <linux/seq_file.h>
32#include <linux/uaccess.h> 31#include <linux/uaccess.h>
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 170bd3daf336..90520d76633f 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/i2c.h> 26#include <linux/i2c.h>
28#include <linux/err.h> 27#include <linux/err.h>
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index e3183f26216b..12c30b486b27 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -26,7 +26,6 @@
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <linux/i2c.h> 30#include <linux/i2c.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b7f84dacf822..4a9c50a43afb 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <linux/i2c.h> 27#include <linux/i2c.h>
29#include <linux/err.h> 28#include <linux/err.h>
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 61fbe6acabef..0a1565e63c71 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -12,7 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/delay.h> 17#include <linux/delay.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 036effe9a795..3ef4627f9cb1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -23,7 +23,6 @@
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/dmi.h> 26#include <linux/dmi.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/types.h> 28#include <linux/types.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 7c97550240f1..d324f8a97b88 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/err.h> 29#include <linux/err.h>
31#include <linux/i2c.h> 30#include <linux/i2c.h>
32#include <linux/pm_runtime.h> 31#include <linux/pm_runtime.h>
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index 9aa2bd2a71ae..bd06d0cfac45 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -10,7 +10,6 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/err.h> 13#include <linux/err.h>
15#include <linux/input.h> 14#include <linux/input.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 49c7a23f02fc..d66a2f24f6b3 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -30,6 +30,7 @@
30 * 30 *
31 * See Documentation/fault-injection/provoke-crashes.txt for instructions 31 * See Documentation/fault-injection/provoke-crashes.txt for instructions
32 */ 32 */
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 34
34#include <linux/kernel.h> 35#include <linux/kernel.h>
35#include <linux/fs.h> 36#include <linux/fs.h>
@@ -45,6 +46,7 @@
45#include <linux/debugfs.h> 46#include <linux/debugfs.h>
46#include <linux/vmalloc.h> 47#include <linux/vmalloc.h>
47#include <linux/mman.h> 48#include <linux/mman.h>
49#include <asm/cacheflush.h>
48 50
49#ifdef CONFIG_IDE 51#ifdef CONFIG_IDE
50#include <linux/ide.h> 52#include <linux/ide.h>
@@ -101,6 +103,7 @@ enum ctype {
101 CT_EXEC_USERSPACE, 103 CT_EXEC_USERSPACE,
102 CT_ACCESS_USERSPACE, 104 CT_ACCESS_USERSPACE,
103 CT_WRITE_RO, 105 CT_WRITE_RO,
106 CT_WRITE_KERN,
104}; 107};
105 108
106static char* cp_name[] = { 109static char* cp_name[] = {
@@ -137,6 +140,7 @@ static char* cp_type[] = {
137 "EXEC_USERSPACE", 140 "EXEC_USERSPACE",
138 "ACCESS_USERSPACE", 141 "ACCESS_USERSPACE",
139 "WRITE_RO", 142 "WRITE_RO",
143 "WRITE_KERN",
140}; 144};
141 145
142static struct jprobe lkdtm; 146static struct jprobe lkdtm;
@@ -316,6 +320,13 @@ static void do_nothing(void)
316 return; 320 return;
317} 321}
318 322
323/* Must immediately follow do_nothing for size calculations to work out. */
324static void do_overwritten(void)
325{
326 pr_info("do_overwritten wasn't overwritten!\n");
327 return;
328}
329
319static noinline void corrupt_stack(void) 330static noinline void corrupt_stack(void)
320{ 331{
321 /* Use default char array length that triggers stack protection. */ 332 /* Use default char array length that triggers stack protection. */
@@ -328,7 +339,12 @@ static void execute_location(void *dst)
328{ 339{
329 void (*func)(void) = dst; 340 void (*func)(void) = dst;
330 341
342 pr_info("attempting ok execution at %p\n", do_nothing);
343 do_nothing();
344
331 memcpy(dst, do_nothing, EXEC_SIZE); 345 memcpy(dst, do_nothing, EXEC_SIZE);
346 flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
347 pr_info("attempting bad execution at %p\n", func);
332 func(); 348 func();
333} 349}
334 350
@@ -337,8 +353,13 @@ static void execute_user_location(void *dst)
337 /* Intentionally crossing kernel/user memory boundary. */ 353 /* Intentionally crossing kernel/user memory boundary. */
338 void (*func)(void) = dst; 354 void (*func)(void) = dst;
339 355
356 pr_info("attempting ok execution at %p\n", do_nothing);
357 do_nothing();
358
340 if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) 359 if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
341 return; 360 return;
361 flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
362 pr_info("attempting bad execution at %p\n", func);
342 func(); 363 func();
343} 364}
344 365
@@ -463,8 +484,12 @@ static void lkdtm_do_action(enum ctype which)
463 } 484 }
464 485
465 ptr = (unsigned long *)user_addr; 486 ptr = (unsigned long *)user_addr;
487
488 pr_info("attempting bad read at %p\n", ptr);
466 tmp = *ptr; 489 tmp = *ptr;
467 tmp += 0xc0dec0de; 490 tmp += 0xc0dec0de;
491
492 pr_info("attempting bad write at %p\n", ptr);
468 *ptr = tmp; 493 *ptr = tmp;
469 494
470 vm_munmap(user_addr, PAGE_SIZE); 495 vm_munmap(user_addr, PAGE_SIZE);
@@ -475,10 +500,28 @@ static void lkdtm_do_action(enum ctype which)
475 unsigned long *ptr; 500 unsigned long *ptr;
476 501
477 ptr = (unsigned long *)&rodata; 502 ptr = (unsigned long *)&rodata;
503
504 pr_info("attempting bad write at %p\n", ptr);
478 *ptr ^= 0xabcd1234; 505 *ptr ^= 0xabcd1234;
479 506
480 break; 507 break;
481 } 508 }
509 case CT_WRITE_KERN: {
510 size_t size;
511 unsigned char *ptr;
512
513 size = (unsigned long)do_overwritten -
514 (unsigned long)do_nothing;
515 ptr = (unsigned char *)do_overwritten;
516
517 pr_info("attempting bad %zu byte write at %p\n", size, ptr);
518 memcpy(ptr, (unsigned char *)do_nothing, size);
519 flush_icache_range((unsigned long)ptr,
520 (unsigned long)(ptr + size));
521
522 do_overwritten();
523 break;
524 }
482 case CT_NONE: 525 case CT_NONE:
483 default: 526 default:
484 break; 527 break;
@@ -493,8 +536,8 @@ static void lkdtm_handler(void)
493 536
494 spin_lock_irqsave(&count_lock, flags); 537 spin_lock_irqsave(&count_lock, flags);
495 count--; 538 count--;
496 printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", 539 pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
497 cp_name_to_str(cpoint), cp_type_to_str(cptype), count); 540 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
498 541
499 if (count == 0) { 542 if (count == 0) {
500 do_it = true; 543 do_it = true;
@@ -551,18 +594,18 @@ static int lkdtm_register_cpoint(enum cname which)
551 lkdtm.kp.symbol_name = "generic_ide_ioctl"; 594 lkdtm.kp.symbol_name = "generic_ide_ioctl";
552 lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; 595 lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
553#else 596#else
554 printk(KERN_INFO "lkdtm: Crash point not available\n"); 597 pr_info("Crash point not available\n");
555 return -EINVAL; 598 return -EINVAL;
556#endif 599#endif
557 break; 600 break;
558 default: 601 default:
559 printk(KERN_INFO "lkdtm: Invalid Crash Point\n"); 602 pr_info("Invalid Crash Point\n");
560 return -EINVAL; 603 return -EINVAL;
561 } 604 }
562 605
563 cpoint = which; 606 cpoint = which;
564 if ((ret = register_jprobe(&lkdtm)) < 0) { 607 if ((ret = register_jprobe(&lkdtm)) < 0) {
565 printk(KERN_INFO "lkdtm: Couldn't register jprobe\n"); 608 pr_info("Couldn't register jprobe\n");
566 cpoint = CN_INVALID; 609 cpoint = CN_INVALID;
567 } 610 }
568 611
@@ -709,8 +752,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
709 if (type == CT_NONE) 752 if (type == CT_NONE)
710 return -EINVAL; 753 return -EINVAL;
711 754
712 printk(KERN_INFO "lkdtm: Performing direct entry %s\n", 755 pr_info("Performing direct entry %s\n", cp_type_to_str(type));
713 cp_type_to_str(type));
714 lkdtm_do_action(type); 756 lkdtm_do_action(type);
715 *off += count; 757 *off += count;
716 758
@@ -772,7 +814,7 @@ static int __init lkdtm_module_init(void)
772 /* Register debugfs interface */ 814 /* Register debugfs interface */
773 lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); 815 lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
774 if (!lkdtm_debugfs_root) { 816 if (!lkdtm_debugfs_root) {
775 printk(KERN_ERR "lkdtm: creating root dir failed\n"); 817 pr_err("creating root dir failed\n");
776 return -ENODEV; 818 return -ENODEV;
777 } 819 }
778 820
@@ -787,28 +829,26 @@ static int __init lkdtm_module_init(void)
787 de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, 829 de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
788 NULL, &cur->fops); 830 NULL, &cur->fops);
789 if (de == NULL) { 831 if (de == NULL) {
790 printk(KERN_ERR "lkdtm: could not create %s\n", 832 pr_err("could not create %s\n", cur->name);
791 cur->name);
792 goto out_err; 833 goto out_err;
793 } 834 }
794 } 835 }
795 836
796 if (lkdtm_parse_commandline() == -EINVAL) { 837 if (lkdtm_parse_commandline() == -EINVAL) {
797 printk(KERN_INFO "lkdtm: Invalid command\n"); 838 pr_info("Invalid command\n");
798 goto out_err; 839 goto out_err;
799 } 840 }
800 841
801 if (cpoint != CN_INVALID && cptype != CT_NONE) { 842 if (cpoint != CN_INVALID && cptype != CT_NONE) {
802 ret = lkdtm_register_cpoint(cpoint); 843 ret = lkdtm_register_cpoint(cpoint);
803 if (ret < 0) { 844 if (ret < 0) {
804 printk(KERN_INFO "lkdtm: Invalid crash point %d\n", 845 pr_info("Invalid crash point %d\n", cpoint);
805 cpoint);
806 goto out_err; 846 goto out_err;
807 } 847 }
808 printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n", 848 pr_info("Crash point %s of type %s registered\n",
809 cpoint_name, cpoint_type); 849 cpoint_name, cpoint_type);
810 } else { 850 } else {
811 printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n"); 851 pr_info("No crash points registered, enable through debugfs\n");
812 } 852 }
813 853
814 return 0; 854 return 0;
@@ -823,7 +863,7 @@ static void __exit lkdtm_module_exit(void)
823 debugfs_remove_recursive(lkdtm_debugfs_root); 863 debugfs_remove_recursive(lkdtm_debugfs_root);
824 864
825 unregister_jprobe(&lkdtm); 865 unregister_jprobe(&lkdtm);
826 printk(KERN_INFO "lkdtm: Crash point unregistered\n"); 866 pr_info("Crash point unregistered\n");
827} 867}
828 868
829module_init(lkdtm_module_init); 869module_init(lkdtm_module_init);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c76fa31e9bf6..d23384dde73b 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -34,3 +34,12 @@ config INTEL_MEI_ME
34 82Q33 Express 34 82Q33 Express
35 82X38/X48 Express 35 82X38/X48 Express
36 36
37config INTEL_MEI_TXE
38 tristate "Intel Trusted Execution Environment with ME Interface"
39 select INTEL_MEI
40 depends on X86 && PCI && WATCHDOG_CORE
41 help
42 MEI Support for Trusted Execution Environment device on Intel SoCs
43
44 Supported SoCs:
45 Intel Bay Trail
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 08698a466268..8ebc6cda1373 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,6 +1,6 @@
1# 1#
2# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver 2# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
3# Copyright (c) 2010-2011, Intel Corporation. 3# Copyright (c) 2010-2014, Intel Corporation.
4# 4#
5obj-$(CONFIG_INTEL_MEI) += mei.o 5obj-$(CONFIG_INTEL_MEI) += mei.o
6mei-objs := init.o 6mei-objs := init.o
@@ -17,3 +17,7 @@ mei-$(CONFIG_DEBUG_FS) += debugfs.o
17obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o 17obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
18mei-me-objs := pci-me.o 18mei-me-objs := pci-me.o
19mei-me-objs += hw-me.o 19mei-me-objs += hw-me.o
20
21obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
22mei-txe-objs := pci-txe.o
23mei-txe-objs += hw-txe.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 2fad84432829..b8deb3455480 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -21,7 +21,6 @@
21#include <linux/fcntl.h> 21#include <linux/fcntl.h>
22#include <linux/aio.h> 22#include <linux/aio.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/ioctl.h> 24#include <linux/ioctl.h>
26#include <linux/cdev.h> 25#include <linux/cdev.h>
27#include <linux/list.h> 26#include <linux/list.h>
@@ -35,7 +34,6 @@
35 34
36#include "mei_dev.h" 35#include "mei_dev.h"
37#include "hbm.h" 36#include "hbm.h"
38#include "hw-me.h"
39#include "client.h" 37#include "client.h"
40 38
41const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 39const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
@@ -79,10 +77,9 @@ int mei_amthif_host_init(struct mei_device *dev)
79 77
80 i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); 78 i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
81 if (i < 0) { 79 if (i < 0) {
82 ret = i;
83 dev_info(&dev->pdev->dev, 80 dev_info(&dev->pdev->dev,
84 "amthif: failed to find the client %d\n", ret); 81 "amthif: failed to find the client %d\n", i);
85 return ret; 82 return -ENOTTY;
86 } 83 }
87 84
88 cl->me_client_id = dev->me_clients[i].client_id; 85 cl->me_client_id = dev->me_clients[i].client_id;
@@ -116,14 +113,11 @@ int mei_amthif_host_init(struct mei_device *dev)
116 113
117 cl->state = MEI_FILE_CONNECTING; 114 cl->state = MEI_FILE_CONNECTING;
118 115
119 if (mei_hbm_cl_connect_req(dev, cl)) { 116 ret = mei_cl_connect(cl, NULL);
120 dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n"); 117
121 cl->state = MEI_FILE_DISCONNECTED; 118 dev->iamthif_state = MEI_IAMTHIF_IDLE;
122 cl->host_client_id = 0; 119
123 } else { 120 return ret;
124 cl->timer_count = MEI_CONNECT_TIMEOUT;
125 }
126 return 0;
127} 121}
128 122
129/** 123/**
@@ -137,14 +131,12 @@ int mei_amthif_host_init(struct mei_device *dev)
137struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, 131struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
138 struct file *file) 132 struct file *file)
139{ 133{
140 struct mei_cl_cb *pos = NULL; 134 struct mei_cl_cb *cb;
141 struct mei_cl_cb *next = NULL;
142 135
143 list_for_each_entry_safe(pos, next, 136 list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) {
144 &dev->amthif_rd_complete_list.list, list) { 137 if (cb->cl && cb->cl == &dev->iamthif_cl &&
145 if (pos->cl && pos->cl == &dev->iamthif_cl && 138 cb->file_object == file)
146 pos->file_object == file) 139 return cb;
147 return pos;
148 } 140 }
149 return NULL; 141 return NULL;
150} 142}
@@ -180,14 +172,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
180 /* Only possible if we are in timeout */ 172 /* Only possible if we are in timeout */
181 if (!cl || cl != &dev->iamthif_cl) { 173 if (!cl || cl != &dev->iamthif_cl) {
182 dev_dbg(&dev->pdev->dev, "bad file ext.\n"); 174 dev_dbg(&dev->pdev->dev, "bad file ext.\n");
183 return -ETIMEDOUT; 175 return -ETIME;
184 } 176 }
185 177
186 i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); 178 i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
187
188 if (i < 0) { 179 if (i < 0) {
189 dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); 180 dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
190 return -ENODEV; 181 return -ENOTTY;
191 } 182 }
192 dev_dbg(&dev->pdev->dev, "checking amthif data\n"); 183 dev_dbg(&dev->pdev->dev, "checking amthif data\n");
193 cb = mei_amthif_find_read_list_entry(dev, file); 184 cb = mei_amthif_find_read_list_entry(dev, file);
@@ -228,7 +219,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
228 dev_dbg(&dev->pdev->dev, "amthif Time out\n"); 219 dev_dbg(&dev->pdev->dev, "amthif Time out\n");
229 /* 15 sec for the message has expired */ 220 /* 15 sec for the message has expired */
230 list_del(&cb->list); 221 list_del(&cb->list);
231 rets = -ETIMEDOUT; 222 rets = -ETIME;
232 goto free; 223 goto free;
233 } 224 }
234 } 225 }
@@ -253,9 +244,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
253 * the buf_idx may point beyond */ 244 * the buf_idx may point beyond */
254 length = min_t(size_t, length, (cb->buf_idx - *offset)); 245 length = min_t(size_t, length, (cb->buf_idx - *offset));
255 246
256 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) 247 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
248 dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
257 rets = -EFAULT; 249 rets = -EFAULT;
258 else { 250 } else {
259 rets = length; 251 rets = length;
260 if ((*offset + length) < cb->buf_idx) { 252 if ((*offset + length) < cb->buf_idx) {
261 *offset += length; 253 *offset += length;
@@ -302,9 +294,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
302 if (ret < 0) 294 if (ret < 0)
303 return ret; 295 return ret;
304 296
305 if (ret && dev->hbuf_is_ready) { 297 if (ret && mei_hbuf_acquire(dev)) {
306 ret = 0; 298 ret = 0;
307 dev->hbuf_is_ready = false;
308 if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { 299 if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
309 mei_hdr.length = mei_hbuf_max_len(dev); 300 mei_hdr.length = mei_hbuf_max_len(dev);
310 mei_hdr.msg_complete = 0; 301 mei_hdr.msg_complete = 0;
@@ -336,10 +327,6 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
336 list_add_tail(&cb->list, &dev->write_list.list); 327 list_add_tail(&cb->list, &dev->write_list.list);
337 } 328 }
338 } else { 329 } else {
339 if (!dev->hbuf_is_ready)
340 dev_dbg(&dev->pdev->dev, "host buffer is not empty");
341
342 dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
343 list_add_tail(&cb->list, &dev->write_list.list); 330 list_add_tail(&cb->list, &dev->write_list.list);
344 } 331 }
345 return 0; 332 return 0;
@@ -365,7 +352,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
365 if (ret) 352 if (ret)
366 return ret; 353 return ret;
367 354
368 cb->fop_type = MEI_FOP_IOCTL; 355 cb->fop_type = MEI_FOP_WRITE;
369 356
370 if (!list_empty(&dev->amthif_cmd_list.list) || 357 if (!list_empty(&dev->amthif_cmd_list.list) ||
371 dev->iamthif_state != MEI_IAMTHIF_IDLE) { 358 dev->iamthif_state != MEI_IAMTHIF_IDLE) {
@@ -447,23 +434,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
447 434
448 435
449/** 436/**
450 * mei_amthif_irq_write_completed - processes completed iamthif operation. 437 * mei_amthif_irq_write - write iamthif command in irq thread context.
451 * 438 *
452 * @dev: the device structure. 439 * @dev: the device structure.
453 * @slots: free slots.
454 * @cb_pos: callback block. 440 * @cb_pos: callback block.
455 * @cl: private data of the file object. 441 * @cl: private data of the file object.
456 * @cmpl_list: complete list. 442 * @cmpl_list: complete list.
457 * 443 *
458 * returns 0, OK; otherwise, error. 444 * returns 0, OK; otherwise, error.
459 */ 445 */
460int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, 446int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
461 s32 *slots, struct mei_cl_cb *cmpl_list) 447 struct mei_cl_cb *cmpl_list)
462{ 448{
463 struct mei_device *dev = cl->dev; 449 struct mei_device *dev = cl->dev;
464 struct mei_msg_hdr mei_hdr; 450 struct mei_msg_hdr mei_hdr;
465 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; 451 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
466 u32 msg_slots = mei_data2slots(len); 452 u32 msg_slots = mei_data2slots(len);
453 int slots;
467 int rets; 454 int rets;
468 455
469 rets = mei_cl_flow_ctrl_creds(cl); 456 rets = mei_cl_flow_ctrl_creds(cl);
@@ -480,13 +467,15 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
480 mei_hdr.reserved = 0; 467 mei_hdr.reserved = 0;
481 mei_hdr.internal = 0; 468 mei_hdr.internal = 0;
482 469
483 if (*slots >= msg_slots) { 470 slots = mei_hbuf_empty_slots(dev);
471
472 if (slots >= msg_slots) {
484 mei_hdr.length = len; 473 mei_hdr.length = len;
485 mei_hdr.msg_complete = 1; 474 mei_hdr.msg_complete = 1;
486 /* Split the message only if we can write the whole host buffer */ 475 /* Split the message only if we can write the whole host buffer */
487 } else if (*slots == dev->hbuf_depth) { 476 } else if (slots == dev->hbuf_depth) {
488 msg_slots = *slots; 477 msg_slots = slots;
489 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); 478 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
490 mei_hdr.length = len; 479 mei_hdr.length = len;
491 mei_hdr.msg_complete = 0; 480 mei_hdr.msg_complete = 0;
492 } else { 481 } else {
@@ -496,7 +485,6 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
496 485
497 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); 486 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
498 487
499 *slots -= msg_slots;
500 rets = mei_write_message(dev, &mei_hdr, 488 rets = mei_write_message(dev, &mei_hdr,
501 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); 489 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
502 if (rets) { 490 if (rets) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4bc7d620d695..ddc5ac92a200 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -26,7 +26,6 @@
26#include <linux/mei_cl_bus.h> 26#include <linux/mei_cl_bus.h>
27 27
28#include "mei_dev.h" 28#include "mei_dev.h"
29#include "hw-me.h"
30#include "client.h" 29#include "client.h"
31 30
32#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) 31#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
@@ -145,9 +144,9 @@ static struct device_type mei_cl_device_type = {
145static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, 144static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev,
146 uuid_le uuid) 145 uuid_le uuid)
147{ 146{
148 struct mei_cl *cl, *next; 147 struct mei_cl *cl;
149 148
150 list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { 149 list_for_each_entry(cl, &dev->device_list, device_link) {
151 if (!uuid_le_cmp(uuid, cl->device_uuid)) 150 if (!uuid_le_cmp(uuid, cl->device_uuid))
152 return cl; 151 return cl;
153 } 152 }
@@ -524,6 +523,22 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
524 schedule_work(&device->event_work); 523 schedule_work(&device->event_work);
525} 524}
526 525
526void mei_cl_bus_remove_devices(struct mei_device *dev)
527{
528 struct mei_cl *cl, *next;
529
530 mutex_lock(&dev->device_lock);
531 list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
532 if (cl->device)
533 mei_cl_remove_device(cl->device);
534
535 list_del(&cl->device_link);
536 mei_cl_unlink(cl);
537 kfree(cl);
538 }
539 mutex_unlock(&dev->device_lock);
540}
541
527int __init mei_cl_bus_init(void) 542int __init mei_cl_bus_init(void)
528{ 543{
529 return bus_register(&mei_cl_bus_type); 544 return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 89a557972d1b..8c078b808cd3 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -29,20 +29,21 @@
29 * mei_me_cl_by_uuid - locate index of me client 29 * mei_me_cl_by_uuid - locate index of me client
30 * 30 *
31 * @dev: mei device 31 * @dev: mei device
32 *
33 * Locking: called under "dev->device_lock" lock
34 *
32 * returns me client index or -ENOENT if not found 35 * returns me client index or -ENOENT if not found
33 */ 36 */
34int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) 37int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
35{ 38{
36 int i, res = -ENOENT; 39 int i;
37 40
38 for (i = 0; i < dev->me_clients_num; ++i) 41 for (i = 0; i < dev->me_clients_num; ++i)
39 if (uuid_le_cmp(*uuid, 42 if (uuid_le_cmp(*uuid,
40 dev->me_clients[i].props.protocol_name) == 0) { 43 dev->me_clients[i].props.protocol_name) == 0)
41 res = i; 44 return i;
42 break;
43 }
44 45
45 return res; 46 return -ENOENT;
46} 47}
47 48
48 49
@@ -60,37 +61,79 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
60int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) 61int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
61{ 62{
62 int i; 63 int i;
64
63 for (i = 0; i < dev->me_clients_num; i++) 65 for (i = 0; i < dev->me_clients_num; i++)
64 if (dev->me_clients[i].client_id == client_id) 66 if (dev->me_clients[i].client_id == client_id)
65 break; 67 return i;
66 if (WARN_ON(dev->me_clients[i].client_id != client_id))
67 return -ENOENT;
68 68
69 if (i == dev->me_clients_num) 69 return -ENOENT;
70 return -ENOENT;
71
72 return i;
73} 70}
74 71
75 72
76/** 73/**
77 * mei_io_list_flush - removes list entry belonging to cl. 74 * mei_cl_cmp_id - tells if the clients are the same
78 * 75 *
79 * @list: An instance of our list structure 76 * @cl1: host client 1
80 * @cl: host client 77 * @cl2: host client 2
78 *
79 * returns true - if the clients has same host and me ids
80 * false - otherwise
81 */
82static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
83 const struct mei_cl *cl2)
84{
85 return cl1 && cl2 &&
86 (cl1->host_client_id == cl2->host_client_id) &&
87 (cl1->me_client_id == cl2->me_client_id);
88}
89
90/**
91 * mei_io_list_flush - removes cbs belonging to cl.
92 *
93 * @list: an instance of our list structure
94 * @cl: host client, can be NULL for flushing the whole list
95 * @free: whether to free the cbs
81 */ 96 */
82void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) 97static void __mei_io_list_flush(struct mei_cl_cb *list,
98 struct mei_cl *cl, bool free)
83{ 99{
84 struct mei_cl_cb *cb; 100 struct mei_cl_cb *cb;
85 struct mei_cl_cb *next; 101 struct mei_cl_cb *next;
86 102
103 /* enable removing everything if no cl is specified */
87 list_for_each_entry_safe(cb, next, &list->list, list) { 104 list_for_each_entry_safe(cb, next, &list->list, list) {
88 if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) 105 if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
89 list_del(&cb->list); 106 list_del(&cb->list);
107 if (free)
108 mei_io_cb_free(cb);
109 }
90 } 110 }
91} 111}
92 112
93/** 113/**
114 * mei_io_list_flush - removes list entry belonging to cl.
115 *
116 * @list: An instance of our list structure
117 * @cl: host client
118 */
119static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
120{
121 __mei_io_list_flush(list, cl, false);
122}
123
124
125/**
126 * mei_io_list_free - removes cb belonging to cl and free them
127 *
128 * @list: An instance of our list structure
129 * @cl: host client
130 */
131static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
132{
133 __mei_io_list_flush(list, cl, true);
134}
135
136/**
94 * mei_io_cb_free - free mei_cb_private related memory 137 * mei_io_cb_free - free mei_cb_private related memory
95 * 138 *
96 * @cb: mei callback struct 139 * @cb: mei callback struct
@@ -196,8 +239,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)
196 239
197 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 240 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
198 mei_io_list_flush(&cl->dev->read_list, cl); 241 mei_io_list_flush(&cl->dev->read_list, cl);
199 mei_io_list_flush(&cl->dev->write_list, cl); 242 mei_io_list_free(&cl->dev->write_list, cl);
200 mei_io_list_flush(&cl->dev->write_waiting_list, cl); 243 mei_io_list_free(&cl->dev->write_waiting_list, cl);
201 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); 244 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
202 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); 245 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
203 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); 246 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
@@ -254,10 +297,9 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
254struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) 297struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
255{ 298{
256 struct mei_device *dev = cl->dev; 299 struct mei_device *dev = cl->dev;
257 struct mei_cl_cb *cb = NULL; 300 struct mei_cl_cb *cb;
258 struct mei_cl_cb *next = NULL;
259 301
260 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) 302 list_for_each_entry(cb, &dev->read_list.list, list)
261 if (mei_cl_cmp_id(cl, cb->cl)) 303 if (mei_cl_cmp_id(cl, cb->cl))
262 return cb; 304 return cb;
263 return NULL; 305 return NULL;
@@ -375,6 +417,23 @@ void mei_host_client_init(struct work_struct *work)
375 mutex_unlock(&dev->device_lock); 417 mutex_unlock(&dev->device_lock);
376} 418}
377 419
420/**
421 * mei_hbuf_acquire: try to acquire host buffer
422 *
423 * @dev: the device structure
424 * returns true if host buffer was acquired
425 */
426bool mei_hbuf_acquire(struct mei_device *dev)
427{
428 if (!dev->hbuf_is_ready) {
429 dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
430 return false;
431 }
432
433 dev->hbuf_is_ready = false;
434
435 return true;
436}
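/*
 * The call sites converted in this series (mei_cl_disconnect(),
 * mei_cl_connect(), mei_cl_read_start(), mei_cl_write() and
 * mei_amthif_send_cmd()) all follow the same sketch:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		... send the request right away ...
 *	} else {
 *		... queue the cb; the irq thread sends it later ...
 *	}
 *
 * There is no explicit release; hbuf_is_ready is set again from the
 * interrupt path once the host buffer is reported empty.
 */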
378 437
379/** 438/**
380 * mei_cl_disconnect - disconnect host client from the me one 439 * mei_cl_disconnect - disconnect host client from the me one
@@ -406,8 +465,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
406 return -ENOMEM; 465 return -ENOMEM;
407 466
408 cb->fop_type = MEI_FOP_CLOSE; 467 cb->fop_type = MEI_FOP_CLOSE;
409 if (dev->hbuf_is_ready) { 468 if (mei_hbuf_acquire(dev)) {
410 dev->hbuf_is_ready = false;
411 if (mei_hbm_cl_disconnect_req(dev, cl)) { 469 if (mei_hbm_cl_disconnect_req(dev, cl)) {
412 rets = -ENODEV; 470 rets = -ENODEV;
413 cl_err(dev, cl, "failed to disconnect.\n"); 471 cl_err(dev, cl, "failed to disconnect.\n");
@@ -461,17 +519,17 @@ free:
461bool mei_cl_is_other_connecting(struct mei_cl *cl) 519bool mei_cl_is_other_connecting(struct mei_cl *cl)
462{ 520{
463 struct mei_device *dev; 521 struct mei_device *dev;
464 struct mei_cl *pos; 522 struct mei_cl *ocl; /* the other client */
465 struct mei_cl *next;
466 523
467 if (WARN_ON(!cl || !cl->dev)) 524 if (WARN_ON(!cl || !cl->dev))
468 return false; 525 return false;
469 526
470 dev = cl->dev; 527 dev = cl->dev;
471 528
472 list_for_each_entry_safe(pos, next, &dev->file_list, link) { 529 list_for_each_entry(ocl, &dev->file_list, link) {
473 if ((pos->state == MEI_FILE_CONNECTING) && 530 if (ocl->state == MEI_FILE_CONNECTING &&
474 (pos != cl) && cl->me_client_id == pos->me_client_id) 531 ocl != cl &&
532 cl->me_client_id == ocl->me_client_id)
475 return true; 533 return true;
476 534
477 } 535 }
@@ -505,11 +563,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
505 goto out; 563 goto out;
506 } 564 }
507 565
508 cb->fop_type = MEI_FOP_IOCTL; 566 cb->fop_type = MEI_FOP_CONNECT;
509
510 if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
511 dev->hbuf_is_ready = false;
512 567
568 /* run hbuf acquire last so we don't have to undo */
569 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
513 if (mei_hbm_cl_connect_req(dev, cl)) { 570 if (mei_hbm_cl_connect_req(dev, cl)) {
514 rets = -ENODEV; 571 rets = -ENODEV;
515 goto out; 572 goto out;
@@ -521,18 +578,19 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
521 } 578 }
522 579
523 mutex_unlock(&dev->device_lock); 580 mutex_unlock(&dev->device_lock);
524 rets = wait_event_timeout(dev->wait_recvd_msg, 581 wait_event_timeout(dev->wait_recvd_msg,
525 (cl->state == MEI_FILE_CONNECTED || 582 (cl->state == MEI_FILE_CONNECTED ||
526 cl->state == MEI_FILE_DISCONNECTED), 583 cl->state == MEI_FILE_DISCONNECTED),
527 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 584 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
528 mutex_lock(&dev->device_lock); 585 mutex_lock(&dev->device_lock);
529 586
530 if (cl->state != MEI_FILE_CONNECTED) { 587 if (cl->state != MEI_FILE_CONNECTED) {
531 rets = -EFAULT; 588 /* something went really wrong */
589 if (!cl->status)
590 cl->status = -EFAULT;
532 591
533 mei_io_list_flush(&dev->ctrl_rd_list, cl); 592 mei_io_list_flush(&dev->ctrl_rd_list, cl);
534 mei_io_list_flush(&dev->ctrl_wr_list, cl); 593 mei_io_list_flush(&dev->ctrl_wr_list, cl);
535 goto out;
536 } 594 }
537 595
538 rets = cl->status; 596 rets = cl->status;
@@ -554,7 +612,8 @@ out:
554int mei_cl_flow_ctrl_creds(struct mei_cl *cl) 612int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
555{ 613{
556 struct mei_device *dev; 614 struct mei_device *dev;
557 int i; 615 struct mei_me_client *me_cl;
616 int id;
558 617
559 if (WARN_ON(!cl || !cl->dev)) 618 if (WARN_ON(!cl || !cl->dev))
560 return -EINVAL; 619 return -EINVAL;
@@ -567,19 +626,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
567 if (cl->mei_flow_ctrl_creds > 0) 626 if (cl->mei_flow_ctrl_creds > 0)
568 return 1; 627 return 1;
569 628
570 for (i = 0; i < dev->me_clients_num; i++) { 629 id = mei_me_cl_by_id(dev, cl->me_client_id);
571 struct mei_me_client *me_cl = &dev->me_clients[i]; 630 if (id < 0) {
572 if (me_cl->client_id == cl->me_client_id) { 631 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
573 if (me_cl->mei_flow_ctrl_creds) { 632 return id;
574 if (WARN_ON(me_cl->props.single_recv_buf == 0))
575 return -EINVAL;
576 return 1;
577 } else {
578 return 0;
579 }
580 }
581 } 633 }
582 return -ENOENT; 634
635 me_cl = &dev->me_clients[id];
636 if (me_cl->mei_flow_ctrl_creds) {
637 if (WARN_ON(me_cl->props.single_recv_buf == 0))
638 return -EINVAL;
639 return 1;
640 }
641 return 0;
583} 642}
584 643
585/** 644/**
@@ -595,32 +654,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
595int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) 654int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
596{ 655{
597 struct mei_device *dev; 656 struct mei_device *dev;
598 int i; 657 struct mei_me_client *me_cl;
658 int id;
599 659
600 if (WARN_ON(!cl || !cl->dev)) 660 if (WARN_ON(!cl || !cl->dev))
601 return -EINVAL; 661 return -EINVAL;
602 662
603 dev = cl->dev; 663 dev = cl->dev;
604 664
605 if (!dev->me_clients_num) 665 id = mei_me_cl_by_id(dev, cl->me_client_id);
606 return -ENOENT; 666 if (id < 0) {
667 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
668 return id;
669 }
607 670
608 for (i = 0; i < dev->me_clients_num; i++) { 671 me_cl = &dev->me_clients[id];
609 struct mei_me_client *me_cl = &dev->me_clients[i]; 672 if (me_cl->props.single_recv_buf != 0) {
610 if (me_cl->client_id == cl->me_client_id) { 673 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
611 if (me_cl->props.single_recv_buf != 0) { 674 return -EINVAL;
612 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) 675 me_cl->mei_flow_ctrl_creds--;
613 return -EINVAL; 676 } else {
614 dev->me_clients[i].mei_flow_ctrl_creds--; 677 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
615 } else { 678 return -EINVAL;
616 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) 679 cl->mei_flow_ctrl_creds--;
617 return -EINVAL;
618 cl->mei_flow_ctrl_creds--;
619 }
620 return 0;
621 }
622 } 680 }
623 return -ENOENT; 681 return 0;
624} 682}
625 683
626/** 684/**
@@ -652,7 +710,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
652 i = mei_me_cl_by_id(dev, cl->me_client_id); 710 i = mei_me_cl_by_id(dev, cl->me_client_id);
653 if (i < 0) { 711 if (i < 0) {
654 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); 712 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
655 return -ENODEV; 713 return -ENOTTY;
656 } 714 }
657 715
658 cb = mei_io_cb_init(cl, NULL); 716 cb = mei_io_cb_init(cl, NULL);
@@ -666,8 +724,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
666 goto err; 724 goto err;
667 725
668 cb->fop_type = MEI_FOP_READ; 726 cb->fop_type = MEI_FOP_READ;
669 if (dev->hbuf_is_ready) { 727 if (mei_hbuf_acquire(dev)) {
670 dev->hbuf_is_ready = false;
671 if (mei_hbm_cl_flow_control_req(dev, cl)) { 728 if (mei_hbm_cl_flow_control_req(dev, cl)) {
672 cl_err(dev, cl, "flow control send failed\n"); 729 cl_err(dev, cl, "flow control send failed\n");
673 rets = -ENODEV; 730 rets = -ENODEV;
@@ -687,27 +744,26 @@ err:
687} 744}
688 745
689/** 746/**
690 * mei_cl_irq_write_complete - write a message to device 747 * mei_cl_irq_write - write a message to device
691 * from the interrupt thread context 748 * from the interrupt thread context
692 * 749 *
693 * @cl: client 750 * @cl: client
694 * @cb: callback block. 751 * @cb: callback block.
695 * @slots: free slots.
696 * @cmpl_list: complete list. 752 * @cmpl_list: complete list.
697 * 753 *
698 * returns 0, OK; otherwise error. 754 * returns 0, OK; otherwise error.
699 */ 755 */
700int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, 756int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
701 s32 *slots, struct mei_cl_cb *cmpl_list) 757 struct mei_cl_cb *cmpl_list)
702{ 758{
703 struct mei_device *dev; 759 struct mei_device *dev;
704 struct mei_msg_data *buf; 760 struct mei_msg_data *buf;
705 struct mei_msg_hdr mei_hdr; 761 struct mei_msg_hdr mei_hdr;
706 size_t len; 762 size_t len;
707 u32 msg_slots; 763 u32 msg_slots;
764 int slots;
708 int rets; 765 int rets;
709 766
710
711 if (WARN_ON(!cl || !cl->dev)) 767 if (WARN_ON(!cl || !cl->dev))
712 return -ENODEV; 768 return -ENODEV;
713 769
@@ -724,6 +780,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
724 return 0; 780 return 0;
725 } 781 }
726 782
783 slots = mei_hbuf_empty_slots(dev);
727 len = buf->size - cb->buf_idx; 784 len = buf->size - cb->buf_idx;
728 msg_slots = mei_data2slots(len); 785 msg_slots = mei_data2slots(len);
729 786
@@ -732,13 +789,13 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
732 mei_hdr.reserved = 0; 789 mei_hdr.reserved = 0;
733 mei_hdr.internal = cb->internal; 790 mei_hdr.internal = cb->internal;
734 791
735 if (*slots >= msg_slots) { 792 if (slots >= msg_slots) {
736 mei_hdr.length = len; 793 mei_hdr.length = len;
737 mei_hdr.msg_complete = 1; 794 mei_hdr.msg_complete = 1;
738 /* Split the message only if we can write the whole host buffer */ 795 /* Split the message only if we can write the whole host buffer */
739 } else if (*slots == dev->hbuf_depth) { 796 } else if (slots == dev->hbuf_depth) {
740 msg_slots = *slots; 797 msg_slots = slots;
741 len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); 798 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
742 mei_hdr.length = len; 799 mei_hdr.length = len;
743 mei_hdr.msg_complete = 0; 800 mei_hdr.msg_complete = 0;
744 } else { 801 } else {
@@ -749,7 +806,6 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
749 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", 806 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
750 cb->request_buffer.size, cb->buf_idx); 807 cb->request_buffer.size, cb->buf_idx);
751 808
752 *slots -= msg_slots;
753 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); 809 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
754 if (rets) { 810 if (rets) {
755 cl->status = rets; 811 cl->status = rets;
@@ -802,21 +858,29 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
802 858
803 859
804 cb->fop_type = MEI_FOP_WRITE; 860 cb->fop_type = MEI_FOP_WRITE;
861 cb->buf_idx = 0;
862 cl->writing_state = MEI_IDLE;
863
864 mei_hdr.host_addr = cl->host_client_id;
865 mei_hdr.me_addr = cl->me_client_id;
866 mei_hdr.reserved = 0;
867 mei_hdr.msg_complete = 0;
868 mei_hdr.internal = cb->internal;
805 869
806 rets = mei_cl_flow_ctrl_creds(cl); 870 rets = mei_cl_flow_ctrl_creds(cl);
807 if (rets < 0) 871 if (rets < 0)
808 goto err; 872 goto err;
809 873
810 /* Host buffer is not ready, we queue the request */ 874 if (rets == 0) {
811 if (rets == 0 || !dev->hbuf_is_ready) { 875 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
812 cb->buf_idx = 0; 876 rets = buf->size;
813 /* unseting complete will enqueue the cb for write */ 877 goto out;
814 mei_hdr.msg_complete = 0; 878 }
879 if (!mei_hbuf_acquire(dev)) {
880 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
815 rets = buf->size; 881 rets = buf->size;
816 goto out; 882 goto out;
817 } 883 }
818
819 dev->hbuf_is_ready = false;
820 884
821 /* Check for a maximum length */ 885 /* Check for a maximum length */
822 if (buf->size > mei_hbuf_max_len(dev)) { 886 if (buf->size > mei_hbuf_max_len(dev)) {
@@ -827,12 +891,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
827 mei_hdr.msg_complete = 1; 891 mei_hdr.msg_complete = 1;
828 } 892 }
829 893
830 mei_hdr.host_addr = cl->host_client_id;
831 mei_hdr.me_addr = cl->me_client_id;
832 mei_hdr.reserved = 0;
833 mei_hdr.internal = cb->internal;
834
835
836 rets = mei_write_message(dev, &mei_hdr, buf->data); 894 rets = mei_write_message(dev, &mei_hdr, buf->data);
837 if (rets) 895 if (rets)
838 goto err; 896 goto err;
@@ -840,13 +898,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
840 cl->writing_state = MEI_WRITING; 898 cl->writing_state = MEI_WRITING;
841 cb->buf_idx = mei_hdr.length; 899 cb->buf_idx = mei_hdr.length;
842 900
843 rets = buf->size;
844out: 901out:
845 if (mei_hdr.msg_complete) { 902 if (mei_hdr.msg_complete) {
846 if (mei_cl_flow_ctrl_reduce(cl)) { 903 rets = mei_cl_flow_ctrl_reduce(cl);
847 rets = -ENODEV; 904 if (rets < 0)
848 goto err; 905 goto err;
849 } 906
850 list_add_tail(&cb->list, &dev->write_waiting_list.list); 907 list_add_tail(&cb->list, &dev->write_waiting_list.list);
851 } else { 908 } else {
852 list_add_tail(&cb->list, &dev->write_list.list); 909 list_add_tail(&cb->list, &dev->write_list.list);
@@ -856,15 +913,18 @@ out:
856 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { 913 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
857 914
858 mutex_unlock(&dev->device_lock); 915 mutex_unlock(&dev->device_lock);
859 if (wait_event_interruptible(cl->tx_wait, 916 rets = wait_event_interruptible(cl->tx_wait,
860 cl->writing_state == MEI_WRITE_COMPLETE)) { 917 cl->writing_state == MEI_WRITE_COMPLETE);
861 if (signal_pending(current))
862 rets = -EINTR;
863 else
864 rets = -ERESTARTSYS;
865 }
866 mutex_lock(&dev->device_lock); 918 mutex_lock(&dev->device_lock);
919 /* wait_event_interruptible returns -ERESTARTSYS */
920 if (rets) {
921 if (signal_pending(current))
922 rets = -EINTR;
923 goto err;
924 }
867 } 925 }
926
927 rets = buf->size;
868err: 928err:
869 return rets; 929 return rets;
870} 930}
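mei_cl_write() above now separates the queued path from the immediate send: without flow-control credits or a free host buffer the callback only goes onto the write list, otherwise the first fragment is written right away. A minimal sketch of that decision, assuming simplified inputs rather than the driver's real state (names are illustrative):

enum tx_action { TX_QUEUE, TX_SEND };

static enum tx_action tx_decide(int flow_ctrl_creds, bool hbuf_acquired)
{
        if (flow_ctrl_creds == 0)       /* no credits: cb stays queued */
                return TX_QUEUE;
        if (!hbuf_acquired)             /* host buffer busy: cb stays queued */
                return TX_QUEUE;
        return TX_SEND;                 /* write header and data now */
}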
@@ -905,9 +965,9 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
905 965
906void mei_cl_all_disconnect(struct mei_device *dev) 966void mei_cl_all_disconnect(struct mei_device *dev)
907{ 967{
908 struct mei_cl *cl, *next; 968 struct mei_cl *cl;
909 969
910 list_for_each_entry_safe(cl, next, &dev->file_list, link) { 970 list_for_each_entry(cl, &dev->file_list, link) {
911 cl->state = MEI_FILE_DISCONNECTED; 971 cl->state = MEI_FILE_DISCONNECTED;
912 cl->mei_flow_ctrl_creds = 0; 972 cl->mei_flow_ctrl_creds = 0;
913 cl->timer_count = 0; 973 cl->timer_count = 0;
@@ -922,8 +982,8 @@ void mei_cl_all_disconnect(struct mei_device *dev)
922 */ 982 */
923void mei_cl_all_wakeup(struct mei_device *dev) 983void mei_cl_all_wakeup(struct mei_device *dev)
924{ 984{
925 struct mei_cl *cl, *next; 985 struct mei_cl *cl;
926 list_for_each_entry_safe(cl, next, &dev->file_list, link) { 986 list_for_each_entry(cl, &dev->file_list, link) {
927 if (waitqueue_active(&cl->rx_wait)) { 987 if (waitqueue_active(&cl->rx_wait)) {
928 cl_dbg(dev, cl, "Waking up reading client!\n"); 988 cl_dbg(dev, cl, "Waking up reading client!\n");
929 wake_up_interruptible(&cl->rx_wait); 989 wake_up_interruptible(&cl->rx_wait);
@@ -942,20 +1002,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
942 */ 1002 */
943void mei_cl_all_write_clear(struct mei_device *dev) 1003void mei_cl_all_write_clear(struct mei_device *dev)
944{ 1004{
945 struct mei_cl_cb *cb, *next; 1005 mei_io_list_free(&dev->write_list, NULL);
946 struct list_head *list; 1006 mei_io_list_free(&dev->write_waiting_list, NULL);
947
948 list = &dev->write_list.list;
949 list_for_each_entry_safe(cb, next, list, list) {
950 list_del(&cb->list);
951 mei_io_cb_free(cb);
952 }
953
954 list = &dev->write_waiting_list.list;
955 list_for_each_entry_safe(cb, next, list, list) {
956 list_del(&cb->list);
957 mei_io_cb_free(cb);
958 }
959} 1007}
960 1008
961 1009
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c8396e582f1c..96d5de0389f9 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)
45{ 45{
46 INIT_LIST_HEAD(&list->list); 46 INIT_LIST_HEAD(&list->list);
47} 47}
48void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
49
50/* 48/*
51 * MEI Host Client Functions 49 * MEI Host Client Functions
52 */ 50 */
@@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);
61int mei_cl_flush_queues(struct mei_cl *cl); 59int mei_cl_flush_queues(struct mei_cl *cl);
62struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); 60struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
63 61
64/**
65 * mei_cl_cmp_id - tells if file private data have same id
66 *
67 * @fe1: private data of 1. file object
68 * @fe2: private data of 2. file object
69 *
70 * returns true - if ids are the same and not NULL
71 */
72static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
73 const struct mei_cl *cl2)
74{
75 return cl1 && cl2 &&
76 (cl1->host_client_id == cl2->host_client_id) &&
77 (cl1->me_client_id == cl2->me_client_id);
78}
79
80 62
81int mei_cl_flow_ctrl_creds(struct mei_cl *cl); 63int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
82 64
@@ -86,15 +68,15 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
86 */ 68 */
87static inline bool mei_cl_is_connected(struct mei_cl *cl) 69static inline bool mei_cl_is_connected(struct mei_cl *cl)
88{ 70{
89 return (cl->dev && 71 return cl->dev &&
90 cl->dev->dev_state == MEI_DEV_ENABLED && 72 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 73 cl->state == MEI_FILE_CONNECTED;
92} 74}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl) 75static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{ 76{
95 return (MEI_FILE_INITIALIZING == cl->state || 77 return MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state || 78 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state); 79 MEI_FILE_DISCONNECTING == cl->state;
98} 80}
99 81
100bool mei_cl_is_other_connecting(struct mei_cl *cl); 82bool mei_cl_is_other_connecting(struct mei_cl *cl);
@@ -102,8 +84,8 @@ int mei_cl_disconnect(struct mei_cl *cl);
102int mei_cl_connect(struct mei_cl *cl, struct file *file); 84int mei_cl_connect(struct mei_cl *cl, struct file *file);
103int mei_cl_read_start(struct mei_cl *cl, size_t length); 85int mei_cl_read_start(struct mei_cl *cl, size_t length);
104int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); 86int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
105int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, 87int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
106 s32 *slots, struct mei_cl_cb *cmpl_list); 88 struct mei_cl_cb *cmpl_list);
107 89
108void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); 90void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
109 91
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a3ae154444b2..ced5b777c70f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -75,6 +75,54 @@ static const struct file_operations mei_dbgfs_fops_meclients = {
75 .llseek = generic_file_llseek, 75 .llseek = generic_file_llseek,
76}; 76};
77 77
78static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
79 size_t cnt, loff_t *ppos)
80{
81 struct mei_device *dev = fp->private_data;
82 struct mei_cl *cl;
83 const size_t bufsz = 1024;
84 char *buf;
85 int i = 0;
86 int pos = 0;
87 int ret;
88
89 if (!dev)
90 return -ENODEV;
91
92 buf = kzalloc(bufsz, GFP_KERNEL);
93 if (!buf)
94 return -ENOMEM;
95
96 pos += scnprintf(buf + pos, bufsz - pos,
97 " |me|host|state|rd|wr|\n");
98
99 mutex_lock(&dev->device_lock);
100
101 /* if the driver is not enabled the list won't be consistent */
102 if (dev->dev_state != MEI_DEV_ENABLED)
103 goto out;
104
105 list_for_each_entry(cl, &dev->file_list, link) {
106
107 pos += scnprintf(buf + pos, bufsz - pos,
108 "%2d|%2d|%4d|%5d|%2d|%2d|\n",
109 i, cl->me_client_id, cl->host_client_id, cl->state,
110 cl->reading_state, cl->writing_state);
111 i++;
112 }
113out:
114 mutex_unlock(&dev->device_lock);
115 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
116 kfree(buf);
117 return ret;
118}
119
120static const struct file_operations mei_dbgfs_fops_active = {
121 .open = simple_open,
122 .read = mei_dbgfs_read_active,
123 .llseek = generic_file_llseek,
124};
125
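The new "active" debugfs file prints one row per host client. A hedged userspace sketch for dumping it; the path depends on where debugfs is mounted and on the name passed to mei_dbgfs_register(), so the one below is only an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[1024];
        ssize_t n;
        /* assumed location: <debugfs mount>/<device name>/active */
        int fd = open("/sys/kernel/debug/mei/active", O_RDONLY);

        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}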
78static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, 126static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
79 size_t cnt, loff_t *ppos) 127 size_t cnt, loff_t *ppos)
80{ 128{
@@ -128,6 +176,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
128 dev_err(&dev->pdev->dev, "meclients: registration failed\n"); 176 dev_err(&dev->pdev->dev, "meclients: registration failed\n");
129 goto err; 177 goto err;
130 } 178 }
179 f = debugfs_create_file("active", S_IRUSR, dir,
180 dev, &mei_dbgfs_fops_active);
181 if (!f) {
182 dev_err(&dev->pdev->dev, "active: registration failed\n");
183 goto err;
184 }
131 f = debugfs_create_file("devstate", S_IRUSR, dir, 185 f = debugfs_create_file("devstate", S_IRUSR, dir,
132 dev, &mei_dbgfs_fops_devstate); 186 dev, &mei_dbgfs_fops_devstate);
133 if (!f) { 187 if (!f) {
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 28cd74c073b9..4960288e543a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -21,7 +21,41 @@
21 21
22#include "mei_dev.h" 22#include "mei_dev.h"
23#include "hbm.h" 23#include "hbm.h"
24#include "hw-me.h" 24#include "client.h"
25
26static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status)
27{
28#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status
29 switch (status) {
30 MEI_CL_CS(SUCCESS);
31 MEI_CL_CS(NOT_FOUND);
32 MEI_CL_CS(ALREADY_STARTED);
33 MEI_CL_CS(OUT_OF_RESOURCES);
34 MEI_CL_CS(MESSAGE_SMALL);
35 default: return "unknown";
36 }
37#undef MEI_CL_CS
38}
39
40/**
41 * mei_cl_conn_status_to_errno - convert client connect response
42 * status to error code
43 *
44 * @status: client connect response status
45 *
46 * returns corresponding error code
47 */
48static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
49{
50 switch (status) {
51 case MEI_CL_CONN_SUCCESS: return 0;
52 case MEI_CL_CONN_NOT_FOUND: return -ENOTTY;
53 case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY;
54 case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
55 case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL;
56 default: return -EINVAL;
57 }
58}
25 59
26/** 60/**
27 * mei_hbm_me_cl_allocate - allocates storage for me clients 61 * mei_hbm_me_cl_allocate - allocates storage for me clients
@@ -100,33 +134,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
100 134
101 135
102/** 136/**
103 * is_treat_specially_client - checks if the message belongs
104 * to the file private data.
105 *
106 * @cl: private data of the file object
107 * @rs: connect response bus message
108 *
109 */
110static bool is_treat_specially_client(struct mei_cl *cl,
111 struct hbm_client_connect_response *rs)
112{
113 if (mei_hbm_cl_addr_equal(cl, rs)) {
114 if (!rs->status) {
115 cl->state = MEI_FILE_CONNECTED;
116 cl->status = 0;
117
118 } else {
119 cl->state = MEI_FILE_DISCONNECTED;
120 cl->status = -ENODEV;
121 }
122 cl->timer_count = 0;
123
124 return true;
125 }
126 return false;
127}
128
129/**
130 * mei_hbm_idle - set hbm to idle state 137 * mei_hbm_idle - set hbm to idle state
131 * 138 *
132 * @dev: the device structure 139 * @dev: the device structure
@@ -147,13 +154,13 @@ int mei_hbm_start_wait(struct mei_device *dev)
147 ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, 154 ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
148 dev->hbm_state == MEI_HBM_IDLE || 155 dev->hbm_state == MEI_HBM_IDLE ||
149 dev->hbm_state >= MEI_HBM_STARTED, 156 dev->hbm_state >= MEI_HBM_STARTED,
150 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); 157 mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
151 mutex_lock(&dev->device_lock); 158 mutex_lock(&dev->device_lock);
152 159
153 if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { 160 if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) {
154 dev->hbm_state = MEI_HBM_IDLE; 161 dev->hbm_state = MEI_HBM_IDLE;
155 dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); 162 dev_err(&dev->pdev->dev, "waiting for mei start failed\n");
156 return -ETIMEDOUT; 163 return -ETIME;
157 } 164 }
158 return 0; 165 return 0;
159} 166}
@@ -283,17 +290,18 @@ static int mei_hbm_prop_req(struct mei_device *dev)
283} 290}
284 291
285/** 292/**
286 * mei_hbm_stop_req_prepare - prepare stop request message 293 * mei_hbm_stop_req - send stop request message
287 * 294 *
288 * @dev - mei device 295 * @dev - mei device
289 * @mei_hdr - mei message header 296 * @cl: client info
290 * @data - hbm message body buffer 297 *
298 * This function returns -EIO on write failure
291 */ 299 */
292static void mei_hbm_stop_req_prepare(struct mei_device *dev, 300static int mei_hbm_stop_req(struct mei_device *dev)
293 struct mei_msg_hdr *mei_hdr, unsigned char *data)
294{ 301{
302 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
295 struct hbm_host_stop_request *req = 303 struct hbm_host_stop_request *req =
296 (struct hbm_host_stop_request *)data; 304 (struct hbm_host_stop_request *)dev->wr_msg.data;
297 const size_t len = sizeof(struct hbm_host_stop_request); 305 const size_t len = sizeof(struct hbm_host_stop_request);
298 306
299 mei_hbm_hdr(mei_hdr, len); 307 mei_hbm_hdr(mei_hdr, len);
@@ -301,6 +309,8 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
301 memset(req, 0, len); 309 memset(req, 0, len);
302 req->hbm_cmd = HOST_STOP_REQ_CMD; 310 req->hbm_cmd = HOST_STOP_REQ_CMD;
303 req->reason = DRIVER_STOP_REQUEST; 311 req->reason = DRIVER_STOP_REQUEST;
312
313 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
304} 314}
305 315
306/** 316/**
@@ -319,8 +329,7 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
319 mei_hbm_hdr(mei_hdr, len); 329 mei_hbm_hdr(mei_hdr, len);
320 mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); 330 mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
321 331
322 dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", 332 cl_dbg(dev, cl, "sending flow control\n");
323 cl->host_client_id, cl->me_client_id);
324 333
325 return mei_write_message(dev, mei_hdr, dev->wr_msg.data); 334 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
326} 335}
@@ -330,27 +339,34 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
330 * 339 *
331 * @dev: the device structure 340 * @dev: the device structure
332 * @flow: flow control. 341 * @flow: flow control.
342 *
343 * return 0 on success, < 0 otherwise
333 */ 344 */
334static void mei_hbm_add_single_flow_creds(struct mei_device *dev, 345static int mei_hbm_add_single_flow_creds(struct mei_device *dev,
335 struct hbm_flow_control *flow) 346 struct hbm_flow_control *flow)
336{ 347{
337 struct mei_me_client *client; 348 struct mei_me_client *me_cl;
338 int i; 349 int id;
339 350
340 for (i = 0; i < dev->me_clients_num; i++) { 351 id = mei_me_cl_by_id(dev, flow->me_addr);
341 client = &dev->me_clients[i]; 352 if (id < 0) {
342 if (client && flow->me_addr == client->client_id) { 353 dev_err(&dev->pdev->dev, "no such me client %d\n",
343 if (client->props.single_recv_buf) { 354 flow->me_addr);
344 client->mei_flow_ctrl_creds++; 355 return id;
345 dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
346 flow->me_addr);
347 dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
348 client->mei_flow_ctrl_creds);
349 } else {
350 BUG(); /* error in flow control */
351 }
352 }
353 } 356 }
357
358 me_cl = &dev->me_clients[id];
359 if (me_cl->props.single_recv_buf) {
360 me_cl->mei_flow_ctrl_creds++;
361 dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
362 flow->me_addr);
363 dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
364 me_cl->mei_flow_ctrl_creds);
365 } else {
366 BUG(); /* error in flow control */
367 }
368
369 return 0;
354} 370}
355 371
356/** 372/**
@@ -362,8 +378,7 @@ static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
362static void mei_hbm_cl_flow_control_res(struct mei_device *dev, 378static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
363 struct hbm_flow_control *flow_control) 379 struct hbm_flow_control *flow_control)
364{ 380{
365 struct mei_cl *cl = NULL; 381 struct mei_cl *cl;
366 struct mei_cl *next = NULL;
367 382
368 if (!flow_control->host_addr) { 383 if (!flow_control->host_addr) {
369 /* single receive buffer */ 384 /* single receive buffer */
@@ -372,7 +387,7 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
372 } 387 }
373 388
374 /* normal connection */ 389 /* normal connection */
375 list_for_each_entry_safe(cl, next, &dev->file_list, link) { 390 list_for_each_entry(cl, &dev->file_list, link) {
376 if (mei_hbm_cl_addr_equal(cl, flow_control)) { 391 if (mei_hbm_cl_addr_equal(cl, flow_control)) {
377 cl->mei_flow_ctrl_creds++; 392 cl->mei_flow_ctrl_creds++;
378 dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", 393 dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
@@ -405,6 +420,25 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
405} 420}
406 421
407/** 422/**
423 * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW
424 *
425 * @dev: the device structure
426 * @cl: a client to disconnect from
427 *
428 * This function returns -EIO on write failure
429 */
430int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
431{
432 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
433 const size_t len = sizeof(struct hbm_client_connect_response);
434
435 mei_hbm_hdr(mei_hdr, len);
436 mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len);
437
438 return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
439}
440
441/**
408 * mei_hbm_cl_disconnect_res - disconnect response from ME 442 * mei_hbm_cl_disconnect_res - disconnect response from ME
409 * 443 *
410 * @dev: the device structure 444 * @dev: the device structure
@@ -414,29 +448,23 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
414 struct hbm_client_connect_response *rs) 448 struct hbm_client_connect_response *rs)
415{ 449{
416 struct mei_cl *cl; 450 struct mei_cl *cl;
417 struct mei_cl_cb *pos = NULL, *next = NULL; 451 struct mei_cl_cb *cb, *next;
418 452
419 dev_dbg(&dev->pdev->dev, 453 dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:me=%02d host=%02d status=%d\n",
420 "disconnect_response:\n" 454 rs->me_addr, rs->host_addr, rs->status);
421 "ME Client = %d\n" 455
422 "Host Client = %d\n" 456 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
423 "Status = %d\n", 457 cl = cb->cl;
424 rs->me_addr, 458
425 rs->host_addr, 459 /* this should not happen */
426 rs->status); 460 if (WARN_ON(!cl)) {
427 461 list_del(&cb->list);
428 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
429 cl = pos->cl;
430
431 if (!cl) {
432 list_del(&pos->list);
433 return; 462 return;
434 } 463 }
435 464
436 dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
437 if (mei_hbm_cl_addr_equal(cl, rs)) { 465 if (mei_hbm_cl_addr_equal(cl, rs)) {
438 list_del(&pos->list); 466 list_del(&cb->list);
439 if (!rs->status) 467 if (rs->status == MEI_CL_DISCONN_SUCCESS)
440 cl->state = MEI_FILE_DISCONNECTED; 468 cl->state = MEI_FILE_DISCONNECTED;
441 469
442 cl->status = 0; 470 cl->status = 0;
@@ -476,46 +504,41 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
476{ 504{
477 505
478 struct mei_cl *cl; 506 struct mei_cl *cl;
479 struct mei_cl_cb *pos = NULL, *next = NULL; 507 struct mei_cl_cb *cb, *next;
480 508
481 dev_dbg(&dev->pdev->dev, 509 dev_dbg(&dev->pdev->dev, "hbm: connect response cl:me=%02d host=%02d status=%s\n",
482 "connect_response:\n" 510 rs->me_addr, rs->host_addr,
483 "ME Client = %d\n" 511 mei_cl_conn_status_str(rs->status));
484 "Host Client = %d\n"
485 "Status = %d\n",
486 rs->me_addr,
487 rs->host_addr,
488 rs->status);
489 512
490 /* if WD or iamthif client treat specially */ 513 cl = NULL;
491 514
492 if (is_treat_specially_client(&dev->wd_cl, rs)) { 515 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
493 dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
494 mei_watchdog_register(dev);
495 516
496 return; 517 cl = cb->cl;
497 } 518 /* this should not happen */
519 if (WARN_ON(!cl)) {
520 list_del_init(&cb->list);
521 continue;
522 }
498 523
499 if (is_treat_specially_client(&dev->iamthif_cl, rs)) { 524 if (cb->fop_type != MEI_FOP_CONNECT)
500 dev->iamthif_state = MEI_IAMTHIF_IDLE; 525 continue;
501 return;
502 }
503 list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
504 526
505 cl = pos->cl; 527 if (mei_hbm_cl_addr_equal(cl, rs)) {
506 if (!cl) { 528 list_del(&cb->list);
507 list_del(&pos->list); 529 break;
508 return;
509 }
510 if (pos->fop_type == MEI_FOP_IOCTL) {
511 if (is_treat_specially_client(cl, rs)) {
512 list_del(&pos->list);
513 cl->status = 0;
514 cl->timer_count = 0;
515 break;
516 }
517 } 530 }
518 } 531 }
532
533 if (!cl)
534 return;
535
536 cl->timer_count = 0;
537 if (rs->status == MEI_CL_CONN_SUCCESS)
538 cl->state = MEI_FILE_CONNECTED;
539 else
540 cl->state = MEI_FILE_DISCONNECTED;
541 cl->status = mei_cl_conn_status_to_errno(rs->status);
519} 542}
520 543
521 544
@@ -525,32 +548,34 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
525 * 548 *
526 * @dev: the device structure. 549 * @dev: the device structure.
527 * @disconnect_req: disconnect request bus message from the me 550 * @disconnect_req: disconnect request bus message from the me
551 *
552 * returns -ENOMEM on allocation failure
528 */ 553 */
529static void mei_hbm_fw_disconnect_req(struct mei_device *dev, 554static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
530 struct hbm_client_connect_request *disconnect_req) 555 struct hbm_client_connect_request *disconnect_req)
531{ 556{
532 struct mei_cl *cl, *next; 557 struct mei_cl *cl;
533 const size_t len = sizeof(struct hbm_client_connect_response); 558 struct mei_cl_cb *cb;
534 559
535 list_for_each_entry_safe(cl, next, &dev->file_list, link) { 560 list_for_each_entry(cl, &dev->file_list, link) {
536 if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { 561 if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
537 dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", 562 dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
538 disconnect_req->host_addr, 563 disconnect_req->host_addr,
539 disconnect_req->me_addr); 564 disconnect_req->me_addr);
540 cl->state = MEI_FILE_DISCONNECTED; 565 cl->state = MEI_FILE_DISCONNECTED;
541 cl->timer_count = 0; 566 cl->timer_count = 0;
542 if (cl == &dev->wd_cl) 567
543 dev->wd_pending = false; 568 cb = mei_io_cb_init(cl, NULL);
544 else if (cl == &dev->iamthif_cl) 569 if (!cb)
545 dev->iamthif_timer = 0; 570 return -ENOMEM;
546 571 cb->fop_type = MEI_FOP_DISCONNECT_RSP;
547 /* prepare disconnect response */ 572 cl_dbg(dev, cl, "add disconnect response as first\n");
548 mei_hbm_hdr(&dev->wr_ext_msg.hdr, len); 573 list_add(&cb->list, &dev->ctrl_wr_list.list);
549 mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, 574
550 dev->wr_ext_msg.data, len);
551 break; 575 break;
552 } 576 }
553 } 577 }
578 return 0;
554} 579}
555 580
556 581
@@ -629,10 +654,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
629 dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); 654 dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
630 655
631 dev->hbm_state = MEI_HBM_STOPPED; 656 dev->hbm_state = MEI_HBM_STOPPED;
632 mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, 657 if (mei_hbm_stop_req(dev)) {
633 dev->wr_msg.data);
634 if (mei_write_message(dev, &dev->wr_msg.hdr,
635 dev->wr_msg.data)) {
636 dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); 658 dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
637 return -EIO; 659 return -EIO;
638 } 660 }
@@ -778,10 +800,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
778 800
779 case ME_STOP_REQ_CMD: 801 case ME_STOP_REQ_CMD:
780 dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); 802 dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
781
782 dev->hbm_state = MEI_HBM_STOPPED; 803 dev->hbm_state = MEI_HBM_STOPPED;
783 mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, 804 if (mei_hbm_stop_req(dev)) {
784 dev->wr_ext_msg.data); 805 dev_err(&dev->pdev->dev, "hbm: stop request: failed to send stop request\n");
806 return -EIO;
807 }
785 break; 808 break;
786 default: 809 default:
787 BUG(); 810 BUG();
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 5f92188a5cd7..20e8782711c0 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -54,6 +54,7 @@ int mei_hbm_start_req(struct mei_device *dev);
54int mei_hbm_start_wait(struct mei_device *dev); 54int mei_hbm_start_wait(struct mei_device *dev);
55int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); 55int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
56int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); 56int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
57int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
57int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); 58int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
58bool mei_hbm_version_is_supported(struct mei_device *dev); 59bool mei_hbm_version_is_supported(struct mei_device *dev);
59 60
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6f656c053b14..8dbdaaef1af5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -20,10 +20,10 @@
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21 21
22#include "mei_dev.h" 22#include "mei_dev.h"
23#include "hw-me.h"
24
25#include "hbm.h" 23#include "hbm.h"
26 24
25#include "hw-me.h"
26#include "hw-me-regs.h"
27 27
28/** 28/**
29 * mei_me_reg_read - Reads 32bit data from the mei device 29 * mei_me_reg_read - Reads 32bit data from the mei device
@@ -240,11 +240,11 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
240 mutex_unlock(&dev->device_lock); 240 mutex_unlock(&dev->device_lock);
241 err = wait_event_interruptible_timeout(dev->wait_hw_ready, 241 err = wait_event_interruptible_timeout(dev->wait_hw_ready,
242 dev->recvd_hw_ready, 242 dev->recvd_hw_ready,
243 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); 243 mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
244 mutex_lock(&dev->device_lock); 244 mutex_lock(&dev->device_lock);
245 if (!err && !dev->recvd_hw_ready) { 245 if (!err && !dev->recvd_hw_ready) {
246 if (!err) 246 if (!err)
247 err = -ETIMEDOUT; 247 err = -ETIME;
248 dev_err(&dev->pdev->dev, 248 dev_err(&dev->pdev->dev,
249 "wait hw ready failed. status = %d\n", err); 249 "wait hw ready failed. status = %d\n", err);
250 return err; 250 return err;
@@ -303,7 +303,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)
303 * 303 *
304 * @dev: the device structure 304 * @dev: the device structure
305 * 305 *
306 * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count 306 * returns -EOVERFLOW if overflow, otherwise empty slots count
307 */ 307 */
308static int mei_me_hbuf_empty_slots(struct mei_device *dev) 308static int mei_me_hbuf_empty_slots(struct mei_device *dev)
309{ 309{
@@ -326,7 +326,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
326 326
327 327
328/** 328/**
329 * mei_write_message - writes a message to mei device. 329 * mei_me_write_message - writes a message to mei device.
330 * 330 *
331 * @dev: the device structure 331 * @dev: the device structure
332 * @header: mei HECI header of message 332 * @header: mei HECI header of message
@@ -354,7 +354,7 @@ static int mei_me_write_message(struct mei_device *dev,
354 354
355 dw_cnt = mei_data2slots(length); 355 dw_cnt = mei_data2slots(length);
356 if (empty_slots < 0 || dw_cnt > empty_slots) 356 if (empty_slots < 0 || dw_cnt > empty_slots)
357 return -EIO; 357 return -EMSGSIZE;
358 358
359 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); 359 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
360 360
@@ -381,7 +381,7 @@ static int mei_me_write_message(struct mei_device *dev,
381 * 381 *
382 * @dev: the device structure 382 * @dev: the device structure
383 * 383 *
384 * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count 384 * returns -EOVERFLOW if overflow, otherwise filled slots count
385 */ 385 */
386static int mei_me_count_full_read_slots(struct mei_device *dev) 386static int mei_me_count_full_read_slots(struct mei_device *dev)
387{ 387{
@@ -505,17 +505,25 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
505 /* check slots available for reading */ 505 /* check slots available for reading */
506 slots = mei_count_full_read_slots(dev); 506 slots = mei_count_full_read_slots(dev);
507 while (slots > 0) { 507 while (slots > 0) {
508 /* we have urgent data to send so break the read */
509 if (dev->wr_ext_msg.hdr.length)
510 break;
511 dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); 508 dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
512 rets = mei_irq_read_handler(dev, &complete_list, &slots); 509 rets = mei_irq_read_handler(dev, &complete_list, &slots);
510 /* There is a race between ME write and interrupt delivery:
511 * Not all data is always available immediately after the
512 * interrupt, so try to read again on the next interrupt.
513 */
514 if (rets == -ENODATA)
515 break;
516
513 if (rets && dev->dev_state != MEI_DEV_RESETTING) { 517 if (rets && dev->dev_state != MEI_DEV_RESETTING) {
518 dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
519 rets);
514 schedule_work(&dev->reset_work); 520 schedule_work(&dev->reset_work);
515 goto end; 521 goto end;
516 } 522 }
517 } 523 }
518 524
525 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
526
519 rets = mei_irq_write_handler(dev, &complete_list); 527 rets = mei_irq_write_handler(dev, &complete_list);
520 528
521 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 529 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
new file mode 100644
index 000000000000..7283c24c1af1
--- /dev/null
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -0,0 +1,294 @@
1/******************************************************************************
2 * Intel Management Engine Interface (Intel MEI) Linux driver
3 * Intel MEI Interface Header
4 *
5 * This file is provided under a dual BSD/GPLv2 license. When using or
6 * redistributing this file, you may do so under either license.
7 *
8 * GPL LICENSE SUMMARY
9 *
10 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING
23 *
24 * Contact Information:
25 * Intel Corporation.
26 * linux-mei@linux.intel.com
27 * http://www.intel.com
28 *
29 * BSD LICENSE
30 *
31 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 *
38 * * Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * * Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in
42 * the documentation and/or other materials provided with the
43 * distribution.
44 * * Neither the name Intel Corporation nor the names of its
45 * contributors may be used to endorse or promote products derived
46 * from this software without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
51 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
52 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
53 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
54 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
58 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 *****************************************************************************/
61#ifndef _MEI_HW_TXE_REGS_H_
62#define _MEI_HW_TXE_REGS_H_
63
64#include "hw.h"
65
66#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC)
67#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
68#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
69#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
70#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC)
71#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC)
72
73enum {
74 SEC_BAR,
75 BRIDGE_BAR,
76
77 NUM_OF_MEM_BARS
78};
79
80/* SeC FW Status Register
81 *
82 * FW uses this register in order to report its status to host.
83 * This register resides in PCI-E config space.
84 */
85#define PCI_CFG_TXE_FW_STS0 0x40
86# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F
87# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0
88# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200
89# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000
90# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000
91# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000
92
93
94#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */
95
96/* IPC Input Doorbell Register */
97#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR)
98
99/* IPC Input Status Register
100 * This register indicates whether or not processing of
101 * the most recent command has been completed by the SEC
102 * New commands and payloads should not be written by the Host
103 * until this indicates that the previous command has been processed.
104 */
105#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR)
106# define SEC_IPC_INPUT_STATUS_RDY BIT(0)
107
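As the comment above states, the host must not write a new command until SEC_IPC_INPUT_STATUS reports the previous one as processed. A minimal sketch of that check, assuming the SeC register accessors defined later in hw-txe.c (the helper name is illustrative):

static bool txe_input_consumed(struct mei_txe_hw *hw)
{
        u32 status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);

        /* previous command consumed: payload RAM and doorbell may be reused */
        return !!(status & SEC_IPC_INPUT_STATUS_RDY);
}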
108/* IPC Host Interrupt Status Register */
109#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR)
110#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0)
111#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1)
112#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5)
113#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17)
114#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18)
115#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19)
116#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21)
117
118/* Convenient mask for pending interrupts */
119#define SEC_IPC_HOST_INT_STATUS_PENDING \
120 (SEC_IPC_HOST_INT_STATUS_OUT_DB| \
121 SEC_IPC_HOST_INT_STATUS_IN_RDY)
122
123/* IPC Host Interrupt Mask Register */
124#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR)
125
126# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */
127# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */
128
129/* IPC Input Payload RAM */
130#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR)
131/* IPC Shared Payload RAM */
132#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR)
133
134/* SeC Address Translation Table Entry 2 - Ctrl
135 *
136 * This register resides also in SeC's PCI-E Memory space.
137 */
138#define SATT2_CTRL_REG 0x1040
139# define SATT2_CTRL_VALID_MSK BIT(0)
140# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8
141# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12)
142
143/* SATT Table Entry 2 SAP Base Address Register */
144#define SATT2_SAP_BA_REG 0x1044
145/* SATT Table Entry 2 SAP Size Register. */
146#define SATT2_SAP_SIZE_REG 0x1048
147 /* SATT Table Entry 2 SAP Bridge Address - LSB Register */
148#define SATT2_BRG_BA_LSB_REG 0x104C
149
150/* Host High-level Interrupt Status Register */
151#define HHISR_REG 0x2020
152/* Host High-level Interrupt Enable Register
153 *
154 * Resides in PCI memory space. This is the top hierarchy for
155 * interrupts from SeC to host, aggregating both interrupts that
156 * arrive through HICR registers as well as interrupts
157 * that arrive via IPC.
158 */
159#define HHIER_REG 0x2024
160#define IPC_HHIER_SEC BIT(0)
161#define IPC_HHIER_BRIDGE BIT(1)
162#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE)
163
164/* Host High-level Interrupt Mask Register.
165 *
166 * Resides in PCI memory space.
167 * This is the top hierarchy for masking interrupts from SeC to host.
168 */
169#define HHIMR_REG 0x2028
170#define IPC_HHIMR_SEC BIT(0)
171#define IPC_HHIMR_BRIDGE BIT(1)
172
173/* Host High-level IRQ Status Register */
174#define HHIRQSR_REG 0x202C
175
176/* Host Interrupt Cause Register 0 - SeC IPC Readiness
177 *
178 * This register is both an ICR to Host from PCI Memory Space
179 * and it is also exposed in the SeC memory space.
180 * This register is used by SeC's IPC driver in order
181 * to synchronize with host about IPC interface state.
182 */
183#define HICR_SEC_IPC_READINESS_REG 0x2040
184#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0)
185#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1)
186#define HICR_SEC_IPC_READINESS_SYS_RDY \
187 (HICR_SEC_IPC_READINESS_HOST_RDY | \
188 HICR_SEC_IPC_READINESS_SEC_RDY)
189#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2)
190
191/* Host Interrupt Cause Register 1 - Aliveness Response */
192/* This register is both an ICR to Host from PCI Memory Space
193 * and it is also exposed in the SeC memory space.
194 * The register may be used by SeC to ACK a host request for aliveness.
195 */
196#define HICR_HOST_ALIVENESS_RESP_REG 0x2044
197#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0)
198
199/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */
200#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048
201
202/* Host Interrupt Status Register.
203 *
204 * Resides in PCI memory space.
205 * This is the main register involved in generating interrupts
206 * from SeC to host via HICRs.
207 * The interrupt generation rules are as follows:
208 * An interrupt will be generated whenever for any i,
209 * there is a transition from a state where at least one of
210 * the following conditions did not hold, to a state where
211 * ALL the following conditions hold:
212 * A) HISR.INT[i]_STS == 1.
213 * B) HIER.INT[i]_EN == 1.
214 */
215#define HISR_REG 0x2060
216#define HISR_INT_0_STS BIT(0)
217#define HISR_INT_1_STS BIT(1)
218#define HISR_INT_2_STS BIT(2)
219#define HISR_INT_3_STS BIT(3)
220#define HISR_INT_4_STS BIT(4)
221#define HISR_INT_5_STS BIT(5)
222#define HISR_INT_6_STS BIT(6)
223#define HISR_INT_7_STS BIT(7)
224#define HISR_INT_STS_MSK \
225 (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS)
226
227/* Host Interrupt Enable Register. Resides in PCI memory space. */
228#define HIER_REG 0x2064
229#define HIER_INT_0_EN BIT(0)
230#define HIER_INT_1_EN BIT(1)
231#define HIER_INT_2_EN BIT(2)
232#define HIER_INT_3_EN BIT(3)
233#define HIER_INT_4_EN BIT(4)
234#define HIER_INT_5_EN BIT(5)
235#define HIER_INT_6_EN BIT(6)
236#define HIER_INT_7_EN BIT(7)
237
238#define HIER_INT_EN_MSK \
239 (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN)
240
241
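The HISR/HIER pair above follows the stated rule: cause i raises an interrupt only while both its status bit and its enable bit are set. A one-line sketch of that predicate, assuming cached register values (the helper name is illustrative):

static inline bool txe_hicr_int_pending(u32 hisr, u32 hier, unsigned int i)
{
        /* A) HISR.INT[i]_STS == 1 and B) HIER.INT[i]_EN == 1 */
        return (hisr & BIT(i)) && (hier & BIT(i));
}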
242/* SEC Memory Space IPC output payload.
243 *
244 * This register is part of the output payload which SEC provides to host.
245 */
246#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG 0x20C0
247
248/* SeC Interrupt Cause Register - Host Aliveness Request
249 * This register is both an ICR to SeC and it is also exposed
250 * in the host-visible PCI memory space.
251 * The register is used by host to request SeC aliveness.
252 */
253#define SICR_HOST_ALIVENESS_REQ_REG 0x214C
254#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0)
255
256
257/* SeC Interrupt Cause Register - Host IPC Readiness
258 *
259 * This register is both an ICR to SeC and it is also exposed
260 * in the host-visible PCI memory space.
261 * This register is used by the host's SeC driver in order
262 * to synchronize with SeC about IPC interface state.
263 */
264#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150
265
266
267#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0)
268#define SICR_HOST_IPC_READINESS_SEC_RDY BIT(1)
269#define SICR_HOST_IPC_READINESS_SYS_RDY \
270 (SICR_HOST_IPC_READINESS_HOST_RDY | \
271 SICR_HOST_IPC_READINESS_SEC_RDY)
272#define SICR_HOST_IPC_READINESS_RDY_CLR BIT(2)
273
274/* SeC Interrupt Cause Register - SeC IPC Output Status
275 *
276 * This register indicates whether or not processing of the most recent
277 * command has been completed by the Host.
278 * New commands and payloads should not be written by SeC until this
279 * register indicates that the previous command has been processed.
280 */
281#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154
282# define SEC_IPC_OUTPUT_STATUS_RDY BIT(0)
283
284
285
286/* MEI IPC Message payload size 64 bytes */
287#define PAYLOAD_SIZE 64
288
289/* MAX size for SATT range 32MB */
290#define SATT_RANGE_MAX (32 << 20)
291
292
293#endif /* _MEI_HW_TXE_REGS_H_ */
294
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
new file mode 100644
index 000000000000..f60182a52f96
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.c
@@ -0,0 +1,1107 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2013-2014, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
18#include <linux/jiffies.h>
19#include <linux/delay.h>
20#include <linux/kthread.h>
21#include <linux/irqreturn.h>
22
23#include <linux/mei.h>
24
25#include "mei_dev.h"
26#include "hw-txe.h"
27#include "client.h"
28#include "hbm.h"
29
30/**
31 * mei_txe_reg_read - Reads 32bit data from the device
32 *
33 * @base_addr: registers base address
34 * @offset: register offset
35 *
36 */
37static inline u32 mei_txe_reg_read(void __iomem *base_addr,
38 unsigned long offset)
39{
40 return ioread32(base_addr + offset);
41}
42
43/**
44 * mei_txe_reg_write - Writes 32bit data to the device
45 *
46 * @base_addr: registers base address
47 * @offset: register offset
48 * @value: the value to write
49 */
50static inline void mei_txe_reg_write(void __iomem *base_addr,
51 unsigned long offset, u32 value)
52{
53 iowrite32(value, base_addr + offset);
54}
55
56/**
57 * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
58 *
59 * @dev: the device structure
60 * @offset: register offset
61 *
62 * Reads 32bit data from the SeC BAR without checking aliveness
63 */
64static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
65 unsigned long offset)
66{
67 return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
68}
69
70/**
71 * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
72 *
73 * @dev: the device structure
74 * @offset: register offset
75 *
76 * Reads 32bit data from the SeC BAR and warns loudly if aliveness is not set
77 */
78static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
79 unsigned long offset)
80{
81 WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
82 return mei_txe_sec_reg_read_silent(hw, offset);
83}
84/**
85 * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
86 * doesn't check for aliveness
87 *
88 * @dev: the device structure
89 * @offset: register offset
90 * @value: value to write
91 *
92 * Writes 32bit data to the SeC BAR without checking aliveness
93 */
94static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
95 unsigned long offset, u32 value)
96{
97 mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
98}
99
100/**
101 * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
102 *
103 * @dev: the device structure
104 * @offset: register offset
105 * @value: value to write
106 *
107 * Writes 32bit data to the SeC BAR and warns loudly if aliveness is not set
108 */
109static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
110 unsigned long offset, u32 value)
111{
112 WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
113 mei_txe_sec_reg_write_silent(hw, offset, value);
114}
115/**
116 * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
117 *
118 * @hw: the device structure
119 * @offset: offset from which to read the data
120 *
121 */
122static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
123 unsigned long offset)
124{
125 return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
126}
127
128/**
129 * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
130 *
131 * @hw: the device structure
132 * @offset: offset to which to write the data
133 * @value: the value to write
134 */
135static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
136 unsigned long offset, u32 value)
137{
138 mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
139}
140
141/**
142 * mei_txe_aliveness_set - request for aliveness change
143 *
144 * @dev: the device structure
145 * @req: requested aliveness value
146 *
147 * Request for aliveness change and returns true if the change is
148 * really needed and false if aliveness is already
149 * in the requested state
150 * Requires device lock to be held
151 */
152static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
153{
154
155 struct mei_txe_hw *hw = to_txe_hw(dev);
156 bool do_req = hw->aliveness != req;
157
158 dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
159 hw->aliveness, req);
160 if (do_req) {
161 hw->recvd_aliveness = false;
162 mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
163 }
164 return do_req;
165}
166
167
168/**
169 * mei_txe_aliveness_req_get - get aliveness requested register value
170 *
171 * @dev: the device structure
172 *
173 * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit
174 * from the SICR_HOST_ALIVENESS_REQ register value
175 */
176static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
177{
178 struct mei_txe_hw *hw = to_txe_hw(dev);
179 u32 reg;
180 reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
181 return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
182}
183
184/**
185 * mei_txe_aliveness_get - get aliveness response register value
186 * @dev: the device structure
187 *
188 * Extract HICR_HOST_ALIVENESS_RESP_ACK bit
189 * from HICR_HOST_ALIVENESS_RESP register value
190 */
191static u32 mei_txe_aliveness_get(struct mei_device *dev)
192{
193 struct mei_txe_hw *hw = to_txe_hw(dev);
194 u32 reg;
195 reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
196 return reg & HICR_HOST_ALIVENESS_RESP_ACK;
197}
198
199/**
200 * mei_txe_aliveness_poll - waits for aliveness to settle
201 *
202 * @dev: the device structure
203 * @expected: expected aliveness value
204 *
205 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
206 * returns > 0 if the expected value was received, -ETIME otherwise
207 */
208static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
209{
210 struct mei_txe_hw *hw = to_txe_hw(dev);
211 int t = 0;
212
213 do {
214 hw->aliveness = mei_txe_aliveness_get(dev);
215 if (hw->aliveness == expected) {
216 dev_dbg(&dev->pdev->dev,
217 "aliveness settled after %d msecs\n", t);
218 return t;
219 }
220 mutex_unlock(&dev->device_lock);
221 msleep(MSEC_PER_SEC / 5);
222 mutex_lock(&dev->device_lock);
223 t += MSEC_PER_SEC / 5;
224 } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
225
226 dev_err(&dev->pdev->dev, "aliveness timed out\n");
227 return -ETIME;
228}
229
230/**
231 * mei_txe_aliveness_wait - waits for aliveness to settle
232 *
233 * @dev: the device structure
234 * @expected: expected aliveness value
235 *
236 * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
237 * returns 0 on success and < 0 otherwise
238 */
239static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
240{
241 struct mei_txe_hw *hw = to_txe_hw(dev);
242 const unsigned long timeout =
243 msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
244 long err;
245 int ret;
246
247 hw->aliveness = mei_txe_aliveness_get(dev);
248 if (hw->aliveness == expected)
249 return 0;
250
251 mutex_unlock(&dev->device_lock);
252 err = wait_event_timeout(hw->wait_aliveness,
253 hw->recvd_aliveness, timeout);
254 mutex_lock(&dev->device_lock);
255
256 hw->aliveness = mei_txe_aliveness_get(dev);
257 ret = hw->aliveness == expected ? 0 : -ETIME;
258
259 if (ret)
260 dev_err(&dev->pdev->dev, "aliveness timed out\n");
261 else
262 dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n",
263 jiffies_to_msecs(timeout - err));
264 hw->recvd_aliveness = false;
265 return ret;
266}
267
268/**
269 * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete
270 *
271 * @dev: the device structure
272 *
273 * returns 0 on success and < 0 otherwise
274 */
275int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
276{
277 if (mei_txe_aliveness_set(dev, req))
278 return mei_txe_aliveness_wait(dev, req);
279 return 0;
280}
281
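Because the SeC BAR accessors above WARN when aliveness is not asserted, callers are expected to raise aliveness first. A hedged usage sketch, assuming a simplified caller that already holds device_lock (the wrapper name is illustrative):

static int txe_with_aliveness(struct mei_device *dev)
{
        int ret;

        ret = mei_txe_aliveness_set_sync(dev, 1);
        if (ret)
                return ret;     /* -ETIME when the ack never arrives */

        /* ... SeC register reads and writes go here ... */

        return 0;
}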
282/**
283 * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
284 *
285 * @dev: the device structure
286 */
287static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
288{
289 struct mei_txe_hw *hw = to_txe_hw(dev);
290 u32 hintmsk;
291 /* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
292 hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
293 hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
294 mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
295}
296
297/**
298 * mei_txe_input_doorbell_set
299 * - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
300 * @dev: the device structure
301 */
302static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
303{
304 /* Clear the interrupt cause */
305 clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
306 mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
307}
308
309/**
310 * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
311 *
312 * @dev: the device structure
313 */
314static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
315{
316 mei_txe_br_reg_write(hw,
317 SICR_SEC_IPC_OUTPUT_STATUS_REG,
318 SEC_IPC_OUTPUT_STATUS_RDY);
319}
320
321/**
322 * mei_txe_is_input_ready - check if TXE is ready for receiving data
323 *
324 * @dev: the device structure
325 */
326static bool mei_txe_is_input_ready(struct mei_device *dev)
327{
328 struct mei_txe_hw *hw = to_txe_hw(dev);
329 u32 status;
330 status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
331 return !!(SEC_IPC_INPUT_STATUS_RDY & status);
332}
333
334/**
335 * mei_txe_intr_clear - clear all interrupts
336 *
337 * @dev: the device structure
338 */
339static inline void mei_txe_intr_clear(struct mei_device *dev)
340{
341 struct mei_txe_hw *hw = to_txe_hw(dev);
342 mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
343 SEC_IPC_HOST_INT_STATUS_PENDING);
344 mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
345 mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
346}
347
348/**
349 * mei_txe_intr_disable - disable all interrupts
350 *
351 * @dev: the device structure
352 */
353static void mei_txe_intr_disable(struct mei_device *dev)
354{
355 struct mei_txe_hw *hw = to_txe_hw(dev);
356 mei_txe_br_reg_write(hw, HHIER_REG, 0);
357 mei_txe_br_reg_write(hw, HIER_REG, 0);
358}
359/**
360 * mei_txe_intr_enable - enable all interrupts
361 *
362 * @dev: the device structure
363 */
364static void mei_txe_intr_enable(struct mei_device *dev)
365{
366 struct mei_txe_hw *hw = to_txe_hw(dev);
367 mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
368 mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
369}
370
371/**
372 * mei_txe_pending_interrupts - check if there are pending interrupts
373 * only Aliveness, Input ready, and output doorbell are of relevance
374 *
375 * @dev: the device structure
376 *
377 * Checks if there are pending interrupts
378 * only Aliveness, Readiness, Input ready, and Output doorbell are relevant
379 */
380static bool mei_txe_pending_interrupts(struct mei_device *dev)
381{
382
383 struct mei_txe_hw *hw = to_txe_hw(dev);
384 bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
385 TXE_INTR_ALIVENESS |
386 TXE_INTR_IN_READY |
387 TXE_INTR_OUT_DB));
388
389 if (ret) {
390 dev_dbg(&dev->pdev->dev,
391 "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
392 !!(hw->intr_cause & TXE_INTR_IN_READY),
393 !!(hw->intr_cause & TXE_INTR_READINESS),
394 !!(hw->intr_cause & TXE_INTR_ALIVENESS),
395 !!(hw->intr_cause & TXE_INTR_OUT_DB));
396 }
397 return ret;
398}
399
400/**
401 * mei_txe_input_payload_write - write a dword to the host buffer
402 * at offset idx
403 *
404 * @dev: the device structure
405 * @idx: index in the host buffer
 406 * @value: value to write
407 */
408static void mei_txe_input_payload_write(struct mei_device *dev,
409 unsigned long idx, u32 value)
410{
411 struct mei_txe_hw *hw = to_txe_hw(dev);
412 mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
413 (idx * sizeof(u32)), value);
414}
415
416/**
417 * mei_txe_out_data_read - read dword from the device buffer
418 * at offset idx
419 *
420 * @dev: the device structure
421 * @idx: index in the device buffer
422 *
423 * returns register value at index
424 */
425static u32 mei_txe_out_data_read(const struct mei_device *dev,
426 unsigned long idx)
427{
428 struct mei_txe_hw *hw = to_txe_hw(dev);
429 return mei_txe_br_reg_read(hw,
430 BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
431}
432
433/* Readiness */
434
435/**
 436 * mei_txe_readiness_set_host_rdy - set the host readiness (HOST_RDY) bit
437 *
438 * @dev: the device structure
439 */
440static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
441{
442 struct mei_txe_hw *hw = to_txe_hw(dev);
443 mei_txe_br_reg_write(hw,
444 SICR_HOST_IPC_READINESS_REQ_REG,
445 SICR_HOST_IPC_READINESS_HOST_RDY);
446}
447
448/**
 449 * mei_txe_readiness_clear - set the readiness RDY_CLR bit
450 *
451 * @dev: the device structure
452 */
453static void mei_txe_readiness_clear(struct mei_device *dev)
454{
455 struct mei_txe_hw *hw = to_txe_hw(dev);
456 mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
457 SICR_HOST_IPC_READINESS_RDY_CLR);
458}
459/**
460 * mei_txe_readiness_get - Reads and returns
461 * the HICR_SEC_IPC_READINESS register value
462 *
463 * @dev: the device structure
464 */
465static u32 mei_txe_readiness_get(struct mei_device *dev)
466{
467 struct mei_txe_hw *hw = to_txe_hw(dev);
468 return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
469}
470
471
472/**
473 * mei_txe_readiness_is_sec_rdy - check readiness
474 * for HICR_SEC_IPC_READINESS_SEC_RDY
475 *
 476 * @readiness: cached readiness state
477 */
478static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
479{
480 return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
481}
482
483/**
484 * mei_txe_hw_is_ready - check if the hw is ready
485 *
486 * @dev: the device structure
487 */
488static bool mei_txe_hw_is_ready(struct mei_device *dev)
489{
490 u32 readiness = mei_txe_readiness_get(dev);
491 return mei_txe_readiness_is_sec_rdy(readiness);
492}
493
494/**
495 * mei_txe_host_is_ready - check if the host is ready
496 *
497 * @dev: the device structure
498 */
499static inline bool mei_txe_host_is_ready(struct mei_device *dev)
500{
501 struct mei_txe_hw *hw = to_txe_hw(dev);
502 u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
503 return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
504}
505
506/**
507 * mei_txe_readiness_wait - wait till readiness settles
508 *
509 * @dev: the device structure
510 *
511 * returns 0 on success and -ETIME on timeout
512 */
513static int mei_txe_readiness_wait(struct mei_device *dev)
514{
515 if (mei_txe_hw_is_ready(dev))
516 return 0;
517
518 mutex_unlock(&dev->device_lock);
519 wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
520 msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
521 mutex_lock(&dev->device_lock);
522 if (!dev->recvd_hw_ready) {
523 dev_err(&dev->pdev->dev, "wait for readiness failed\n");
524 return -ETIME;
525 }
526
527 dev->recvd_hw_ready = false;
528 return 0;
529}
530
531/**
 532 * mei_txe_hw_config - configure hardware at the start of the device
533 *
534 * @dev: the device structure
535 *
 536 * Configuring the hardware at device start should be done only
 537 * once, at device probe time
538 */
539static void mei_txe_hw_config(struct mei_device *dev)
540{
541
542 struct mei_txe_hw *hw = to_txe_hw(dev);
543 /* Doesn't change in runtime */
544 dev->hbuf_depth = PAYLOAD_SIZE / 4;
545
546 hw->aliveness = mei_txe_aliveness_get(dev);
547 hw->readiness = mei_txe_readiness_get(dev);
548
549 dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
550 hw->aliveness, hw->readiness);
551}
552
553
554/**
555 * mei_txe_write - writes a message to device.
556 *
557 * @dev: the device structure
558 * @header: header of message
 559 * @buf: message buffer to be written
 560 * returns 0 on success, <0 otherwise.
561 */
562
563static int mei_txe_write(struct mei_device *dev,
564 struct mei_msg_hdr *header, unsigned char *buf)
565{
566 struct mei_txe_hw *hw = to_txe_hw(dev);
567 unsigned long rem;
568 unsigned long length;
569 int slots = dev->hbuf_depth;
570 u32 *reg_buf = (u32 *)buf;
571 u32 dw_cnt;
572 int i;
573
574 if (WARN_ON(!header || !buf))
575 return -EINVAL;
576
577 length = header->length;
578
579 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
580
581 dw_cnt = mei_data2slots(length);
582 if (dw_cnt > slots)
583 return -EMSGSIZE;
584
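 /* writes are only allowed while aliveness is asserted */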
585 if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
586 return -EAGAIN;
587
588 /* Enable Input Ready Interrupt. */
589 mei_txe_input_ready_interrupt_enable(dev);
590
591 if (!mei_txe_is_input_ready(dev)) {
592 dev_err(&dev->pdev->dev, "Input is not ready");
593 return -EAGAIN;
594 }
595
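 /* the message header occupies payload slot 0 */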
596 mei_txe_input_payload_write(dev, 0, *((u32 *)header));
597
598 for (i = 0; i < length / 4; i++)
599 mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
600
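 /* pad any trailing bytes into a zeroed dword and write it as the last slot */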
601 rem = length & 0x3;
602 if (rem > 0) {
603 u32 reg = 0;
604 memcpy(&reg, &buf[length - rem], rem);
605 mei_txe_input_payload_write(dev, i + 1, reg);
606 }
607
608 /* after each write the whole buffer is consumed */
609 hw->slots = 0;
610
611 /* Set Input-Doorbell */
612 mei_txe_input_doorbell_set(hw);
613
614 return 0;
615}
616
617/**
618 * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
619 *
620 * @dev: the device structure
621 *
 622 * returns PAYLOAD_SIZE minus the message header size, i.e. PAYLOAD_SIZE - 4
623 */
624static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
625{
626 return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
627}
628
629/**
630 * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
631 *
632 * @dev: the device structure
633 *
 634 * returns the current number of free slots (hbuf_depth when free, 0 after a write)
635 */
636static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
637{
638 struct mei_txe_hw *hw = to_txe_hw(dev);
639 return hw->slots;
640}
641
642/**
643 * mei_txe_count_full_read_slots - mimics the me device circular buffer
644 *
645 * @dev: the device structure
646 *
 647 * always returns the read buffer size in dwords
648 */
649static int mei_txe_count_full_read_slots(struct mei_device *dev)
650{
 651 /* the read buffer has a static size */
652 return PAYLOAD_SIZE / 4;
653}
654
655/**
 656 * mei_txe_read_hdr - read the message header, which is always in the first 4 bytes
657 *
658 * @dev: the device structure
659 *
660 * returns mei message header
661 */
662
663static u32 mei_txe_read_hdr(const struct mei_device *dev)
664{
665 return mei_txe_out_data_read(dev, 0);
666}
667/**
668 * mei_txe_read - reads a message from the txe device.
669 *
670 * @dev: the device structure
 671 * @buf: buffer to read the message into
 672 * @len: number of bytes to read
 673 *
 674 * returns 0 on success and -EINVAL on a wrong argument
675 */
676static int mei_txe_read(struct mei_device *dev,
677 unsigned char *buf, unsigned long len)
678{
679
680 struct mei_txe_hw *hw = to_txe_hw(dev);
681 u32 i;
682 u32 *reg_buf = (u32 *)buf;
683 u32 rem = len & 0x3;
684
685 if (WARN_ON(!buf || !len))
686 return -EINVAL;
687
688 dev_dbg(&dev->pdev->dev,
689 "buffer-length = %lu buf[0]0x%08X\n",
690 len, mei_txe_out_data_read(dev, 0));
691
692 for (i = 0; i < len / 4; i++) {
693 /* skip header: index starts from 1 */
694 u32 reg = mei_txe_out_data_read(dev, i + 1);
695 dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg);
696 *reg_buf++ = reg;
697 }
698
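 /* copy the remaining tail bytes from the next output dword */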
699 if (rem) {
700 u32 reg = mei_txe_out_data_read(dev, i + 1);
701 memcpy(reg_buf, &reg, rem);
702 }
703
704 mei_txe_output_ready_set(hw);
705 return 0;
706}
707
708/**
709 * mei_txe_hw_reset - resets host and fw.
710 *
711 * @dev: the device structure
712 * @intr_enable: if interrupt should be enabled after reset.
713 *
714 * returns 0 on success and < 0 in case of error
715 */
716static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
717{
718 struct mei_txe_hw *hw = to_txe_hw(dev);
719
720 u32 aliveness_req;
721 /*
722 * read input doorbell to ensure consistency between Bridge and SeC
 723 * return value might be garbage
724 */
725 (void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
726
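 /* sample the current aliveness request and response before changing them */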
727 aliveness_req = mei_txe_aliveness_req_get(dev);
728 hw->aliveness = mei_txe_aliveness_get(dev);
729
730 /* Disable interrupts in this stage we will poll */
731 mei_txe_intr_disable(dev);
732
733 /*
734 * If Aliveness Request and Aliveness Response are not equal then
735 * wait for them to be equal
736 * Since we might have interrupts disabled - poll for it
737 */
738 if (aliveness_req != hw->aliveness)
739 if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
740 dev_err(&dev->pdev->dev,
741 "wait for aliveness settle failed ... bailing out\n");
742 return -EIO;
743 }
744
745 /*
746 * If Aliveness Request and Aliveness Response are set then clear them
747 */
748 if (aliveness_req) {
749 mei_txe_aliveness_set(dev, 0);
750 if (mei_txe_aliveness_poll(dev, 0) < 0) {
751 dev_err(&dev->pdev->dev,
752 "wait for aliveness failed ... bailing out\n");
753 return -EIO;
754 }
755 }
756
757 /*
 758 * Set readiness RDY_CLR bit
759 */
760 mei_txe_readiness_clear(dev);
761
762 return 0;
763}
764
765/**
766 * mei_txe_hw_start - start the hardware after reset
767 *
768 * @dev: the device structure
769 *
770 * returns 0 on success and < 0 in case of error
771 */
772static int mei_txe_hw_start(struct mei_device *dev)
773{
774 struct mei_txe_hw *hw = to_txe_hw(dev);
775 int ret;
776
777 u32 hisr;
778
779 /* bring back interrupts */
780 mei_txe_intr_enable(dev);
781
782 ret = mei_txe_readiness_wait(dev);
783 if (ret < 0) {
 784 dev_err(&dev->pdev->dev, "waiting for readiness failed\n");
785 return ret;
786 }
787
788 /*
789 * If HISR.INT2_STS interrupt status bit is set then clear it.
790 */
791 hisr = mei_txe_br_reg_read(hw, HISR_REG);
792 if (hisr & HISR_INT_2_STS)
793 mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
794
795 /* Clear the interrupt cause of OutputDoorbell */
796 clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
797
798 ret = mei_txe_aliveness_set_sync(dev, 1);
799 if (ret < 0) {
800 dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n");
801 return ret;
802 }
803
804 /* enable input ready interrupts:
805 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
806 */
807 mei_txe_input_ready_interrupt_enable(dev);
808
809
810 /* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
811 mei_txe_output_ready_set(hw);
812
813 /* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
814 */
815 mei_txe_readiness_set_host_rdy(dev);
816
817 return 0;
818}
819
820/**
821 * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
822 * single bit mask and acknowledge the interrupts
823 *
824 * @dev: the device structure
825 * @do_ack: acknowledge interrupts
826 */
827static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
828{
829 struct mei_txe_hw *hw = to_txe_hw(dev);
830 u32 hisr;
831 u32 hhisr;
832 u32 ipc_isr;
833 u32 aliveness;
834 bool generated;
835
836 /* read interrupt registers */
837 hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
838 generated = (hhisr & IPC_HHIER_MSK);
839 if (!generated)
840 goto out;
841
842 hisr = mei_txe_br_reg_read(hw, HISR_REG);
843
844 aliveness = mei_txe_aliveness_get(dev);
845 if (hhisr & IPC_HHIER_SEC && aliveness)
846 ipc_isr = mei_txe_sec_reg_read_silent(hw,
847 SEC_IPC_HOST_INT_STATUS_REG);
848 else
849 ipc_isr = 0;
850
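 /* an interrupt was generated if any bridge or IPC status bit is pending */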
851 generated = generated ||
852 (hisr & HISR_INT_STS_MSK) ||
853 (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
854
855 if (generated && do_ack) {
856 /* Save the interrupt causes */
857 hw->intr_cause |= hisr & HISR_INT_STS_MSK;
858 if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
859 hw->intr_cause |= TXE_INTR_IN_READY;
860
861
862 mei_txe_intr_disable(dev);
863 /* Clear the interrupts in hierarchy:
 864 * IPC and Bridge, then the High Level */
865 mei_txe_sec_reg_write_silent(hw,
866 SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
867 mei_txe_br_reg_write(hw, HISR_REG, hisr);
868 mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
869 }
870
871out:
872 return generated;
873}
874
875/**
876 * mei_txe_irq_quick_handler - The ISR of the MEI device
877 *
878 * @irq: The irq number
879 * @dev_id: pointer to the device structure
880 *
881 * returns irqreturn_t
882 */
883irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
884{
885 struct mei_device *dev = dev_id;
886
887 if (mei_txe_check_and_ack_intrs(dev, true))
888 return IRQ_WAKE_THREAD;
889 return IRQ_NONE;
890}
891
892
893/**
894 * mei_txe_irq_thread_handler - txe interrupt thread
895 *
896 * @irq: The irq number
897 * @dev_id: pointer to the device structure
898 *
899 * returns irqreturn_t
900 *
901 */
902irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
903{
904 struct mei_device *dev = (struct mei_device *) dev_id;
905 struct mei_txe_hw *hw = to_txe_hw(dev);
906 struct mei_cl_cb complete_list;
907 s32 slots;
908 int rets = 0;
909
910 dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
911 mei_txe_br_reg_read(hw, HHISR_REG),
912 mei_txe_br_reg_read(hw, HISR_REG),
913 mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
914
915
916 /* initialize our complete list */
917 mutex_lock(&dev->device_lock);
918 mei_io_list_init(&complete_list);
919
920 if (pci_dev_msi_enabled(dev->pdev))
921 mei_txe_check_and_ack_intrs(dev, true);
922
923 /* show irq events */
924 mei_txe_pending_interrupts(dev);
925
926 hw->aliveness = mei_txe_aliveness_get(dev);
927 hw->readiness = mei_txe_readiness_get(dev);
928
929 /* Readiness:
930 * Detection of TXE driver going through reset
931 * or TXE driver resetting the HECI interface.
932 */
933 if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
934 dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n");
935
936 /* Check if SeC is going through reset */
937 if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
938 dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
939 dev->recvd_hw_ready = true;
940 } else {
941 dev->recvd_hw_ready = false;
942 if (dev->dev_state != MEI_DEV_RESETTING) {
943
944 dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
945 schedule_work(&dev->reset_work);
946 goto end;
947
948 }
949 }
950 wake_up(&dev->wait_hw_ready);
951 }
952
953 /************************************************************/
954 /* Check interrupt cause:
955 * Aliveness: Detection of SeC acknowledge of host request that
956 * it remain alive or host cancellation of that request.
957 */
958
959 if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
960 /* Clear the interrupt cause */
961 dev_dbg(&dev->pdev->dev,
962 "Aliveness Interrupt: Status: %d\n", hw->aliveness);
963 hw->recvd_aliveness = true;
964 if (waitqueue_active(&hw->wait_aliveness))
965 wake_up(&hw->wait_aliveness);
966 }
967
968
969 /* Output Doorbell:
970 * Detection of SeC having sent output to host
971 */
972 slots = mei_count_full_read_slots(dev);
973 if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
974 /* Read from TXE */
975 rets = mei_irq_read_handler(dev, &complete_list, &slots);
976 if (rets && dev->dev_state != MEI_DEV_RESETTING) {
977 dev_err(&dev->pdev->dev,
978 "mei_irq_read_handler ret = %d.\n", rets);
979
980 schedule_work(&dev->reset_work);
981 goto end;
982 }
983 }
984 /* Input Ready: Detection if host can write to SeC */
985 if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
986 dev->hbuf_is_ready = true;
987 hw->slots = dev->hbuf_depth;
988 }
989
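 /* process pending writes only when the device is alive and the host buffer is free */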
990 if (hw->aliveness && dev->hbuf_is_ready) {
991 /* get the real register value */
992 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
993 rets = mei_irq_write_handler(dev, &complete_list);
994 if (rets && rets != -EMSGSIZE)
995 dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n",
996 rets);
997 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
998 }
999
1000 mei_irq_compl_handler(dev, &complete_list);
1001
1002end:
1003 dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
1004
1005 mutex_unlock(&dev->device_lock);
1006
1007 mei_enable_interrupts(dev);
1008 return IRQ_HANDLED;
1009}
1010
1011static const struct mei_hw_ops mei_txe_hw_ops = {
1012
1013 .host_is_ready = mei_txe_host_is_ready,
1014
1015 .hw_is_ready = mei_txe_hw_is_ready,
1016 .hw_reset = mei_txe_hw_reset,
1017 .hw_config = mei_txe_hw_config,
1018 .hw_start = mei_txe_hw_start,
1019
1020 .intr_clear = mei_txe_intr_clear,
1021 .intr_enable = mei_txe_intr_enable,
1022 .intr_disable = mei_txe_intr_disable,
1023
1024 .hbuf_free_slots = mei_txe_hbuf_empty_slots,
1025 .hbuf_is_ready = mei_txe_is_input_ready,
1026 .hbuf_max_len = mei_txe_hbuf_max_len,
1027
1028 .write = mei_txe_write,
1029
1030 .rdbuf_full_slots = mei_txe_count_full_read_slots,
1031 .read_hdr = mei_txe_read_hdr,
1032
1033 .read = mei_txe_read,
1034
1035};
1036
1037/**
1038 * mei_txe_dev_init - allocates and initializes txe hardware specific structure
1039 *
 1040 * @pdev: pci device
 1041 * returns struct mei_device pointer on success or NULL on failure
1042 *
1043 */
1044struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
1045{
1046 struct mei_device *dev;
1047 struct mei_txe_hw *hw;
1048
1049 dev = kzalloc(sizeof(struct mei_device) +
1050 sizeof(struct mei_txe_hw), GFP_KERNEL);
1051 if (!dev)
1052 return NULL;
1053
1054 mei_device_init(dev);
1055
1056 hw = to_txe_hw(dev);
1057
1058 init_waitqueue_head(&hw->wait_aliveness);
1059
1060 dev->ops = &mei_txe_hw_ops;
1061
1062 dev->pdev = pdev;
1063 return dev;
1064}
1065
1066/**
1067 * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
1068 *
1069 * @dev: the device structure
1070 * @addr: physical address start of the range
1071 * @range: physical range size
1072 */
1073int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
1074{
1075 struct mei_txe_hw *hw = to_txe_hw(dev);
1076
1077 u32 lo32 = lower_32_bits(addr);
1078 u32 hi32 = upper_32_bits(addr);
1079 u32 ctrl;
1080
1081 /* SATT is limited to 36 Bits */
1082 if (hi32 & ~0xF)
1083 return -EINVAL;
1084
1085 /* SATT has to be 16Byte aligned */
1086 if (lo32 & 0xF)
1087 return -EINVAL;
1088
1089 /* SATT range has to be 4Bytes aligned */
1090 if (range & 0x4)
1091 return -EINVAL;
1092
 1093 /* SATT is limited to 32 MB range */
1094 if (range > SATT_RANGE_MAX)
1095 return -EINVAL;
1096
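 /* program the SATT2 window: size, base address low dword, and control with the valid bit and upper address bits */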
1097 ctrl = SATT2_CTRL_VALID_MSK;
1098 ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
1099
1100 mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
1101 mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
1102 mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
1103 dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
1104 range, lo32, ctrl);
1105
1106 return 0;
1107}
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
new file mode 100644
index 000000000000..0812d98633a4
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.h
@@ -0,0 +1,74 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2013-2014, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#ifndef _MEI_HW_TXE_H_
18#define _MEI_HW_TXE_H_
19
20#include <linux/irqreturn.h>
21
22#include "hw.h"
23#include "hw-txe-regs.h"
24
25/* Flatten Hierarchy interrupt cause */
26#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */
27#define TXE_INTR_READINESS HISR_INT_0_STS
28#define TXE_INTR_ALIVENESS_BIT 1 /* HISR_INT_1_STS */
29#define TXE_INTR_ALIVENESS HISR_INT_1_STS
30#define TXE_INTR_OUT_DB_BIT 2 /* HISR_INT_2_STS */
31#define TXE_INTR_OUT_DB HISR_INT_2_STS
32#define TXE_INTR_IN_READY_BIT 8 /* beyond HISR */
33#define TXE_INTR_IN_READY BIT(8)
34
35/**
36 * struct mei_txe_hw - txe hardware specifics
37 *
38 * @mem_addr: SeC and BRIDGE bars
39 * @aliveness: aliveness (power gating) state of the hardware
40 * @readiness: readiness state of the hardware
41 * @wait_aliveness: aliveness wait queue
 42 * @recvd_aliveness: aliveness interrupt was received
43 * @intr_cause: translated interrupt cause
44 */
45struct mei_txe_hw {
46 void __iomem *mem_addr[NUM_OF_MEM_BARS];
47 u32 aliveness;
48 u32 readiness;
49 u32 slots;
50
51 wait_queue_head_t wait_aliveness;
52 bool recvd_aliveness;
53
54 unsigned long intr_cause;
55};
56
57#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw)
58
59static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
60{
61 return container_of((void *)hw, struct mei_device, hw);
62}
63
64struct mei_device *mei_txe_dev_init(struct pci_dev *pdev);
65
66irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
67irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
68
69int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
70
71int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
72
73
74#endif /* _MEI_HW_TXE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index dd44e33ad2b6..6b476ab49b2e 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -22,7 +22,7 @@
22/* 22/*
23 * Timeouts in Seconds 23 * Timeouts in Seconds
24 */ 24 */
25#define MEI_INTEROP_TIMEOUT 7 /* Timeout on ready message */ 25#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */
26#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ 26#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
27 27
28#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ 28#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
@@ -31,13 +31,13 @@
31#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ 31#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
32#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ 32#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
33 33
34#define MEI_HBM_TIMEOUT 1 /* 1 second */
34 35
35/* 36/*
36 * MEI Version 37 * MEI Version
37 */ 38 */
38#define HBM_MINOR_VERSION 0 39#define HBM_MINOR_VERSION 0
39#define HBM_MAJOR_VERSION 1 40#define HBM_MAJOR_VERSION 1
40#define HBM_TIMEOUT 1 /* 1 second */
41 41
42/* Host bus message command opcode */ 42/* Host bus message command opcode */
43#define MEI_HBM_CMD_OP_MSK 0x7f 43#define MEI_HBM_CMD_OP_MSK 0x7f
@@ -89,19 +89,19 @@ enum mei_stop_reason_types {
89 * Client Connect Status 89 * Client Connect Status
90 * used by hbm_client_connect_response.status 90 * used by hbm_client_connect_response.status
91 */ 91 */
92enum client_connect_status_types { 92enum mei_cl_connect_status {
93 CCS_SUCCESS = 0x00, 93 MEI_CL_CONN_SUCCESS = 0x00,
94 CCS_NOT_FOUND = 0x01, 94 MEI_CL_CONN_NOT_FOUND = 0x01,
95 CCS_ALREADY_STARTED = 0x02, 95 MEI_CL_CONN_ALREADY_STARTED = 0x02,
96 CCS_OUT_OF_RESOURCES = 0x03, 96 MEI_CL_CONN_OUT_OF_RESOURCES = 0x03,
97 CCS_MESSAGE_SMALL = 0x04 97 MEI_CL_CONN_MESSAGE_SMALL = 0x04
98}; 98};
99 99
100/* 100/*
101 * Client Disconnect Status 101 * Client Disconnect Status
102 */ 102 */
103enum client_disconnect_status_types { 103enum mei_cl_disconnect_status {
104 CDS_SUCCESS = 0x00 104 MEI_CL_DISCONN_SUCCESS = 0x00
105}; 105};
106 106
107/* 107/*
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cdd31c2a2a2b..4460975c0eef 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -116,7 +116,6 @@ int mei_reset(struct mei_device *dev)
116 mei_cl_unlink(&dev->wd_cl); 116 mei_cl_unlink(&dev->wd_cl);
117 mei_cl_unlink(&dev->iamthif_cl); 117 mei_cl_unlink(&dev->iamthif_cl);
118 mei_amthif_reset_params(dev); 118 mei_amthif_reset_params(dev);
119 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
120 } 119 }
121 120
122 121
@@ -126,7 +125,6 @@ int mei_reset(struct mei_device *dev)
126 125
127 if (ret) { 126 if (ret) {
128 dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); 127 dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
129 dev->dev_state = MEI_DEV_DISABLED;
130 return ret; 128 return ret;
131 } 129 }
132 130
@@ -139,7 +137,6 @@ int mei_reset(struct mei_device *dev)
139 ret = mei_hw_start(dev); 137 ret = mei_hw_start(dev);
140 if (ret) { 138 if (ret) {
141 dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); 139 dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
142 dev->dev_state = MEI_DEV_DISABLED;
143 return ret; 140 return ret;
144 } 141 }
145 142
@@ -149,7 +146,7 @@ int mei_reset(struct mei_device *dev)
149 ret = mei_hbm_start_req(dev); 146 ret = mei_hbm_start_req(dev);
150 if (ret) { 147 if (ret) {
151 dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); 148 dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
152 dev->dev_state = MEI_DEV_DISABLED; 149 dev->dev_state = MEI_DEV_RESETTING;
153 return ret; 150 return ret;
154 } 151 }
155 152
@@ -166,6 +163,7 @@ EXPORT_SYMBOL_GPL(mei_reset);
166 */ 163 */
167int mei_start(struct mei_device *dev) 164int mei_start(struct mei_device *dev)
168{ 165{
166 int ret;
169 mutex_lock(&dev->device_lock); 167 mutex_lock(&dev->device_lock);
170 168
171 /* acknowledge interrupt and stop interrupts */ 169 /* acknowledge interrupt and stop interrupts */
@@ -175,10 +173,18 @@ int mei_start(struct mei_device *dev)
175 173
176 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); 174 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
177 175
178 dev->dev_state = MEI_DEV_INITIALIZING;
179 dev->reset_count = 0; 176 dev->reset_count = 0;
180 mei_reset(dev); 177 do {
178 dev->dev_state = MEI_DEV_INITIALIZING;
179 ret = mei_reset(dev);
180
181 if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
182 dev_err(&dev->pdev->dev, "reset failed ret = %d", ret);
183 goto err;
184 }
185 } while (ret);
181 186
187 /* we cannot start the device w/o hbm start message completed */
182 if (dev->dev_state == MEI_DEV_DISABLED) { 188 if (dev->dev_state == MEI_DEV_DISABLED) {
183 dev_err(&dev->pdev->dev, "reset failed"); 189 dev_err(&dev->pdev->dev, "reset failed");
184 goto err; 190 goto err;
@@ -238,27 +244,40 @@ int mei_restart(struct mei_device *dev)
238 244
239 mutex_unlock(&dev->device_lock); 245 mutex_unlock(&dev->device_lock);
240 246
241 if (err || dev->dev_state == MEI_DEV_DISABLED) 247 if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
248 dev_err(&dev->pdev->dev, "device disabled = %d\n", err);
242 return -ENODEV; 249 return -ENODEV;
250 }
251
252 /* try to start again */
253 if (err)
254 schedule_work(&dev->reset_work);
255
243 256
244 return 0; 257 return 0;
245} 258}
246EXPORT_SYMBOL_GPL(mei_restart); 259EXPORT_SYMBOL_GPL(mei_restart);
247 260
248
249static void mei_reset_work(struct work_struct *work) 261static void mei_reset_work(struct work_struct *work)
250{ 262{
251 struct mei_device *dev = 263 struct mei_device *dev =
252 container_of(work, struct mei_device, reset_work); 264 container_of(work, struct mei_device, reset_work);
265 int ret;
253 266
254 mutex_lock(&dev->device_lock); 267 mutex_lock(&dev->device_lock);
255 268
256 mei_reset(dev); 269 ret = mei_reset(dev);
257 270
258 mutex_unlock(&dev->device_lock); 271 mutex_unlock(&dev->device_lock);
259 272
260 if (dev->dev_state == MEI_DEV_DISABLED) 273 if (dev->dev_state == MEI_DEV_DISABLED) {
261 dev_err(&dev->pdev->dev, "reset failed"); 274 dev_err(&dev->pdev->dev, "device disabled = %d\n", ret);
275 return;
276 }
277
278 /* retry reset in case of failure */
279 if (ret)
280 schedule_work(&dev->reset_work);
262} 281}
263 282
264void mei_stop(struct mei_device *dev) 283void mei_stop(struct mei_device *dev)
@@ -269,6 +288,8 @@ void mei_stop(struct mei_device *dev)
269 288
270 mei_nfc_host_exit(dev); 289 mei_nfc_host_exit(dev);
271 290
291 mei_cl_bus_remove_devices(dev);
292
272 mutex_lock(&dev->device_lock); 293 mutex_lock(&dev->device_lock);
273 294
274 mei_wd_stop(dev); 295 mei_wd_stop(dev);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index f0fbb5179f80..29b5af8efb71 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -26,7 +26,6 @@
26 26
27#include "mei_dev.h" 27#include "mei_dev.h"
28#include "hbm.h" 28#include "hbm.h"
29#include "hw-me.h"
30#include "client.h" 29#include "client.h"
31 30
32 31
@@ -161,29 +160,63 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
161} 160}
162 161
163/** 162/**
163 * mei_cl_irq_disconnect_rsp - send disconnection response message
164 *
165 * @cl: client
166 * @cb: callback block.
167 * @cmpl_list: complete list.
168 *
169 * returns 0, OK; otherwise, error.
170 */
171static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
172 struct mei_cl_cb *cmpl_list)
173{
174 struct mei_device *dev = cl->dev;
175 u32 msg_slots;
176 int slots;
177 int ret;
178
179 slots = mei_hbuf_empty_slots(dev);
180 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
181
182 if (slots < msg_slots)
183 return -EMSGSIZE;
184
185 ret = mei_hbm_cl_disconnect_rsp(dev, cl);
186
187 cl->state = MEI_FILE_DISCONNECTED;
188 cl->status = 0;
189 list_del(&cb->list);
190 mei_io_cb_free(cb);
191
192 return ret;
193}
194
195
196
197/**
164 * mei_cl_irq_close - processes close related operation from 198 * mei_cl_irq_close - processes close related operation from
165 * interrupt thread context - send disconnect request 199 * interrupt thread context - send disconnect request
166 * 200 *
167 * @cl: client 201 * @cl: client
168 * @cb: callback block. 202 * @cb: callback block.
169 * @slots: free slots.
170 * @cmpl_list: complete list. 203 * @cmpl_list: complete list.
171 * 204 *
172 * returns 0, OK; otherwise, error. 205 * returns 0, OK; otherwise, error.
173 */ 206 */
174static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, 207static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
175 s32 *slots, struct mei_cl_cb *cmpl_list) 208 struct mei_cl_cb *cmpl_list)
176{ 209{
177 struct mei_device *dev = cl->dev; 210 struct mei_device *dev = cl->dev;
211 u32 msg_slots;
212 int slots;
178 213
179 u32 msg_slots = 214 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
180 mei_data2slots(sizeof(struct hbm_client_connect_request)); 215 slots = mei_hbuf_empty_slots(dev);
181 216
182 if (*slots < msg_slots) 217 if (slots < msg_slots)
183 return -EMSGSIZE; 218 return -EMSGSIZE;
184 219
185 *slots -= msg_slots;
186
187 if (mei_hbm_cl_disconnect_req(dev, cl)) { 220 if (mei_hbm_cl_disconnect_req(dev, cl)) {
188 cl->status = 0; 221 cl->status = 0;
189 cb->buf_idx = 0; 222 cb->buf_idx = 0;
@@ -207,27 +240,23 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
207 * 240 *
208 * @cl: client 241 * @cl: client
209 * @cb: callback block. 242 * @cb: callback block.
210 * @slots: free slots.
211 * @cmpl_list: complete list. 243 * @cmpl_list: complete list.
212 * 244 *
213 * returns 0, OK; otherwise, error. 245 * returns 0, OK; otherwise, error.
214 */ 246 */
215static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, 247static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
216 s32 *slots, struct mei_cl_cb *cmpl_list) 248 struct mei_cl_cb *cmpl_list)
217{ 249{
218 struct mei_device *dev = cl->dev; 250 struct mei_device *dev = cl->dev;
219 u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); 251 u32 msg_slots;
220 252 int slots;
221 int ret; 253 int ret;
222 254
255 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
256 slots = mei_hbuf_empty_slots(dev);
223 257
224 if (*slots < msg_slots) { 258 if (slots < msg_slots)
225 /* return the cancel routine */
226 list_del(&cb->list);
227 return -EMSGSIZE; 259 return -EMSGSIZE;
228 }
229
230 *slots -= msg_slots;
231 260
232 ret = mei_hbm_cl_flow_control_req(dev, cl); 261 ret = mei_hbm_cl_flow_control_req(dev, cl);
233 if (ret) { 262 if (ret) {
@@ -244,32 +273,30 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
244 273
245 274
246/** 275/**
247 * mei_cl_irq_ioctl - processes client ioctl related operation from the 276 * mei_cl_irq_connect - send connect request in irq_thread context
248 * interrupt thread context - send connection request
249 * 277 *
250 * @cl: client 278 * @cl: client
251 * @cb: callback block. 279 * @cb: callback block.
252 * @slots: free slots.
253 * @cmpl_list: complete list. 280 * @cmpl_list: complete list.
254 * 281 *
255 * returns 0, OK; otherwise, error. 282 * returns 0, OK; otherwise, error.
256 */ 283 */
257static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb, 284static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
258 s32 *slots, struct mei_cl_cb *cmpl_list) 285 struct mei_cl_cb *cmpl_list)
259{ 286{
260 struct mei_device *dev = cl->dev; 287 struct mei_device *dev = cl->dev;
288 u32 msg_slots;
289 int slots;
261 int ret; 290 int ret;
262 291
263 u32 msg_slots = 292 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
264 mei_data2slots(sizeof(struct hbm_client_connect_request)); 293 slots = mei_hbuf_empty_slots(dev);
265 294
266 if (*slots < msg_slots) { 295 if (mei_cl_is_other_connecting(cl))
267 /* return the cancel routine */ 296 return 0;
268 list_del(&cb->list);
269 return -EMSGSIZE;
270 }
271 297
272 *slots -= msg_slots; 298 if (slots < msg_slots)
299 return -EMSGSIZE;
273 300
274 cl->state = MEI_FILE_CONNECTING; 301 cl->state = MEI_FILE_CONNECTING;
275 302
@@ -323,7 +350,7 @@ int mei_irq_read_handler(struct mei_device *dev,
323 dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", 350 dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
324 *slots); 351 *slots);
325 /* we can't read the message */ 352 /* we can't read the message */
326 ret = -ERANGE; 353 ret = -ENODATA;
327 goto end; 354 goto end;
328 } 355 }
329 356
@@ -409,10 +436,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
409 s32 slots; 436 s32 slots;
410 int ret; 437 int ret;
411 438
412 if (!mei_hbuf_is_ready(dev)) { 439
413 dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n"); 440 if (!mei_hbuf_acquire(dev))
414 return 0; 441 return 0;
415 } 442
416 slots = mei_hbuf_empty_slots(dev); 443 slots = mei_hbuf_empty_slots(dev);
417 if (slots <= 0) 444 if (slots <= 0)
418 return -EMSGSIZE; 445 return -EMSGSIZE;
@@ -447,29 +474,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
447 474
448 if (dev->wd_state == MEI_WD_STOPPING) { 475 if (dev->wd_state == MEI_WD_STOPPING) {
449 dev->wd_state = MEI_WD_IDLE; 476 dev->wd_state = MEI_WD_IDLE;
450 wake_up_interruptible(&dev->wait_stop_wd); 477 wake_up(&dev->wait_stop_wd);
451 } 478 }
452 479
453 if (dev->wr_ext_msg.hdr.length) { 480 if (mei_cl_is_connected(&dev->wd_cl)) {
454 mei_write_message(dev, &dev->wr_ext_msg.hdr,
455 dev->wr_ext_msg.data);
456 slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
457 dev->wr_ext_msg.hdr.length = 0;
458 }
459 if (dev->dev_state == MEI_DEV_ENABLED) {
460 if (dev->wd_pending && 481 if (dev->wd_pending &&
461 mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { 482 mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
462 if (mei_wd_send(dev)) 483 ret = mei_wd_send(dev);
463 dev_dbg(&dev->pdev->dev, "wd send failed.\n"); 484 if (ret)
464 else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) 485 return ret;
465 return -ENODEV;
466
467 dev->wd_pending = false; 486 dev->wd_pending = false;
468
469 if (dev->wd_state == MEI_WD_RUNNING)
470 slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
471 else
472 slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
473 } 487 }
474 } 488 }
475 489
@@ -484,28 +498,31 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
484 switch (cb->fop_type) { 498 switch (cb->fop_type) {
485 case MEI_FOP_CLOSE: 499 case MEI_FOP_CLOSE:
486 /* send disconnect message */ 500 /* send disconnect message */
487 ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list); 501 ret = mei_cl_irq_close(cl, cb, cmpl_list);
488 if (ret) 502 if (ret)
489 return ret; 503 return ret;
490 504
491 break; 505 break;
492 case MEI_FOP_READ: 506 case MEI_FOP_READ:
493 /* send flow control message */ 507 /* send flow control message */
494 ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list); 508 ret = mei_cl_irq_read(cl, cb, cmpl_list);
495 if (ret) 509 if (ret)
496 return ret; 510 return ret;
497 511
498 break; 512 break;
499 case MEI_FOP_IOCTL: 513 case MEI_FOP_CONNECT:
500 /* connect message */ 514 /* connect message */
501 if (mei_cl_is_other_connecting(cl)) 515 ret = mei_cl_irq_connect(cl, cb, cmpl_list);
502 continue;
503 ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list);
504 if (ret) 516 if (ret)
505 return ret; 517 return ret;
506 518
507 break; 519 break;
508 520 case MEI_FOP_DISCONNECT_RSP:
521 /* send disconnect resp */
522 ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
523 if (ret)
524 return ret;
525 break;
509 default: 526 default:
510 BUG(); 527 BUG();
511 } 528 }
@@ -518,11 +535,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
518 if (cl == NULL) 535 if (cl == NULL)
519 continue; 536 continue;
520 if (cl == &dev->iamthif_cl) 537 if (cl == &dev->iamthif_cl)
521 ret = mei_amthif_irq_write_complete(cl, cb, 538 ret = mei_amthif_irq_write(cl, cb, cmpl_list);
522 &slots, cmpl_list);
523 else 539 else
524 ret = mei_cl_irq_write_complete(cl, cb, 540 ret = mei_cl_irq_write(cl, cb, cmpl_list);
525 &slots, cmpl_list);
526 if (ret) 541 if (ret)
527 return ret; 542 return ret;
528 } 543 }
@@ -541,8 +556,7 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
541void mei_timer(struct work_struct *work) 556void mei_timer(struct work_struct *work)
542{ 557{
543 unsigned long timeout; 558 unsigned long timeout;
544 struct mei_cl *cl_pos = NULL; 559 struct mei_cl *cl;
545 struct mei_cl *cl_next = NULL;
546 struct mei_cl_cb *cb_pos = NULL; 560 struct mei_cl_cb *cb_pos = NULL;
547 struct mei_cl_cb *cb_next = NULL; 561 struct mei_cl_cb *cb_next = NULL;
548 562
@@ -570,9 +584,9 @@ void mei_timer(struct work_struct *work)
570 goto out; 584 goto out;
571 585
572 /*** connect/disconnect timeouts ***/ 586 /*** connect/disconnect timeouts ***/
573 list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { 587 list_for_each_entry(cl, &dev->file_list, link) {
574 if (cl_pos->timer_count) { 588 if (cl->timer_count) {
575 if (--cl_pos->timer_count == 0) { 589 if (--cl->timer_count == 0) {
576 dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); 590 dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
577 mei_reset(dev); 591 mei_reset(dev);
578 goto out; 592 goto out;
@@ -580,6 +594,9 @@ void mei_timer(struct work_struct *work)
580 } 594 }
581 } 595 }
582 596
597 if (!mei_cl_is_connected(&dev->iamthif_cl))
598 goto out;
599
583 if (dev->iamthif_stall_timer) { 600 if (dev->iamthif_stall_timer) {
584 if (--dev->iamthif_stall_timer == 0) { 601 if (--dev->iamthif_stall_timer == 0) {
585 dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); 602 dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
@@ -619,10 +636,10 @@ void mei_timer(struct work_struct *work)
619 list_for_each_entry_safe(cb_pos, cb_next, 636 list_for_each_entry_safe(cb_pos, cb_next,
620 &dev->amthif_rd_complete_list.list, list) { 637 &dev->amthif_rd_complete_list.list, list) {
621 638
622 cl_pos = cb_pos->file_object->private_data; 639 cl = cb_pos->file_object->private_data;
623 640
624 /* Finding the AMTHI entry. */ 641 /* Finding the AMTHI entry. */
625 if (cl_pos == &dev->iamthif_cl) 642 if (cl == &dev->iamthif_cl)
626 list_del(&cb_pos->list); 643 list_del(&cb_pos->list);
627 } 644 }
628 mei_io_cb_free(dev->iamthif_current_cb); 645 mei_io_cb_free(dev->iamthif_current_cb);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5424f8ff3f7f..b35594dbf52f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -13,9 +13,6 @@
13 * more details. 13 * more details.
14 * 14 *
15 */ 15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h> 16#include <linux/module.h>
20#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
21#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -40,7 +37,6 @@
40#include <linux/mei.h> 37#include <linux/mei.h>
41 38
42#include "mei_dev.h" 39#include "mei_dev.h"
43#include "hw-me.h"
44#include "client.h" 40#include "client.h"
45 41
46/** 42/**
@@ -129,17 +125,11 @@ static int mei_release(struct inode *inode, struct file *file)
129 } 125 }
130 if (cl->state == MEI_FILE_CONNECTED) { 126 if (cl->state == MEI_FILE_CONNECTED) {
131 cl->state = MEI_FILE_DISCONNECTING; 127 cl->state = MEI_FILE_DISCONNECTING;
132 dev_dbg(&dev->pdev->dev, 128 cl_dbg(dev, cl, "disconnecting\n");
133 "disconnecting client host client = %d, "
134 "ME client = %d\n",
135 cl->host_client_id,
136 cl->me_client_id);
137 rets = mei_cl_disconnect(cl); 129 rets = mei_cl_disconnect(cl);
138 } 130 }
139 mei_cl_flush_queues(cl); 131 mei_cl_flush_queues(cl);
140 dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n", 132 cl_dbg(dev, cl, "removing\n");
141 cl->host_client_id,
142 cl->me_client_id);
143 133
144 mei_cl_unlink(cl); 134 mei_cl_unlink(cl);
145 135
@@ -284,6 +274,7 @@ copy_buffer:
284 length = min_t(size_t, length, cb->buf_idx - *offset); 274 length = min_t(size_t, length, cb->buf_idx - *offset);
285 275
286 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { 276 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
277 dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
287 rets = -EFAULT; 278 rets = -EFAULT;
288 goto free; 279 goto free;
289 } 280 }
@@ -340,7 +331,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
340 331
341 id = mei_me_cl_by_id(dev, cl->me_client_id); 332 id = mei_me_cl_by_id(dev, cl->me_client_id);
342 if (id < 0) { 333 if (id < 0) {
343 rets = -ENODEV; 334 rets = -ENOTTY;
344 goto out; 335 goto out;
345 } 336 }
346 337
@@ -404,7 +395,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
404 395
405 rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); 396 rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
406 if (rets) { 397 if (rets) {
407 dev_err(&dev->pdev->dev, "failed to copy data from userland\n"); 398 dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
408 rets = -EFAULT; 399 rets = -EFAULT;
409 goto out; 400 goto out;
410 } 401 }
@@ -471,7 +462,7 @@ static int mei_ioctl_connect_client(struct file *file,
471 if (i < 0 || dev->me_clients[i].props.fixed_address) { 462 if (i < 0 || dev->me_clients[i].props.fixed_address) {
472 dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", 463 dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
473 &data->in_client_uuid); 464 &data->in_client_uuid);
474 rets = -ENODEV; 465 rets = -ENOTTY;
475 goto end; 466 goto end;
476 } 467 }
477 468
@@ -569,7 +560,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
569 dev_dbg(&dev->pdev->dev, "copy connect data from user\n"); 560 dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
570 if (copy_from_user(connect_data, (char __user *)data, 561 if (copy_from_user(connect_data, (char __user *)data,
571 sizeof(struct mei_connect_client_data))) { 562 sizeof(struct mei_connect_client_data))) {
572 dev_err(&dev->pdev->dev, "failed to copy data from userland\n"); 563 dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
573 rets = -EFAULT; 564 rets = -EFAULT;
574 goto out; 565 goto out;
575 } 566 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f7de95b4cdd9..94a516716d22 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -24,7 +24,6 @@
24#include <linux/mei_cl_bus.h> 24#include <linux/mei_cl_bus.h>
25 25
26#include "hw.h" 26#include "hw.h"
27#include "hw-me-regs.h"
28#include "hbm.h" 27#include "hbm.h"
29 28
30/* 29/*
@@ -130,16 +129,18 @@ enum mei_wd_states {
130 129
131/** 130/**
132 * enum mei_cb_file_ops - file operation associated with the callback 131 * enum mei_cb_file_ops - file operation associated with the callback
133 * @MEI_FOP_READ - read 132 * @MEI_FOP_READ - read
134 * @MEI_FOP_WRITE - write 133 * @MEI_FOP_WRITE - write
135 * @MEI_FOP_IOCTL - ioctl 134 * @MEI_FOP_CONNECT - connect
136 * @MEI_FOP_OPEN - open 135 * @MEI_FOP_DISCONNECT_RSP - disconnect response
137 * @MEI_FOP_CLOSE - close 136 * @MEI_FOP_OPEN - open
137 * @MEI_FOP_CLOSE - close
138 */ 138 */
139enum mei_cb_file_ops { 139enum mei_cb_file_ops {
140 MEI_FOP_READ = 0, 140 MEI_FOP_READ = 0,
141 MEI_FOP_WRITE, 141 MEI_FOP_WRITE,
142 MEI_FOP_IOCTL, 142 MEI_FOP_CONNECT,
143 MEI_FOP_DISCONNECT_RSP,
143 MEI_FOP_OPEN, 144 MEI_FOP_OPEN,
144 MEI_FOP_CLOSE 145 MEI_FOP_CLOSE
145}; 146};
@@ -236,20 +237,20 @@ struct mei_cl {
236 */ 237 */
237struct mei_hw_ops { 238struct mei_hw_ops {
238 239
239 bool (*host_is_ready) (struct mei_device *dev); 240 bool (*host_is_ready)(struct mei_device *dev);
240 241
241 bool (*hw_is_ready) (struct mei_device *dev); 242 bool (*hw_is_ready)(struct mei_device *dev);
242 int (*hw_reset) (struct mei_device *dev, bool enable); 243 int (*hw_reset)(struct mei_device *dev, bool enable);
243 int (*hw_start) (struct mei_device *dev); 244 int (*hw_start)(struct mei_device *dev);
244 void (*hw_config) (struct mei_device *dev); 245 void (*hw_config)(struct mei_device *dev);
245 246
246 void (*intr_clear) (struct mei_device *dev); 247 void (*intr_clear)(struct mei_device *dev);
247 void (*intr_enable) (struct mei_device *dev); 248 void (*intr_enable)(struct mei_device *dev);
248 void (*intr_disable) (struct mei_device *dev); 249 void (*intr_disable)(struct mei_device *dev);
249 250
250 int (*hbuf_free_slots) (struct mei_device *dev); 251 int (*hbuf_free_slots)(struct mei_device *dev);
251 bool (*hbuf_is_ready) (struct mei_device *dev); 252 bool (*hbuf_is_ready)(struct mei_device *dev);
252 size_t (*hbuf_max_len) (const struct mei_device *dev); 253 size_t (*hbuf_max_len)(const struct mei_device *dev);
253 254
254 int (*write)(struct mei_device *dev, 255 int (*write)(struct mei_device *dev,
255 struct mei_msg_hdr *hdr, 256 struct mei_msg_hdr *hdr,
@@ -258,7 +259,7 @@ struct mei_hw_ops {
258 int (*rdbuf_full_slots)(struct mei_device *dev); 259 int (*rdbuf_full_slots)(struct mei_device *dev);
259 260
260 u32 (*read_hdr)(const struct mei_device *dev); 261 u32 (*read_hdr)(const struct mei_device *dev);
261 int (*read) (struct mei_device *dev, 262 int (*read)(struct mei_device *dev,
262 unsigned char *buf, unsigned long len); 263 unsigned char *buf, unsigned long len);
263}; 264};
264 265
@@ -294,6 +295,7 @@ int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);
294int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length); 295int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);
295int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); 296int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
296void mei_cl_bus_rx_event(struct mei_cl *cl); 297void mei_cl_bus_rx_event(struct mei_cl *cl);
298void mei_cl_bus_remove_devices(struct mei_device *dev);
297int mei_cl_bus_init(void); 299int mei_cl_bus_init(void);
298void mei_cl_bus_exit(void); 300void mei_cl_bus_exit(void);
299 301
@@ -339,7 +341,6 @@ struct mei_cl_device {
339 * @hbuf_depth - depth of hardware host/write buffer is slots 341 * @hbuf_depth - depth of hardware host/write buffer is slots
340 * @hbuf_is_ready - query if the host host/write buffer is ready 342 * @hbuf_is_ready - query if the host host/write buffer is ready
341 * @wr_msg - the buffer for hbm control messages 343 * @wr_msg - the buffer for hbm control messages
342 * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
343 */ 344 */
344struct mei_device { 345struct mei_device {
345 struct pci_dev *pdev; /* pointer to pci device struct */ 346 struct pci_dev *pdev; /* pointer to pci device struct */
@@ -394,11 +395,6 @@ struct mei_device {
394 unsigned char data[128]; 395 unsigned char data[128];
395 } wr_msg; 396 } wr_msg;
396 397
397 struct {
398 struct mei_msg_hdr hdr;
399 unsigned char data[4]; /* All HBM messages are 4 bytes */
400 } wr_ext_msg; /* for control responses */
401
402 struct hbm_version version; 398 struct hbm_version version;
403 399
404 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 400 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
@@ -518,8 +514,8 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
518 514
519void mei_amthif_run_next_cmd(struct mei_device *dev); 515void mei_amthif_run_next_cmd(struct mei_device *dev);
520 516
521int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, 517int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
522 s32 *slots, struct mei_cl_cb *cmpl_list); 518 struct mei_cl_cb *cmpl_list);
523 519
524void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); 520void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
525int mei_amthif_irq_read_msg(struct mei_device *dev, 521int mei_amthif_irq_read_msg(struct mei_device *dev,
@@ -546,7 +542,7 @@ int mei_wd_host_init(struct mei_device *dev);
546 * once we got connection to the WD Client 542 * once we got connection to the WD Client
547 * @dev - mei device 543 * @dev - mei device
548 */ 544 */
549void mei_watchdog_register(struct mei_device *dev); 545int mei_watchdog_register(struct mei_device *dev);
550/* 546/*
551 * mei_watchdog_unregister - Unregistering watchdog interface 547 * mei_watchdog_unregister - Unregistering watchdog interface
552 * @dev - mei device 548 * @dev - mei device
@@ -633,6 +629,8 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
633 return dev->ops->rdbuf_full_slots(dev); 629 return dev->ops->rdbuf_full_slots(dev);
634} 630}
635 631
632bool mei_hbuf_acquire(struct mei_device *dev);
633
636#if IS_ENABLED(CONFIG_DEBUG_FS) 634#if IS_ENABLED(CONFIG_DEBUG_FS)
637int mei_dbgfs_register(struct mei_device *dev, const char *name); 635int mei_dbgfs_register(struct mei_device *dev, const char *name);
638void mei_dbgfs_deregister(struct mei_device *dev); 636void mei_dbgfs_deregister(struct mei_device *dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index a58320c0c049..3095fc514a65 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -364,7 +364,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
364 if (!wait_event_interruptible_timeout(ndev->send_wq, 364 if (!wait_event_interruptible_timeout(ndev->send_wq,
365 ndev->recv_req_id == ndev->req_id, HZ)) { 365 ndev->recv_req_id == ndev->req_id, HZ)) {
366 dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); 366 dev_err(&dev->pdev->dev, "NFC MEI command timeout\n");
367 err = -ETIMEDOUT; 367 err = -ETIME;
368 } else { 368 } else {
369 ndev->req_id++; 369 ndev->req_id++;
370 } 370 }
@@ -502,7 +502,7 @@ int mei_nfc_host_init(struct mei_device *dev)
502 i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); 502 i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
503 if (i < 0) { 503 if (i < 0) {
504 dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); 504 dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
505 ret = -ENOENT; 505 ret = -ENOTTY;
506 goto err; 506 goto err;
507 } 507 }
508 508
@@ -520,7 +520,7 @@ int mei_nfc_host_init(struct mei_device *dev)
520 i = mei_me_cl_by_uuid(dev, &mei_nfc_guid); 520 i = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
521 if (i < 0) { 521 if (i < 0) {
522 dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); 522 dev_info(&dev->pdev->dev, "nfc: failed to find the client\n");
523 ret = -ENOENT; 523 ret = -ENOTTY;
524 goto err; 524 goto err;
525 } 525 }
526 526
@@ -552,13 +552,7 @@ err:
552void mei_nfc_host_exit(struct mei_device *dev) 552void mei_nfc_host_exit(struct mei_device *dev)
553{ 553{
554 struct mei_nfc_dev *ndev = &nfc_dev; 554 struct mei_nfc_dev *ndev = &nfc_dev;
555
556 cancel_work_sync(&ndev->init_work); 555 cancel_work_sync(&ndev->init_work);
556}
557 557
558 mutex_lock(&dev->device_lock);
559 if (ndev->cl && ndev->cl->device)
560 mei_cl_remove_device(ndev->cl->device);
561 558
562 mei_nfc_free(ndev);
563 mutex_unlock(&dev->device_lock);
564}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ddadd08956f4..1c8fd3a3e135 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -13,9 +13,6 @@
13 * more details. 13 * more details.
14 * 14 *
15 */ 15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h> 16#include <linux/module.h>
20#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
21#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -27,7 +24,6 @@
27#include <linux/aio.h> 24#include <linux/aio.h>
28#include <linux/pci.h> 25#include <linux/pci.h>
29#include <linux/poll.h> 26#include <linux/poll.h>
30#include <linux/init.h>
31#include <linux/ioctl.h> 27#include <linux/ioctl.h>
32#include <linux/cdev.h> 28#include <linux/cdev.h>
33#include <linux/sched.h> 29#include <linux/sched.h>
@@ -40,11 +36,12 @@
40#include <linux/mei.h> 36#include <linux/mei.h>
41 37
42#include "mei_dev.h" 38#include "mei_dev.h"
43#include "hw-me.h"
44#include "client.h" 39#include "client.h"
40#include "hw-me-regs.h"
41#include "hw-me.h"
45 42
46/* mei_pci_tbl - PCI Device ID Table */ 43/* mei_pci_tbl - PCI Device ID Table */
47static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { 44static const struct pci_device_id mei_me_pci_tbl[] = {
48 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, 45 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
49 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, 46 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
50 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, 47 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
@@ -270,7 +267,7 @@ static void mei_me_remove(struct pci_dev *pdev)
270 267
271 268
272} 269}
273#ifdef CONFIG_PM 270#ifdef CONFIG_PM_SLEEP
274static int mei_me_pci_suspend(struct device *device) 271static int mei_me_pci_suspend(struct device *device)
275{ 272{
276 struct pci_dev *pdev = to_pci_dev(device); 273 struct pci_dev *pdev = to_pci_dev(device);
@@ -330,11 +327,12 @@ static int mei_me_pci_resume(struct device *device)
330 327
331 return 0; 328 return 0;
332} 329}
330
333static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); 331static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
334#define MEI_ME_PM_OPS (&mei_me_pm_ops) 332#define MEI_ME_PM_OPS (&mei_me_pm_ops)
335#else 333#else
336#define MEI_ME_PM_OPS NULL 334#define MEI_ME_PM_OPS NULL
337#endif /* CONFIG_PM */ 335#endif /* CONFIG_PM_SLEEP */
338/* 336/*
339 * PCI driver structure 337 * PCI driver structure
340 */ 338 */
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
new file mode 100644
index 000000000000..ad3adb009a1e
--- /dev/null
+++ b/drivers/misc/mei/pci-txe.c
@@ -0,0 +1,293 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2013-2014, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <linux/fs.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/sched.h>
26#include <linux/uuid.h>
27#include <linux/jiffies.h>
28#include <linux/interrupt.h>
29#include <linux/workqueue.h>
30
31#include <linux/mei.h>
32
33
34#include "mei_dev.h"
35#include "hw-txe.h"
36
37static const struct pci_device_id mei_txe_pci_tbl[] = {
38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */
39 {0, }
40};
41MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
42
43
44static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
45{
46 int i;
47 for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
48 if (hw->mem_addr[i]) {
49 pci_iounmap(pdev, hw->mem_addr[i]);
50 hw->mem_addr[i] = NULL;
51 }
52 }
53}
54/**
55 * mei_txe_probe - Device Initialization Routine
56 *
57 * @pdev: PCI device structure
58 * @ent: entry in mei_txe_pci_tbl
59 *
60 * returns 0 on success, <0 on failure.
61 */
62static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
63{
64 struct mei_device *dev;
65 struct mei_txe_hw *hw;
66 int err;
67 int i;
68
69 /* enable pci dev */
70 err = pci_enable_device(pdev);
71 if (err) {
72 dev_err(&pdev->dev, "failed to enable pci device.\n");
73 goto end;
74 }
75 /* set PCI host mastering */
76 pci_set_master(pdev);
77 /* pci request regions for mei driver */
78 err = pci_request_regions(pdev, KBUILD_MODNAME);
79 if (err) {
80 dev_err(&pdev->dev, "failed to get pci regions.\n");
81 goto disable_device;
82 }
83
84 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
85 if (err) {
86 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
87 if (err) {
88 dev_err(&pdev->dev, "No suitable DMA available.\n");
89 goto release_regions;
90 }
91 }
92
93 /* allocates and initializes the mei dev structure */
94 dev = mei_txe_dev_init(pdev);
95 if (!dev) {
96 err = -ENOMEM;
97 goto release_regions;
98 }
99 hw = to_txe_hw(dev);
100
101 /* mapping IO device memory */
102 for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
103 hw->mem_addr[i] = pci_iomap(pdev, i, 0);
104 if (!hw->mem_addr[i]) {
105 dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
106 err = -ENOMEM;
107 goto free_device;
108 }
109 }
110
111
112 pci_enable_msi(pdev);
113
114 /* clear spurious interrupts */
115 mei_clear_interrupts(dev);
116
117 /* request and enable interrupt */
118 if (pci_dev_msi_enabled(pdev))
119 err = request_threaded_irq(pdev->irq,
120 NULL,
121 mei_txe_irq_thread_handler,
122 IRQF_ONESHOT, KBUILD_MODNAME, dev);
123 else
124 err = request_threaded_irq(pdev->irq,
125 mei_txe_irq_quick_handler,
126 mei_txe_irq_thread_handler,
127 IRQF_SHARED, KBUILD_MODNAME, dev);
128 if (err) {
129 dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
130 pdev->irq);
131 goto free_device;
132 }
133
134 if (mei_start(dev)) {
135 dev_err(&pdev->dev, "init hw failure.\n");
136 err = -ENODEV;
137 goto release_irq;
138 }
139
140 err = mei_register(dev);
141 if (err)
142 goto release_irq;
143
144 pci_set_drvdata(pdev, dev);
145
146 return 0;
147
148release_irq:
149
150 mei_cancel_work(dev);
151
152 /* disable interrupts */
153 mei_disable_interrupts(dev);
154
155 free_irq(pdev->irq, dev);
156 pci_disable_msi(pdev);
157
158free_device:
159 mei_txe_pci_iounmap(pdev, hw);
160
161 kfree(dev);
162release_regions:
163 pci_release_regions(pdev);
164disable_device:
165 pci_disable_device(pdev);
166end:
167 dev_err(&pdev->dev, "initialization failed.\n");
168 return err;
169}
170
171/**
172 * mei_txe_remove - Device Removal Routine
173 *
174 * @pdev: PCI device structure
175 *
176 * mei_txe_remove is called by the PCI subsystem to alert the driver
177 * that it should release a PCI device.
178 */
179static void mei_txe_remove(struct pci_dev *pdev)
180{
181 struct mei_device *dev;
182 struct mei_txe_hw *hw;
183
184 dev = pci_get_drvdata(pdev);
185 if (!dev) {
186 dev_err(&pdev->dev, "mei: dev =NULL\n");
187 return;
188 }
189
190 hw = to_txe_hw(dev);
191
192 mei_stop(dev);
193
194 /* disable interrupts */
195 mei_disable_interrupts(dev);
196 free_irq(pdev->irq, dev);
197 pci_disable_msi(pdev);
198
199 pci_set_drvdata(pdev, NULL);
200
201 mei_txe_pci_iounmap(pdev, hw);
202
203 mei_deregister(dev);
204
205 kfree(dev);
206
207 pci_release_regions(pdev);
208 pci_disable_device(pdev);
209}
210
211
212#ifdef CONFIG_PM_SLEEP
213static int mei_txe_pci_suspend(struct device *device)
214{
215 struct pci_dev *pdev = to_pci_dev(device);
216 struct mei_device *dev = pci_get_drvdata(pdev);
217
218 if (!dev)
219 return -ENODEV;
220
221 dev_dbg(&pdev->dev, "suspend\n");
222
223 mei_stop(dev);
224
225 mei_disable_interrupts(dev);
226
227 free_irq(pdev->irq, dev);
228 pci_disable_msi(pdev);
229
230 return 0;
231}
232
233static int mei_txe_pci_resume(struct device *device)
234{
235 struct pci_dev *pdev = to_pci_dev(device);
236 struct mei_device *dev;
237 int err;
238
239 dev = pci_get_drvdata(pdev);
240 if (!dev)
241 return -ENODEV;
242
243 pci_enable_msi(pdev);
244
245 mei_clear_interrupts(dev);
246
247 /* request and enable interrupt */
248 if (pci_dev_msi_enabled(pdev))
249 err = request_threaded_irq(pdev->irq,
250 NULL,
251 mei_txe_irq_thread_handler,
252 IRQF_ONESHOT, KBUILD_MODNAME, dev);
253 else
254 err = request_threaded_irq(pdev->irq,
255 mei_txe_irq_quick_handler,
256 mei_txe_irq_thread_handler,
257 IRQF_SHARED, KBUILD_MODNAME, dev);
258 if (err) {
259 dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
260 pdev->irq);
261 return err;
262 }
263
264 err = mei_restart(dev);
265
266 return err;
267}
268
269static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops,
270 mei_txe_pci_suspend,
271 mei_txe_pci_resume);
272
273#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
274#else
275#define MEI_TXE_PM_OPS NULL
276#endif /* CONFIG_PM_SLEEP */
277/*
278 * PCI driver structure
279 */
280static struct pci_driver mei_txe_driver = {
281 .name = KBUILD_MODNAME,
282 .id_table = mei_txe_pci_tbl,
283 .probe = mei_txe_probe,
284 .remove = mei_txe_remove,
285 .shutdown = mei_txe_remove,
286 .driver.pm = MEI_TXE_PM_OPS,
287};
288
289module_pci_driver(mei_txe_driver);
290
291MODULE_AUTHOR("Intel Corporation");
292MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
293MODULE_LICENSE("GPL v2");
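The probe and resume paths in pci-txe.c above pick their IRQ handlers based on whether MSI could be enabled. A minimal sketch of that pattern, with hypothetical foo_* handlers standing in for the driver's own:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_quick(int irq, void *arg)  { return IRQ_WAKE_THREAD; }
static irqreturn_t foo_thread(int irq, void *arg) { return IRQ_HANDLED; }

static int foo_request_irq(struct pci_dev *pdev, void *drvdata)
{
	/*
	 * An MSI vector is exclusive to this device, so no quick handler
	 * is needed and IRQF_ONESHOT keeps the line masked until the
	 * threaded handler finishes; a shared legacy IRQ needs the quick
	 * handler to decide whether the interrupt is ours at all.
	 */
	if (pci_dev_msi_enabled(pdev))
		return request_threaded_irq(pdev->irq, NULL, foo_thread,
					    IRQF_ONESHOT, "foo", drvdata);

	return request_threaded_irq(pdev->irq, foo_quick, foo_thread,
				    IRQF_SHARED, "foo", drvdata);
}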
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index f70945ed96f6..ebf1cbc198fd 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -25,7 +25,6 @@
25 25
26#include "mei_dev.h" 26#include "mei_dev.h"
27#include "hbm.h" 27#include "hbm.h"
28#include "hw-me.h"
29#include "client.h" 28#include "client.h"
30 29
31static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; 30static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
@@ -53,7 +52,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
53 * 52 *
54 * @dev: the device structure 53 * @dev: the device structure
55 * 54 *
56 * returns -ENENT if wd client cannot be found 55 * returns -ENOTTY if wd client cannot be found
57 * -EIO if write has failed 56 * -EIO if write has failed
58 * 0 on success 57 * 0 on success
59 */ 58 */
@@ -73,7 +72,7 @@ int mei_wd_host_init(struct mei_device *dev)
73 id = mei_me_cl_by_uuid(dev, &mei_wd_guid); 72 id = mei_me_cl_by_uuid(dev, &mei_wd_guid);
74 if (id < 0) { 73 if (id < 0) {
75 dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); 74 dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
76 return id; 75 return -ENOTTY;
77 } 76 }
78 77
79 cl->me_client_id = dev->me_clients[id].client_id; 78 cl->me_client_id = dev->me_clients[id].client_id;
@@ -87,15 +86,20 @@ int mei_wd_host_init(struct mei_device *dev)
87 86
88 cl->state = MEI_FILE_CONNECTING; 87 cl->state = MEI_FILE_CONNECTING;
89 88
90 if (mei_hbm_cl_connect_req(dev, cl)) { 89 ret = mei_cl_connect(cl, NULL);
91 dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n"); 90
92 cl->state = MEI_FILE_DISCONNECTED; 91 if (ret) {
93 cl->host_client_id = 0; 92 dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret);
94 return -EIO; 93 mei_cl_unlink(cl);
94 return ret;
95 } 95 }
96 cl->timer_count = MEI_CONNECT_TIMEOUT;
97 96
98 return 0; 97 ret = mei_watchdog_register(dev);
98 if (ret) {
99 mei_cl_disconnect(cl);
100 mei_cl_unlink(cl);
101 }
102 return ret;
99} 103}
100 104
101/** 105/**
@@ -106,13 +110,16 @@ int mei_wd_host_init(struct mei_device *dev)
106 * returns 0 if success, 110 * returns 0 if success,
107 * -EIO when message send fails 111 * -EIO when message send fails
108 * -EINVAL when invalid message is to be sent 112 * -EINVAL when invalid message is to be sent
113 * -ENODEV on flow control failure
109 */ 114 */
110int mei_wd_send(struct mei_device *dev) 115int mei_wd_send(struct mei_device *dev)
111{ 116{
117 struct mei_cl *cl = &dev->wd_cl;
112 struct mei_msg_hdr hdr; 118 struct mei_msg_hdr hdr;
119 int ret;
113 120
114 hdr.host_addr = dev->wd_cl.host_client_id; 121 hdr.host_addr = cl->host_client_id;
115 hdr.me_addr = dev->wd_cl.me_client_id; 122 hdr.me_addr = cl->me_client_id;
116 hdr.msg_complete = 1; 123 hdr.msg_complete = 1;
117 hdr.reserved = 0; 124 hdr.reserved = 0;
118 hdr.internal = 0; 125 hdr.internal = 0;
@@ -121,10 +128,24 @@ int mei_wd_send(struct mei_device *dev)
121 hdr.length = MEI_WD_START_MSG_SIZE; 128 hdr.length = MEI_WD_START_MSG_SIZE;
122 else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) 129 else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
123 hdr.length = MEI_WD_STOP_MSG_SIZE; 130 hdr.length = MEI_WD_STOP_MSG_SIZE;
124 else 131 else {
132 dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n");
125 return -EINVAL; 133 return -EINVAL;
134 }
126 135
127 return mei_write_message(dev, &hdr, dev->wd_data); 136 ret = mei_write_message(dev, &hdr, dev->wd_data);
137 if (ret) {
138 dev_err(&dev->pdev->dev, "wd: write message failed\n");
139 return ret;
140 }
141
142 ret = mei_cl_flow_ctrl_reduce(cl);
143 if (ret) {
144 dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n");
145 return ret;
146 }
147
148 return 0;
128} 149}
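/*
 * Note on the reworked mei_wd_send(): it now consumes the flow-control
 * credit itself via mei_cl_flow_ctrl_reduce(), which is why the hunks
 * below drop the explicit reduce calls from mei_wd_stop() and
 * mei_wd_ops_ping() after a successful send.
 */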
129 150
130/** 151/**
@@ -133,9 +154,11 @@ int mei_wd_send(struct mei_device *dev)
133 * @dev: the device structure 154 * @dev: the device structure
134 * @preserve: indicate if to keep the timeout value 155 * @preserve: indicate if to keep the timeout value
135 * 156 *
136 * returns 0 if success, 157 * returns 0 if success
137 * -EIO when message send fails 158 * on error:
159 * -EIO when message send fails
138 * -EINVAL when invalid message is to be sent 160 * -EINVAL when invalid message is to be sent
161 * -ETIME on message timeout
139 */ 162 */
140int mei_wd_stop(struct mei_device *dev) 163int mei_wd_stop(struct mei_device *dev)
141{ 164{
@@ -151,20 +174,12 @@ int mei_wd_stop(struct mei_device *dev)
151 174
152 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); 175 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
153 if (ret < 0) 176 if (ret < 0)
154 goto out; 177 goto err;
155
156 if (ret && dev->hbuf_is_ready) {
157 ret = 0;
158 dev->hbuf_is_ready = false;
159
160 if (!mei_wd_send(dev)) {
161 ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
162 if (ret)
163 goto out;
164 } else {
165 dev_err(&dev->pdev->dev, "wd: send stop failed\n");
166 }
167 178
179 if (ret && mei_hbuf_acquire(dev)) {
180 ret = mei_wd_send(dev);
181 if (ret)
182 goto err;
168 dev->wd_pending = false; 183 dev->wd_pending = false;
169 } else { 184 } else {
170 dev->wd_pending = true; 185 dev->wd_pending = true;
@@ -172,21 +187,21 @@ int mei_wd_stop(struct mei_device *dev)
172 187
173 mutex_unlock(&dev->device_lock); 188 mutex_unlock(&dev->device_lock);
174 189
175 ret = wait_event_interruptible_timeout(dev->wait_stop_wd, 190 ret = wait_event_timeout(dev->wait_stop_wd,
176 dev->wd_state == MEI_WD_IDLE, 191 dev->wd_state == MEI_WD_IDLE,
177 msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); 192 msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
178 mutex_lock(&dev->device_lock); 193 mutex_lock(&dev->device_lock);
179 if (dev->wd_state == MEI_WD_IDLE) { 194 if (dev->wd_state != MEI_WD_IDLE) {
180 dev_dbg(&dev->pdev->dev, "wd: stop completed ret=%d.\n", ret); 195 /* timeout */
181 ret = 0; 196 ret = -ETIME;
182 } else {
183 if (!ret)
184 ret = -ETIMEDOUT;
185 dev_warn(&dev->pdev->dev, 197 dev_warn(&dev->pdev->dev,
186 "wd: stop failed to complete ret=%d.\n", ret); 198 "wd: stop failed to complete ret=%d.\n", ret);
199 goto err;
187 } 200 }
188 201 dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n",
189out: 202 MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
203 return 0;
204err:
190 return ret; 205 return ret;
191} 206}
192 207
@@ -260,8 +275,8 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
260 */ 275 */
261static int mei_wd_ops_ping(struct watchdog_device *wd_dev) 276static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
262{ 277{
263 int ret = 0;
264 struct mei_device *dev; 278 struct mei_device *dev;
279 int ret;
265 280
266 dev = watchdog_get_drvdata(wd_dev); 281 dev = watchdog_get_drvdata(wd_dev);
267 if (!dev) 282 if (!dev)
@@ -277,25 +292,18 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
277 292
278 dev->wd_state = MEI_WD_RUNNING; 293 dev->wd_state = MEI_WD_RUNNING;
279 294
295 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
296 if (ret < 0)
297 goto end;
280 /* Check if we can send the ping to HW*/ 298 /* Check if we can send the ping to HW*/
281 if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { 299 if (ret && mei_hbuf_acquire(dev)) {
282 300
283 dev->hbuf_is_ready = false;
284 dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); 301 dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
285 302
286 if (mei_wd_send(dev)) { 303 ret = mei_wd_send(dev);
287 dev_err(&dev->pdev->dev, "wd: send failed.\n"); 304 if (ret)
288 ret = -EIO;
289 goto end; 305 goto end;
290 } 306 dev->wd_pending = false;
291
292 if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
293 dev_err(&dev->pdev->dev,
294 "wd: mei_cl_flow_ctrl_reduce() failed.\n");
295 ret = -EIO;
296 goto end;
297 }
298
299 } else { 307 } else {
300 dev->wd_pending = true; 308 dev->wd_pending = true;
301 } 309 }
@@ -363,17 +371,25 @@ static struct watchdog_device amt_wd_dev = {
363}; 371};
364 372
365 373
366void mei_watchdog_register(struct mei_device *dev) 374int mei_watchdog_register(struct mei_device *dev)
367{ 375{
368 if (watchdog_register_device(&amt_wd_dev)) { 376
369 dev_err(&dev->pdev->dev, 377 int ret;
370 "wd: unable to register watchdog device.\n"); 378
371 return; 379 /* unlock to preserve correct locking order */
380 mutex_unlock(&dev->device_lock);
381 ret = watchdog_register_device(&amt_wd_dev);
382 mutex_lock(&dev->device_lock);
383 if (ret) {
384 dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n",
385 ret);
386 return ret;
372 } 387 }
373 388
374 dev_dbg(&dev->pdev->dev, 389 dev_dbg(&dev->pdev->dev,
375 "wd: successfully register watchdog interface.\n"); 390 "wd: successfully register watchdog interface.\n");
376 watchdog_set_drvdata(&amt_wd_dev, dev); 391 watchdog_set_drvdata(&amt_wd_dev, dev);
392 return 0;
377} 393}
378 394
379void mei_watchdog_unregister(struct mei_device *dev) 395void mei_watchdog_unregister(struct mei_device *dev)
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index f9c29bc918bc..dbc5afde1392 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -194,7 +194,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
194 for (i = 0; i < MIC_MIN_MSIX; i++) 194 for (i = 0; i < MIC_MIN_MSIX; i++)
195 mdev->irq_info.msix_entries[i].entry = i; 195 mdev->irq_info.msix_entries[i].entry = i;
196 196
197 rc = pci_enable_msix(pdev, mdev->irq_info.msix_entries, 197 rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
198 MIC_MIN_MSIX); 198 MIC_MIN_MSIX);
199 if (rc) { 199 if (rc) {
200 dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc); 200 dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
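pci_enable_msix() could return a positive count meaning "only this many vectors are available", which callers frequently mishandled; pci_enable_msix_exact() returns 0 or a negative errno. A minimal sketch of the retry pattern for a caller that can live with fewer vectors, using hypothetical foo_* names (the vmci hunk further down does the same thing with -ENOSPC):

#include <linux/pci.h>

static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int want, int min)
{
	int rc = pci_enable_msix_exact(pdev, entries, want);

	/* -ENOSPC: fewer than 'want' vectors are available, retry smaller */
	if (rc == -ENOSPC && min < want)
		rc = pci_enable_msix_exact(pdev, entries, min);

	return rc;	/* 0 on success, negative errno on failure */
}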
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index a5925f7f17f6..956597321d2a 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -636,6 +636,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
636 u8 mac[ETH_ALEN]; 636 u8 mac[ETH_ALEN];
637 ssize_t rom_size; 637 ssize_t rom_size;
638 struct pch_phub_reg *chip = dev_get_drvdata(dev); 638 struct pch_phub_reg *chip = dev_get_drvdata(dev);
639 int ret;
639 640
640 if (!mac_pton(buf, mac)) 641 if (!mac_pton(buf, mac))
641 return -EINVAL; 642 return -EINVAL;
@@ -644,8 +645,10 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
644 if (!chip->pch_phub_extrom_base_address) 645 if (!chip->pch_phub_extrom_base_address)
645 return -ENOMEM; 646 return -ENOMEM;
646 647
647 pch_phub_write_gbe_mac_addr(chip, mac); 648 ret = pch_phub_write_gbe_mac_addr(chip, mac);
648 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); 649 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
650 if (ret)
651 return ret;
649 652
650 return count; 653 return count;
651} 654}
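For context on the pch_phub fix above: a sysfs ->store() callback reports success by returning the number of bytes consumed and failure by returning a negative errno, so swallowing the write error made a failed MAC update look successful to userspace. A minimal sketch, with a hypothetical foo_apply_setting() helper standing in for pch_phub_write_gbe_mac_addr():

#include <linux/device.h>

/* hypothetical helper; returns 0 on success or a negative errno */
static int foo_apply_setting(struct device *dev, const char *buf)
{
	return 0;
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int ret = foo_apply_setting(dev, buf);

	/* bytes consumed on success, negative errno on error */
	return ret ? ret : count;
}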
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index afe66571ce0b..21181fa243df 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -24,6 +24,9 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/of.h> 26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/list.h>
29#include <linux/list_sort.h>
27#include <linux/platform_device.h> 30#include <linux/platform_device.h>
28#include <linux/slab.h> 31#include <linux/slab.h>
29#include <linux/spinlock.h> 32#include <linux/spinlock.h>
@@ -36,14 +39,35 @@ struct sram_dev {
36 struct clk *clk; 39 struct clk *clk;
37}; 40};
38 41
42struct sram_reserve {
43 struct list_head list;
44 u32 start;
45 u32 size;
46};
47
48static int sram_reserve_cmp(void *priv, struct list_head *a,
49 struct list_head *b)
50{
51 struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
52 struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
53
54 return ra->start - rb->start;
55}
56
39static int sram_probe(struct platform_device *pdev) 57static int sram_probe(struct platform_device *pdev)
40{ 58{
41 void __iomem *virt_base; 59 void __iomem *virt_base;
42 struct sram_dev *sram; 60 struct sram_dev *sram;
43 struct resource *res; 61 struct resource *res;
44 unsigned long size; 62 struct device_node *np = pdev->dev.of_node, *child;
63 unsigned long size, cur_start, cur_size;
64 struct sram_reserve *rblocks, *block;
65 struct list_head reserve_list;
66 unsigned int nblocks;
45 int ret; 67 int ret;
46 68
69 INIT_LIST_HEAD(&reserve_list);
70
47 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
48 virt_base = devm_ioremap_resource(&pdev->dev, res); 72 virt_base = devm_ioremap_resource(&pdev->dev, res);
49 if (IS_ERR(virt_base)) 73 if (IS_ERR(virt_base))
@@ -65,19 +89,106 @@ static int sram_probe(struct platform_device *pdev)
65 if (!sram->pool) 89 if (!sram->pool)
66 return -ENOMEM; 90 return -ENOMEM;
67 91
68 ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, 92 /*
69 res->start, size, -1); 93 * We need an additional block to mark the end of the memory region
70 if (ret < 0) { 94 * after the reserved blocks from the dt are processed.
71 if (sram->clk) 95 */
72 clk_disable_unprepare(sram->clk); 96 nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
73 return ret; 97 rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
98 if (!rblocks) {
99 ret = -ENOMEM;
100 goto err_alloc;
101 }
102
103 block = &rblocks[0];
104 for_each_available_child_of_node(np, child) {
105 struct resource child_res;
106
107 ret = of_address_to_resource(child, 0, &child_res);
108 if (ret < 0) {
109 dev_err(&pdev->dev,
110 "could not get address for node %s\n",
111 child->full_name);
112 goto err_chunks;
113 }
114
115 if (child_res.start < res->start || child_res.end > res->end) {
116 dev_err(&pdev->dev,
117 "reserved block %s outside the sram area\n",
118 child->full_name);
119 ret = -EINVAL;
120 goto err_chunks;
121 }
122
123 block->start = child_res.start - res->start;
124 block->size = resource_size(&child_res);
125 list_add_tail(&block->list, &reserve_list);
126
127 dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n",
128 block->start,
129 block->start + block->size);
130
131 block++;
132 }
133
134 /* the last chunk marks the end of the region */
135 rblocks[nblocks - 1].start = size;
136 rblocks[nblocks - 1].size = 0;
137 list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);
138
139 list_sort(NULL, &reserve_list, sram_reserve_cmp);
140
141 cur_start = 0;
142
143 list_for_each_entry(block, &reserve_list, list) {
144 /* can only happen if sections overlap */
145 if (block->start < cur_start) {
146 dev_err(&pdev->dev,
147 "block at 0x%x starts after current offset 0x%lx\n",
148 block->start, cur_start);
149 ret = -EINVAL;
150 goto err_chunks;
151 }
152
153 /* current start is in a reserved block, so continue after it */
154 if (block->start == cur_start) {
155 cur_start = block->start + block->size;
156 continue;
157 }
158
159 /*
160 * allocate the space between the current starting
161 * address and the following reserved block, or the
162 * end of the region.
163 */
164 cur_size = block->start - cur_start;
165
166 dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n",
167 cur_start, cur_start + cur_size);
168 ret = gen_pool_add_virt(sram->pool,
169 (unsigned long)virt_base + cur_start,
170 res->start + cur_start, cur_size, -1);
171 if (ret < 0)
172 goto err_chunks;
173
174 /* next allocation after this reserved block */
175 cur_start = block->start + block->size;
74 } 176 }
75 177
178 kfree(rblocks);
179
76 platform_set_drvdata(pdev, sram); 180 platform_set_drvdata(pdev, sram);
77 181
78 dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base); 182 dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
79 183
80 return 0; 184 return 0;
185
186err_chunks:
187 kfree(rblocks);
188err_alloc:
189 if (sram->clk)
190 clk_disable_unprepare(sram->clk);
191 return ret;
81} 192}
82 193
83static int sram_remove(struct platform_device *pdev) 194static int sram_remove(struct platform_device *pdev)
@@ -87,8 +198,6 @@ static int sram_remove(struct platform_device *pdev)
87 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) 198 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
88 dev_dbg(&pdev->dev, "removed while SRAM allocated\n"); 199 dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
89 200
90 gen_pool_destroy(sram->pool);
91
92 if (sram->clk) 201 if (sram->clk)
93 clk_disable_unprepare(sram->clk); 202 clk_disable_unprepare(sram->clk);
94 203
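The reworked sram_probe() above exports to the gen_pool only the gaps between reserved blocks; the zero-size terminator block at offset 'size' makes the tail gap fall out of the same loop. A stripped-down sketch of that walk, assuming a sorted, non-overlapping array and a caller-supplied add_chunk() callback (both hypothetical):

#include <linux/types.h>

struct reserve {
	u32 start;	/* offset into the SRAM region */
	u32 size;
};

/* blocks[] is sorted by start and ends with a zero-size entry at region end */
static void export_gaps(const struct reserve *blocks, int nblocks,
			void (*add_chunk)(u32 start, u32 len))
{
	u32 cur = 0;
	int i;

	for (i = 0; i < nblocks; i++) {
		if (blocks[i].start > cur)	/* gap before this reserved block */
			add_chunk(cur, blocks[i].start - cur);
		cur = blocks[i].start + blocks[i].size;
	}
}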
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 3aed525e55b4..1972d57aadb3 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -22,7 +22,6 @@
22#define pr_fmt(fmt) "(stc): " fmt 22#define pr_fmt(fmt) "(stc): " fmt
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/tty.h> 25#include <linux/tty.h>
27 26
28#include <linux/seq_file.h> 27#include <linux/seq_file.h>
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index 83da711ce9f1..cb0289b44a17 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
25#include <linux/of.h> 24#include <linux/of.h>
26 25
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 5bc10fa193de..b00335652e52 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/i2c.h> 24#include <linux/i2c.h>
26#include <linux/mutex.h> 25#include <linux/mutex.h>
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index d35cda06b5e8..e0d5017785e5 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -383,11 +383,12 @@ static int vmci_enable_msix(struct pci_dev *pdev,
383 vmci_dev->msix_entries[i].vector = i; 383 vmci_dev->msix_entries[i].vector = i;
384 } 384 }
385 385
386 result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); 386 result = pci_enable_msix_exact(pdev,
387 vmci_dev->msix_entries, VMCI_MAX_INTRS);
387 if (result == 0) 388 if (result == 0)
388 vmci_dev->exclusive_vectors = true; 389 vmci_dev->exclusive_vectors = true;
389 else if (result > 0) 390 else if (result == -ENOSPC)
390 result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); 391 result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
391 392
392 return result; 393 return result;
393} 394}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 90ff447bf043..a4bee41ad5cb 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -428,6 +428,7 @@ config MTD_NAND_FSL_IFC
428 tristate "NAND support for Freescale IFC controller" 428 tristate "NAND support for Freescale IFC controller"
429 depends on MTD_NAND && FSL_SOC 429 depends on MTD_NAND && FSL_SOC
430 select FSL_IFC 430 select FSL_IFC
431 select MEMORY
431 help 432 help
432 Various Freescale chips e.g P1010, include a NAND Flash machine 433 Various Freescale chips e.g P1010, include a NAND Flash machine
433 with built-in hardware ECC capabilities. 434 with built-in hardware ECC capabilities.
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 90ca7e75d6f0..50d9161c4faf 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -30,7 +30,7 @@
30#include <linux/mtd/nand.h> 30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32#include <linux/mtd/nand_ecc.h> 32#include <linux/mtd/nand_ecc.h>
33#include <asm/fsl_ifc.h> 33#include <linux/fsl_ifc.h>
34 34
35#define FSL_IFC_V1_1_0 0x01010000 35#define FSL_IFC_V1_1_0 0x01010000
36#define ERR_BYTE 0xFF /* Value returned for read 36#define ERR_BYTE 0xFF /* Value returned for read
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 6a83ee1e9178..3fa66244ce32 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -905,7 +905,8 @@ int parport_claim_or_block(struct pardevice *dev)
905 /* If dev->waiting is clear now, an interrupt 905 /* If dev->waiting is clear now, an interrupt
906 gave us the port and we would deadlock if we slept. */ 906 gave us the port and we would deadlock if we slept. */
907 if (dev->waiting) { 907 if (dev->waiting) {
908 interruptible_sleep_on (&dev->wait_q); 908 wait_event_interruptible(dev->wait_q,
909 !dev->waiting);
909 if (signal_pending (current)) { 910 if (signal_pending (current)) {
910 return -EINTR; 911 return -EINTR;
911 } 912 }
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
new file mode 100644
index 000000000000..bf1295e19f89
--- /dev/null
+++ b/drivers/spmi/Kconfig
@@ -0,0 +1,27 @@
1#
2# SPMI driver configuration
3#
4menuconfig SPMI
5 tristate "SPMI support"
6 help
7 SPMI (System Power Management Interface) is a two-wire
8 serial interface between baseband and application processors
9 and Power Management Integrated Circuits (PMIC).
10
11if SPMI
12
13config SPMI_MSM_PMIC_ARB
14 tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
15 depends on ARM
16 depends on IRQ_DOMAIN
17 depends on ARCH_QCOM || COMPILE_TEST
18 default ARCH_QCOM
19 help
20 If you say yes to this option, support will be included for the
21 built-in SPMI PMIC Arbiter interface on Qualcomm MSM family
22 processors.
23
24 This is required for communicating with Qualcomm PMICs and
25 other devices that have the SPMI interface.
26
27endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
new file mode 100644
index 000000000000..fc75104a5aab
--- /dev/null
+++ b/drivers/spmi/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for kernel SPMI framework.
3#
4obj-$(CONFIG_SPMI) += spmi.o
5
6obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
new file mode 100644
index 000000000000..246e03a18c94
--- /dev/null
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -0,0 +1,778 @@
1/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/delay.h>
13#include <linux/err.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/irqchip/chained_irq.h>
17#include <linux/irqdomain.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spmi.h>
25
26/* PMIC Arbiter configuration registers */
27#define PMIC_ARB_VERSION 0x0000
28#define PMIC_ARB_INT_EN 0x0004
29
30/* PMIC Arbiter channel registers */
31#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
32#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
33#define PMIC_ARB_STATUS(N) (0x0808 + (0x80 * (N)))
34#define PMIC_ARB_WDATA0(N) (0x0810 + (0x80 * (N)))
35#define PMIC_ARB_WDATA1(N) (0x0814 + (0x80 * (N)))
36#define PMIC_ARB_RDATA0(N) (0x0818 + (0x80 * (N)))
37#define PMIC_ARB_RDATA1(N) (0x081C + (0x80 * (N)))
38
39/* Interrupt Controller */
40#define SPMI_PIC_OWNER_ACC_STATUS(M, N) (0x0000 + ((32 * (M)) + (4 * (N))))
41#define SPMI_PIC_ACC_ENABLE(N) (0x0200 + (4 * (N)))
42#define SPMI_PIC_IRQ_STATUS(N) (0x0600 + (4 * (N)))
43#define SPMI_PIC_IRQ_CLEAR(N) (0x0A00 + (4 * (N)))
44
45/* Mapping Table */
46#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
47#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
48#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
49#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
50#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
51#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
52
53#define SPMI_MAPPING_TABLE_LEN 255
54#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
55
56/* Ownership Table */
57#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
58#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)
59
60/* Channel Status fields */
61enum pmic_arb_chnl_status {
62 PMIC_ARB_STATUS_DONE = (1 << 0),
63 PMIC_ARB_STATUS_FAILURE = (1 << 1),
64 PMIC_ARB_STATUS_DENIED = (1 << 2),
65 PMIC_ARB_STATUS_DROPPED = (1 << 3),
66};
67
68/* Command register fields */
69#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
70
71/* Command Opcodes */
72enum pmic_arb_cmd_op_code {
73 PMIC_ARB_OP_EXT_WRITEL = 0,
74 PMIC_ARB_OP_EXT_READL = 1,
75 PMIC_ARB_OP_EXT_WRITE = 2,
76 PMIC_ARB_OP_RESET = 3,
77 PMIC_ARB_OP_SLEEP = 4,
78 PMIC_ARB_OP_SHUTDOWN = 5,
79 PMIC_ARB_OP_WAKEUP = 6,
80 PMIC_ARB_OP_AUTHENTICATE = 7,
81 PMIC_ARB_OP_MSTR_READ = 8,
82 PMIC_ARB_OP_MSTR_WRITE = 9,
83 PMIC_ARB_OP_EXT_READ = 13,
84 PMIC_ARB_OP_WRITE = 14,
85 PMIC_ARB_OP_READ = 15,
86 PMIC_ARB_OP_ZERO_WRITE = 16,
87};
88
89/* Maximum number of supported PMIC peripherals */
90#define PMIC_ARB_MAX_PERIPHS 256
91#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
92#define PMIC_ARB_TIMEOUT_US 100
93#define PMIC_ARB_MAX_TRANS_BYTES (8)
94
95#define PMIC_ARB_APID_MASK 0xFF
96#define PMIC_ARB_PPID_MASK 0xFFF
97
98/* interrupt enable bit */
99#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
100
101/**
102 * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
103 *
104 * @base: address of the PMIC Arbiter core registers.
105 * @intr: address of the SPMI interrupt control registers.
106 * @cnfg: address of the PMIC Arbiter configuration registers.
107 * @lock: lock to synchronize accesses.
108 * @channel: which channel to use for accesses.
109 * @irq: PMIC ARB interrupt.
110 * @ee: the current Execution Environment
111 * @min_apid: minimum APID (used for bounding IRQ search)
112 * @max_apid: maximum APID
113 * @mapping_table: in-memory copy of PPID -> APID mapping table.
114 * @domain: irq domain object for PMIC IRQ domain
115 * @spmic: SPMI controller object
116 * @apid_to_ppid: cached mapping from APID to PPID
117 */
118struct spmi_pmic_arb_dev {
119 void __iomem *base;
120 void __iomem *intr;
121 void __iomem *cnfg;
122 raw_spinlock_t lock;
123 u8 channel;
124 int irq;
125 u8 ee;
126 u8 min_apid;
127 u8 max_apid;
128 u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
129 struct irq_domain *domain;
130 struct spmi_controller *spmic;
131 u16 apid_to_ppid[256];
132};
133
134static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
135{
136 return readl_relaxed(dev->base + offset);
137}
138
139static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
140 u32 offset, u32 val)
141{
142 writel_relaxed(val, dev->base + offset);
143}
144
145/**
146 * pa_read_data: reads a pmic-arb register and copies 1..4 bytes to buf
147 * @bc: byte count -1. range: 0..3
148 * @reg: register's address
149 * @buf: output parameter, length must be bc + 1
150 */
151static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
152{
153 u32 data = pmic_arb_base_read(dev, reg);
154 memcpy(buf, &data, (bc & 3) + 1);
155}
156
157/**
158 * pa_write_data: writes 1..4 bytes from buf to a pmic-arb register
159 * @bc: byte-count -1. range: 0..3.
160 * @reg: register's address.
161 * @buf: buffer to write. length must be bc + 1.
162 */
163static void
164pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
165{
166 u32 data = 0;
167 memcpy(&data, buf, (bc & 3) + 1);
168 pmic_arb_base_write(dev, reg, data);
169}
170
171static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
172{
173 struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
174 u32 status = 0;
175 u32 timeout = PMIC_ARB_TIMEOUT_US;
176 u32 offset = PMIC_ARB_STATUS(dev->channel);
177
178 while (timeout--) {
179 status = pmic_arb_base_read(dev, offset);
180
181 if (status & PMIC_ARB_STATUS_DONE) {
182 if (status & PMIC_ARB_STATUS_DENIED) {
183 dev_err(&ctrl->dev,
184 "%s: transaction denied (0x%x)\n",
185 __func__, status);
186 return -EPERM;
187 }
188
189 if (status & PMIC_ARB_STATUS_FAILURE) {
190 dev_err(&ctrl->dev,
191 "%s: transaction failed (0x%x)\n",
192 __func__, status);
193 return -EIO;
194 }
195
196 if (status & PMIC_ARB_STATUS_DROPPED) {
197 dev_err(&ctrl->dev,
198 "%s: transaction dropped (0x%x)\n",
199 __func__, status);
200 return -EIO;
201 }
202
203 return 0;
204 }
205 udelay(1);
206 }
207
208 dev_err(&ctrl->dev,
209 "%s: timeout, status 0x%x\n",
210 __func__, status);
211 return -ETIMEDOUT;
212}
213
214/* Non-data command */
215static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
216{
217 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
218 unsigned long flags;
219 u32 cmd;
220 int rc;
221
222 /* Check for valid non-data command */
223 if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
224 return -EINVAL;
225
226 cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
227
228 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
229 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
230 rc = pmic_arb_wait_for_done(ctrl);
231 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
232
233 return rc;
234}
235
236static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
237 u16 addr, u8 *buf, size_t len)
238{
239 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
240 unsigned long flags;
241 u8 bc = len - 1;
242 u32 cmd;
243 int rc;
244
245 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
246 dev_err(&ctrl->dev,
247 "pmic-arb supports 1..%d bytes per trans, but %d requested",
248 PMIC_ARB_MAX_TRANS_BYTES, len);
249 return -EINVAL;
250 }
251
252 /* Check the opcode */
253 if (opc >= 0x60 && opc <= 0x7F)
254 opc = PMIC_ARB_OP_READ;
255 else if (opc >= 0x20 && opc <= 0x2F)
256 opc = PMIC_ARB_OP_EXT_READ;
257 else if (opc >= 0x38 && opc <= 0x3F)
258 opc = PMIC_ARB_OP_EXT_READL;
259 else
260 return -EINVAL;
261
262 cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
263
264 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
265 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
266 rc = pmic_arb_wait_for_done(ctrl);
267 if (rc)
268 goto done;
269
270 pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel),
271 min_t(u8, bc, 3));
272
273 if (bc > 3)
274 pa_read_data(pmic_arb, buf + 4,
275 PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4);
276
277done:
278 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
279 return rc;
280}
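/*
 * Worked example with hypothetical values: a 7-byte read from slave 2
 * at 16-bit address 0x4056 maps to the EXT_READL opcode, bc = 7 - 1 = 6,
 * and the command word is built as
 *
 *	cmd = (PMIC_ARB_OP_EXT_READL << 27) | (2 << 20) | (0x4056 << 4) | (6 & 0x7)
 *
 * The reply is then collected as bytes 0..3 from RDATA0 and bytes 4..6
 * from RDATA1, which is why the second pa_read_data() call passes bc - 4.
 */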
281
282static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
283 u16 addr, const u8 *buf, size_t len)
284{
285 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
286 unsigned long flags;
287 u8 bc = len - 1;
288 u32 cmd;
289 int rc;
290
291 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
292 dev_err(&ctrl->dev,
293 "pmic-arb supports 1..%d bytes per trans, but:%d requested",
294 PMIC_ARB_MAX_TRANS_BYTES, len);
295 return -EINVAL;
296 }
297
298 /* Check the opcode */
299 if (opc >= 0x40 && opc <= 0x5F)
300 opc = PMIC_ARB_OP_WRITE;
301 else if (opc >= 0x00 && opc <= 0x0F)
302 opc = PMIC_ARB_OP_EXT_WRITE;
303 else if (opc >= 0x30 && opc <= 0x37)
304 opc = PMIC_ARB_OP_EXT_WRITEL;
305 else if (opc >= 0x80 && opc <= 0xFF)
306 opc = PMIC_ARB_OP_ZERO_WRITE;
307 else
308 return -EINVAL;
309
310 cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
311
312 /* Write data to FIFOs */
313 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
314 pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
315 , min_t(u8, bc, 3));
316 if (bc > 3)
317 pa_write_data(pmic_arb, buf + 4,
318 PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4);
319
320 /* Start the transaction */
321 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
322 rc = pmic_arb_wait_for_done(ctrl);
323 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
324
325 return rc;
326}
327
328enum qpnpint_regs {
329 QPNPINT_REG_RT_STS = 0x10,
330 QPNPINT_REG_SET_TYPE = 0x11,
331 QPNPINT_REG_POLARITY_HIGH = 0x12,
332 QPNPINT_REG_POLARITY_LOW = 0x13,
333 QPNPINT_REG_LATCHED_CLR = 0x14,
334 QPNPINT_REG_EN_SET = 0x15,
335 QPNPINT_REG_EN_CLR = 0x16,
336 QPNPINT_REG_LATCHED_STS = 0x18,
337};
338
339struct spmi_pmic_arb_qpnpint_type {
340 u8 type; /* 1 -> edge */
341 u8 polarity_high;
342 u8 polarity_low;
343} __packed;
344
345/* Simplified accessor functions for irqchip callbacks */
346static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
347 size_t len)
348{
349 struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
350 u8 sid = d->hwirq >> 24;
351 u8 per = d->hwirq >> 16;
352
353 if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
354 (per << 8) + reg, buf, len))
355 dev_err_ratelimited(&pa->spmic->dev,
356 "failed irqchip transaction on %x\n",
357 d->irq);
358}
359
360static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
361{
362 struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
363 u8 sid = d->hwirq >> 24;
364 u8 per = d->hwirq >> 16;
365
366 if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
367 (per << 8) + reg, buf, len))
368 dev_err_ratelimited(&pa->spmic->dev,
369 "failed irqchip transaction on %x\n",
370 d->irq);
371}
372
373static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
374{
375 unsigned int irq;
376 u32 status;
377 int id;
378
379 status = readl_relaxed(pa->intr + SPMI_PIC_IRQ_STATUS(apid));
380 while (status) {
381 id = ffs(status) - 1;
382 status &= ~(1 << id);
383 irq = irq_find_mapping(pa->domain,
384 pa->apid_to_ppid[apid] << 16
385 | id << 8
386 | apid);
387 generic_handle_irq(irq);
388 }
389}
390
391static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
392{
393 struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
394 struct irq_chip *chip = irq_get_chip(irq);
395 void __iomem *intr = pa->intr;
396 int first = pa->min_apid >> 5;
397 int last = pa->max_apid >> 5;
398 u32 status;
399 int i, id;
400
401 chained_irq_enter(chip, desc);
402
403 for (i = first; i <= last; ++i) {
404 status = readl_relaxed(intr +
405 SPMI_PIC_OWNER_ACC_STATUS(pa->ee, i));
406 while (status) {
407 id = ffs(status) - 1;
408 status &= ~(1 << id);
409 periph_interrupt(pa, id + i * 32);
410 }
411 }
412
413 chained_irq_exit(chip, desc);
414}
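/*
 * Interrupt dispatch is two-level: the summary loop above scans the
 * per-EE "owner accumulator" words to find which APIDs (peripherals)
 * are asserting, and periph_interrupt() then reads that peripheral's
 * own status register to fan out to the individual hwirqs mapped in
 * the irq domain.
 */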
415
416static void qpnpint_irq_ack(struct irq_data *d)
417{
418 struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
419 u8 irq = d->hwirq >> 8;
420 u8 apid = d->hwirq;
421 unsigned long flags;
422 u8 data;
423
424 raw_spin_lock_irqsave(&pa->lock, flags);
425 writel_relaxed(1 << irq, pa->intr + SPMI_PIC_IRQ_CLEAR(apid));
426 raw_spin_unlock_irqrestore(&pa->lock, flags);
427
428 data = 1 << irq;
429 qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
430}
431
432static void qpnpint_irq_mask(struct irq_data *d)
433{
434 struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
435 u8 irq = d->hwirq >> 8;
436 u8 apid = d->hwirq;
437 unsigned long flags;
438 u32 status;
439 u8 data;
440
441 raw_spin_lock_irqsave(&pa->lock, flags);
442 status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
443 if (status & SPMI_PIC_ACC_ENABLE_BIT) {
444 status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
445 writel_relaxed(status, pa->intr + SPMI_PIC_ACC_ENABLE(apid));
446 }
447 raw_spin_unlock_irqrestore(&pa->lock, flags);
448
449 data = 1 << irq;
450 qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
451}
452
453static void qpnpint_irq_unmask(struct irq_data *d)
454{
455 struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
456 u8 irq = d->hwirq >> 8;
457 u8 apid = d->hwirq;
458 unsigned long flags;
459 u32 status;
460 u8 data;
461
462 raw_spin_lock_irqsave(&pa->lock, flags);
463 status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid));
464 if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
465 writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
466 pa->intr + SPMI_PIC_ACC_ENABLE(apid));
467 }
468 raw_spin_unlock_irqrestore(&pa->lock, flags);
469
470 data = 1 << irq;
471 qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
472}
473
474static void qpnpint_irq_enable(struct irq_data *d)
475{
476 u8 irq = d->hwirq >> 8;
477 u8 data;
478
479 qpnpint_irq_unmask(d);
480
481 data = 1 << irq;
482 qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
483}
484
485static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
486{
487 struct spmi_pmic_arb_qpnpint_type type;
488 u8 irq = d->hwirq >> 8;
489
490 qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
491
492 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
493 type.type |= 1 << irq;
494 if (flow_type & IRQF_TRIGGER_RISING)
495 type.polarity_high |= 1 << irq;
496 if (flow_type & IRQF_TRIGGER_FALLING)
497 type.polarity_low |= 1 << irq;
498 } else {
499 if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
500 (flow_type & (IRQF_TRIGGER_LOW)))
501 return -EINVAL;
502
503 type.type &= ~(1 << irq); /* level trig */
504 if (flow_type & IRQF_TRIGGER_HIGH)
505 type.polarity_high |= 1 << irq;
506 else
507 type.polarity_low |= 1 << irq;
508 }
509
510 qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
511 return 0;
512}
513
514static struct irq_chip pmic_arb_irqchip = {
515 .name = "pmic_arb",
516 .irq_enable = qpnpint_irq_enable,
517 .irq_ack = qpnpint_irq_ack,
518 .irq_mask = qpnpint_irq_mask,
519 .irq_unmask = qpnpint_irq_unmask,
520 .irq_set_type = qpnpint_irq_set_type,
521 .flags = IRQCHIP_MASK_ON_SUSPEND
522 | IRQCHIP_SKIP_SET_WAKE,
523};
524
525struct spmi_pmic_arb_irq_spec {
526 unsigned slave:4;
527 unsigned per:8;
528 unsigned irq:3;
529};
530
531static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
532 struct spmi_pmic_arb_irq_spec *spec,
533 u8 *apid)
534{
535 u16 ppid = spec->slave << 8 | spec->per;
536 u32 *mapping_table = pa->mapping_table;
537 int index = 0, i;
538 u32 data;
539
540 for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
541 data = mapping_table[index];
542
543 if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
544 if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
545 index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
546 } else {
547 *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
548 return 0;
549 }
550 } else {
551 if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
552 index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
553 } else {
554 *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
555 return 0;
556 }
557 }
558 }
559
560 return -ENODEV;
561}
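/*
 * The mapping table read out at probe time is a small binary tree
 * packed into 32-bit words: each word selects one bit of the PPID to
 * test and, for either value of that bit, holds either the index of
 * the next word to visit (flag set) or the final APID (flag clear).
 * At most SPMI_MAPPING_TABLE_TREE_DEPTH (16) words are visited per
 * lookup.
 */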
562
563static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
564 struct device_node *controller,
565 const u32 *intspec,
566 unsigned int intsize,
567 unsigned long *out_hwirq,
568 unsigned int *out_type)
569{
570 struct spmi_pmic_arb_dev *pa = d->host_data;
571 struct spmi_pmic_arb_irq_spec spec;
572 int err;
573 u8 apid;
574
575 dev_dbg(&pa->spmic->dev,
576 "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
577 intspec[0], intspec[1], intspec[2]);
578
579 if (d->of_node != controller)
580 return -EINVAL;
581 if (intsize != 4)
582 return -EINVAL;
583 if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
584 return -EINVAL;
585
586 spec.slave = intspec[0];
587 spec.per = intspec[1];
588 spec.irq = intspec[2];
589
590 err = search_mapping_table(pa, &spec, &apid);
591 if (err)
592 return err;
593
594 pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
595
596 /* Keep track of {max,min}_apid for bounding search during interrupt */
597 if (apid > pa->max_apid)
598 pa->max_apid = apid;
599 if (apid < pa->min_apid)
600 pa->min_apid = apid;
601
602 *out_hwirq = spec.slave << 24
603 | spec.per << 16
604 | spec.irq << 8
605 | apid;
606 *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
607
608 dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
609
610 return 0;
611}
612
613static int qpnpint_irq_domain_map(struct irq_domain *d,
614 unsigned int virq,
615 irq_hw_number_t hwirq)
616{
617 struct spmi_pmic_arb_dev *pa = d->host_data;
618
619 dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
620
621 irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
622 irq_set_chip_data(virq, d->host_data);
623 irq_set_noprobe(virq);
624 return 0;
625}
626
627static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
628 .map = qpnpint_irq_domain_map,
629 .xlate = qpnpint_irq_domain_dt_translate,
630};
631
632static int spmi_pmic_arb_probe(struct platform_device *pdev)
633{
634 struct spmi_pmic_arb_dev *pa;
635 struct spmi_controller *ctrl;
636 struct resource *res;
637 u32 channel, ee;
638 int err, i;
639
640 ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
641 if (!ctrl)
642 return -ENOMEM;
643
644 pa = spmi_controller_get_drvdata(ctrl);
645 pa->spmic = ctrl;
646
647 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
648 pa->base = devm_ioremap_resource(&ctrl->dev, res);
649 if (IS_ERR(pa->base)) {
650 err = PTR_ERR(pa->base);
651 goto err_put_ctrl;
652 }
653
654 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
655 pa->intr = devm_ioremap_resource(&ctrl->dev, res);
656 if (IS_ERR(pa->intr)) {
657 err = PTR_ERR(pa->intr);
658 goto err_put_ctrl;
659 }
660
661 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
662 pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
663 if (IS_ERR(pa->cnfg)) {
664 err = PTR_ERR(pa->cnfg);
665 goto err_put_ctrl;
666 }
667
668 pa->irq = platform_get_irq_byname(pdev, "periph_irq");
669 if (pa->irq < 0) {
670 err = pa->irq;
671 goto err_put_ctrl;
672 }
673
674 err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
675 if (err) {
676 dev_err(&pdev->dev, "channel unspecified.\n");
677 goto err_put_ctrl;
678 }
679
680 if (channel > 5) {
681 dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
682 channel);
683 goto err_put_ctrl;
684 }
685
686 pa->channel = channel;
687
688 err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
689 if (err) {
690 dev_err(&pdev->dev, "EE unspecified.\n");
691 goto err_put_ctrl;
692 }
693
694 if (ee > 5) {
695 dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
696 err = -EINVAL;
697 goto err_put_ctrl;
698 }
699
700 pa->ee = ee;
701
702 for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
703 pa->mapping_table[i] = readl_relaxed(
704 pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
705
706 /* Initialize max_apid/min_apid to the opposite bounds, during
707 * the irq domain translation, we are sure to update these */
708 pa->max_apid = 0;
709 pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
710
711 platform_set_drvdata(pdev, ctrl);
712 raw_spin_lock_init(&pa->lock);
713
714 ctrl->cmd = pmic_arb_cmd;
715 ctrl->read_cmd = pmic_arb_read_cmd;
716 ctrl->write_cmd = pmic_arb_write_cmd;
717
718 dev_dbg(&pdev->dev, "adding irq domain\n");
719 pa->domain = irq_domain_add_tree(pdev->dev.of_node,
720 &pmic_arb_irq_domain_ops, pa);
721 if (!pa->domain) {
722 dev_err(&pdev->dev, "unable to create irq_domain\n");
723 err = -ENOMEM;
724 goto err_put_ctrl;
725 }
726
727 irq_set_handler_data(pa->irq, pa);
728 irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
729
730 err = spmi_controller_add(ctrl);
731 if (err)
732 goto err_domain_remove;
733
734 dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
735 pmic_arb_base_read(pa, PMIC_ARB_VERSION));
736
737 return 0;
738
739err_domain_remove:
740 irq_set_chained_handler(pa->irq, NULL);
741 irq_set_handler_data(pa->irq, NULL);
742 irq_domain_remove(pa->domain);
743err_put_ctrl:
744 spmi_controller_put(ctrl);
745 return err;
746}
747
748static int spmi_pmic_arb_remove(struct platform_device *pdev)
749{
750 struct spmi_controller *ctrl = platform_get_drvdata(pdev);
751 struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
752 spmi_controller_remove(ctrl);
753 irq_set_chained_handler(pa->irq, NULL);
754 irq_set_handler_data(pa->irq, NULL);
755 irq_domain_remove(pa->domain);
756 spmi_controller_put(ctrl);
757 return 0;
758}
759
760static const struct of_device_id spmi_pmic_arb_match_table[] = {
761 { .compatible = "qcom,spmi-pmic-arb", },
762 {},
763};
764MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);
765
766static struct platform_driver spmi_pmic_arb_driver = {
767 .probe = spmi_pmic_arb_probe,
768 .remove = spmi_pmic_arb_remove,
769 .driver = {
770 .name = "spmi_pmic_arb",
771 .owner = THIS_MODULE,
772 .of_match_table = spmi_pmic_arb_match_table,
773 },
774};
775module_platform_driver(spmi_pmic_arb_driver);
776
777MODULE_LICENSE("GPL v2");
778MODULE_ALIAS("platform:spmi_pmic_arb");
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
new file mode 100644
index 000000000000..3b5780710d50
--- /dev/null
+++ b/drivers/spmi/spmi.c
@@ -0,0 +1,574 @@
1/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/idr.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_device.h>
19#include <linux/platform_device.h>
20#include <linux/spmi.h>
21#include <linux/module.h>
22#include <linux/pm_runtime.h>
23
24#include <dt-bindings/spmi/spmi.h>
25
26static DEFINE_IDA(ctrl_ida);
27
28static void spmi_dev_release(struct device *dev)
29{
30 struct spmi_device *sdev = to_spmi_device(dev);
31 kfree(sdev);
32}
33
34static const struct device_type spmi_dev_type = {
35 .release = spmi_dev_release,
36};
37
38static void spmi_ctrl_release(struct device *dev)
39{
40 struct spmi_controller *ctrl = to_spmi_controller(dev);
41 ida_simple_remove(&ctrl_ida, ctrl->nr);
42 kfree(ctrl);
43}
44
45static const struct device_type spmi_ctrl_type = {
46 .release = spmi_ctrl_release,
47};
48
49static int spmi_device_match(struct device *dev, struct device_driver *drv)
50{
51 if (of_driver_match_device(dev, drv))
52 return 1;
53
54 if (drv->name)
55 return strncmp(dev_name(dev), drv->name,
56 SPMI_NAME_SIZE) == 0;
57
58 return 0;
59}
60
61/**
62 * spmi_device_add() - add a device previously constructed via spmi_device_alloc()
63 * @sdev: spmi_device to be added
64 */
65int spmi_device_add(struct spmi_device *sdev)
66{
67 struct spmi_controller *ctrl = sdev->ctrl;
68 int err;
69
70 dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
71
72 err = device_add(&sdev->dev);
73 if (err < 0) {
74 dev_err(&sdev->dev, "Can't add %s, status %d\n",
75 dev_name(&sdev->dev), err);
76 goto err_device_add;
77 }
78
79 dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev));
80
81err_device_add:
82 return err;
83}
84EXPORT_SYMBOL_GPL(spmi_device_add);
85
86/**
87 * spmi_device_remove(): remove an SPMI device
88 * @sdev: spmi_device to be removed
89 */
90void spmi_device_remove(struct spmi_device *sdev)
91{
92 device_unregister(&sdev->dev);
93}
94EXPORT_SYMBOL_GPL(spmi_device_remove);
95
96static inline int
97spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
98{
99 if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
100 return -EINVAL;
101
102 return ctrl->cmd(ctrl, opcode, sid);
103}
104
105static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
106 u8 sid, u16 addr, u8 *buf, size_t len)
107{
108 if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
109 return -EINVAL;
110
111 return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
112}
113
114static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
115 u8 sid, u16 addr, const u8 *buf, size_t len)
116{
117 if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
118 return -EINVAL;
119
120 return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
121}
122
123/**
124 * spmi_register_read() - register read
125 * @sdev: SPMI device.
126 * @addr: slave register address (5-bit address).
127 * @buf: buffer to be populated with data from the Slave.
128 *
129 * Reads 1 byte of data from a Slave device register.
130 */
131int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf)
132{
133 /* 5-bit register address */
134 if (addr > 0x1F)
135 return -EINVAL;
136
137 return spmi_read_cmd(sdev->ctrl, SPMI_CMD_READ, sdev->usid, addr,
138 buf, 1);
139}
140EXPORT_SYMBOL_GPL(spmi_register_read);
141
142/**
143 * spmi_ext_register_read() - extended register read
144 * @sdev: SPMI device.
145 * @addr: slave register address (8-bit address).
146 * @buf: buffer to be populated with data from the Slave.
147 * @len: the requested number of bytes to read (up to 16 bytes).
148 *
149 * Reads up to 16 bytes of data from the extended register space on a
150 * Slave device.
151 */
152int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
153 size_t len)
154{
155 /* 8-bit register address, up to 16 bytes */
156 if (len == 0 || len > 16)
157 return -EINVAL;
158
159 return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READ, sdev->usid, addr,
160 buf, len);
161}
162EXPORT_SYMBOL_GPL(spmi_ext_register_read);
163
164/**
165 * spmi_ext_register_readl() - extended register read long
166 * @sdev: SPMI device.
167 * @addr: slave register address (16-bit address).
168 * @buf: buffer to be populated with data from the Slave.
169 * @len: the requested number of bytes to read (up to 8 bytes).
170 *
171 * Reads up to 8 bytes of data from the extended register space on a
172 * Slave device using 16-bit address.
173 */
174int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
175 size_t len)
176{
177 /* 16-bit register address, up to 8 bytes */
178 if (len == 0 || len > 8)
179 return -EINVAL;
180
181 return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
182 buf, len);
183}
184EXPORT_SYMBOL_GPL(spmi_ext_register_readl);
185
186/**
187 * spmi_register_write() - register write
188 * @sdev: SPMI device
189 * @addr: slave register address (5-bit address).
190 * @data: buffer containing the data to be transferred to the Slave.
191 *
192 * Writes 1 byte of data to a Slave device register.
193 */
194int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data)
195{
196 /* 5-bit register address */
197 if (addr > 0x1F)
198 return -EINVAL;
199
200 return spmi_write_cmd(sdev->ctrl, SPMI_CMD_WRITE, sdev->usid, addr,
201 &data, 1);
202}
203EXPORT_SYMBOL_GPL(spmi_register_write);
204
205/**
206 * spmi_register_zero_write() - register zero write
207 * @sdev: SPMI device.
208 * @data: the data to be written to register 0 (7-bits).
209 *
210 * Writes data to register 0 of the Slave device.
211 */
212int spmi_register_zero_write(struct spmi_device *sdev, u8 data)
213{
214 return spmi_write_cmd(sdev->ctrl, SPMI_CMD_ZERO_WRITE, sdev->usid, 0,
215 &data, 1);
216}
217EXPORT_SYMBOL_GPL(spmi_register_zero_write);
218
219/**
220 * spmi_ext_register_write() - extended register write
221 * @sdev: SPMI device.
222 * @addr: slave register address (8-bit address).
223 * @buf: buffer containing the data to be transferred to the Slave.
224 * @len: the requested number of bytes to write (up to 16 bytes).
225 *
226 * Writes up to 16 bytes of data to the extended register space of a
227 * Slave device.
228 */
229int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, const u8 *buf,
230 size_t len)
231{
232 /* 8-bit register address, up to 16 bytes */
233 if (len == 0 || len > 16)
234 return -EINVAL;
235
236 return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITE, sdev->usid, addr,
237 buf, len);
238}
239EXPORT_SYMBOL_GPL(spmi_ext_register_write);
240
241/**
242 * spmi_ext_register_writel() - extended register write long
243 * @sdev: SPMI device.
244 * @addr: slave register address (16-bit address).
245 * @buf: buffer containing the data to be transferred to the Slave.
246 * @len: the requested number of bytes to write (up to 8 bytes).
247 *
248 * Writes up to 8 bytes of data to the extended register space of a
249 * Slave device using a 16-bit address.
250 */
251int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, const u8 *buf,
252 size_t len)
253{
254 /* 4-bit Slave Identifier, 16-bit register address, up to 8 bytes */
255 if (len == 0 || len > 8)
256 return -EINVAL;
257
258 return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITEL, sdev->usid,
259 addr, buf, len);
260}
261EXPORT_SYMBOL_GPL(spmi_ext_register_writel);
262
263/**
264 * spmi_command_reset() - sends RESET command to the specified slave
265 * @sdev: SPMI device.
266 *
267 * The Reset command initializes the Slave and forces all registers to
268 * their reset values. The Slave shall enter the STARTUP state after
269 * receiving a Reset command.
270 */
271int spmi_command_reset(struct spmi_device *sdev)
272{
273 return spmi_cmd(sdev->ctrl, SPMI_CMD_RESET, sdev->usid);
274}
275EXPORT_SYMBOL_GPL(spmi_command_reset);
276
277/**
278 * spmi_command_sleep() - sends SLEEP command to the specified SPMI device
279 * @sdev: SPMI device.
280 *
281 * The Sleep command causes the Slave to enter the user defined SLEEP state.
282 */
283int spmi_command_sleep(struct spmi_device *sdev)
284{
285 return spmi_cmd(sdev->ctrl, SPMI_CMD_SLEEP, sdev->usid);
286}
287EXPORT_SYMBOL_GPL(spmi_command_sleep);
288
289/**
290 * spmi_command_wakeup() - sends WAKEUP command to the specified SPMI device
291 * @sdev: SPMI device.
292 *
293 * The Wakeup command causes the Slave to move from the SLEEP state to
294 * the ACTIVE state.
295 */
296int spmi_command_wakeup(struct spmi_device *sdev)
297{
298 return spmi_cmd(sdev->ctrl, SPMI_CMD_WAKEUP, sdev->usid);
299}
300EXPORT_SYMBOL_GPL(spmi_command_wakeup);
301
302/**
303 * spmi_command_shutdown() - sends SHUTDOWN command to the specified SPMI device
304 * @sdev: SPMI device.
305 *
306 * The Shutdown command causes the Slave to enter the SHUTDOWN state.
307 */
308int spmi_command_shutdown(struct spmi_device *sdev)
309{
310 return spmi_cmd(sdev->ctrl, SPMI_CMD_SHUTDOWN, sdev->usid);
311}
312EXPORT_SYMBOL_GPL(spmi_command_shutdown);
313
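The four command helpers above, together with the register accessors, make up the whole per-device API. As a rough sketch only (not part of this patch), a hypothetical client could combine them as below; the MY_* register offsets and my_client_init() are invented for illustration and assume an spmi_device already bound by the core:

#include <linux/spmi.h>

#define MY_STATUS_REG	0x05	/* base register space, must be <= 0x1F */
#define MY_REV_ID_REG	0x0100	/* extended space, 16-bit address */

static int my_client_init(struct spmi_device *sdev)
{
	u8 status, rev[4];
	int ret;

	/* single-byte read from the 5-bit register space */
	ret = spmi_register_read(sdev, MY_STATUS_REG, &status);
	if (ret)
		return ret;

	/* long-form extended read: 16-bit address, at most 8 bytes */
	ret = spmi_ext_register_readl(sdev, MY_REV_ID_REG, rev, sizeof(rev));
	if (ret)
		return ret;

	/* write a single byte back to the same 5-bit register */
	ret = spmi_register_write(sdev, MY_STATUS_REG, status | 0x01);
	if (ret)
		return ret;

	/* park the slave in its user-defined SLEEP state when idle */
	return spmi_command_sleep(sdev);
}
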
314static int spmi_drv_probe(struct device *dev)
315{
316 const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
317 struct spmi_device *sdev = to_spmi_device(dev);
318 int err;
319
320 /* Ensure the slave is in ACTIVE state */
321 err = spmi_command_wakeup(sdev);
322 if (err)
323 goto fail_wakeup;
324
325 pm_runtime_get_noresume(dev);
326 pm_runtime_set_active(dev);
327 pm_runtime_enable(dev);
328
329 err = sdrv->probe(sdev);
330 if (err)
331 goto fail_probe;
332
333 return 0;
334
335fail_probe:
336 pm_runtime_disable(dev);
337 pm_runtime_set_suspended(dev);
338 pm_runtime_put_noidle(dev);
339fail_wakeup:
340 return err;
341}
342
343static int spmi_drv_remove(struct device *dev)
344{
345 const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
346
347 pm_runtime_get_sync(dev);
348 sdrv->remove(to_spmi_device(dev));
349 pm_runtime_put_noidle(dev);
350
351 pm_runtime_disable(dev);
352 pm_runtime_set_suspended(dev);
353 pm_runtime_put_noidle(dev);
354 return 0;
355}
356
357static struct bus_type spmi_bus_type = {
358 .name = "spmi",
359 .match = spmi_device_match,
360 .probe = spmi_drv_probe,
361 .remove = spmi_drv_remove,
362};
363
364/**
365 * spmi_device_alloc() - Allocate a new SPMI device
366 * @ctrl: associated controller
367 *
368 * Caller is responsible for either calling spmi_device_add() to add the
369 * newly allocated device, or calling spmi_device_put() to discard it.
370 */
371struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
372{
373 struct spmi_device *sdev;
374
375 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
376 if (!sdev)
377 return NULL;
378
379 sdev->ctrl = ctrl;
380 device_initialize(&sdev->dev);
381 sdev->dev.parent = &ctrl->dev;
382 sdev->dev.bus = &spmi_bus_type;
383 sdev->dev.type = &spmi_dev_type;
384 return sdev;
385}
386EXPORT_SYMBOL_GPL(spmi_device_alloc);
387
388/**
389 * spmi_controller_alloc() - Allocate a new SPMI controller
390 * @parent: parent device
391 * @size: size of private data
392 *
393 * Caller is responsible for either calling spmi_controller_add() to add the
394 * newly allocated controller, or calling spmi_controller_put() to discard it.
395 * The allocated private data region may be accessed via
396 * spmi_controller_get_drvdata()
397 */
398struct spmi_controller *spmi_controller_alloc(struct device *parent,
399 size_t size)
400{
401 struct spmi_controller *ctrl;
402 int id;
403
404 if (WARN_ON(!parent))
405 return NULL;
406
407 ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
408 if (!ctrl)
409 return NULL;
410
411 device_initialize(&ctrl->dev);
412 ctrl->dev.type = &spmi_ctrl_type;
413 ctrl->dev.bus = &spmi_bus_type;
414 ctrl->dev.parent = parent;
415 ctrl->dev.of_node = parent->of_node;
416 spmi_controller_set_drvdata(ctrl, &ctrl[1]);
417
418 id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
419 if (id < 0) {
420 dev_err(parent,
421 "unable to allocate SPMI controller identifier.\n");
422 spmi_controller_put(ctrl);
423 return NULL;
424 }
425
426 ctrl->nr = id;
427 dev_set_name(&ctrl->dev, "spmi-%d", id);
428
429 dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
430 return ctrl;
431}
432EXPORT_SYMBOL_GPL(spmi_controller_alloc);
433
434static void of_spmi_register_devices(struct spmi_controller *ctrl)
435{
436 struct device_node *node;
437 int err;
438
439 if (!ctrl->dev.of_node)
440 return;
441
442 for_each_available_child_of_node(ctrl->dev.of_node, node) {
443 struct spmi_device *sdev;
444 u32 reg[2];
445
446 dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
447
448 err = of_property_read_u32_array(node, "reg", reg, 2);
449 if (err) {
450 dev_err(&ctrl->dev,
451 "node %s err (%d) does not have 'reg' property\n",
452 node->full_name, err);
453 continue;
454 }
455
456 if (reg[1] != SPMI_USID) {
457 dev_err(&ctrl->dev,
458 "node %s contains unsupported 'reg' entry\n",
459 node->full_name);
460 continue;
461 }
462
463 if (reg[0] >= SPMI_MAX_SLAVE_ID) {
464 dev_err(&ctrl->dev,
465 "invalid usid on node %s\n",
466 node->full_name);
467 continue;
468 }
469
470 dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]);
471
472 sdev = spmi_device_alloc(ctrl);
473 if (!sdev)
474 continue;
475
476 sdev->dev.of_node = node;
477 sdev->usid = (u8) reg[0];
478
479 err = spmi_device_add(sdev);
480 if (err) {
481 dev_err(&sdev->dev,
482 "failure adding device. status %d\n", err);
483 spmi_device_put(sdev);
484 }
485 }
486}
487
488/**
489 * spmi_controller_add() - Add an SPMI controller
490 * @ctrl: controller to be registered.
491 *
492 * Register a controller previously allocated via spmi_controller_alloc() with
493 * the SPMI core.
494 */
495int spmi_controller_add(struct spmi_controller *ctrl)
496{
497 int ret;
498
499 /* Can't register until after driver model init */
500 if (WARN_ON(!spmi_bus_type.p))
501 return -EAGAIN;
502
503 ret = device_add(&ctrl->dev);
504 if (ret)
505 return ret;
506
507 if (IS_ENABLED(CONFIG_OF))
508 of_spmi_register_devices(ctrl);
509
510 dev_dbg(&ctrl->dev, "spmi-%d registered: dev:%p\n",
511 ctrl->nr, &ctrl->dev);
512
513 return 0;
514}
515EXPORT_SYMBOL_GPL(spmi_controller_add);
516
517/* Remove a device associated with a controller */
518static int spmi_ctrl_remove_device(struct device *dev, void *data)
519{
520 struct spmi_device *spmidev = to_spmi_device(dev);
521 if (dev->type == &spmi_dev_type)
522 spmi_device_remove(spmidev);
523 return 0;
524}
525
526/**
527 * spmi_controller_remove() - remove an SPMI controller
528 * @ctrl: controller to remove
529 *
530 * Remove an SPMI controller. Caller is responsible for calling
531 * spmi_controller_put() to discard the allocated controller.
532 */
533void spmi_controller_remove(struct spmi_controller *ctrl)
534{
535 int dummy;
536
537 if (!ctrl)
538 return;
539
540 dummy = device_for_each_child(&ctrl->dev, NULL,
541 spmi_ctrl_remove_device);
542 device_del(&ctrl->dev);
543}
544EXPORT_SYMBOL_GPL(spmi_controller_remove);
545
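spmi_controller_alloc(), spmi_controller_add() and spmi_controller_remove() are the calls a bus-controller driver needs. The skeleton below is a minimal sketch, not part of this patch: the callback prototypes are inferred from the spmi_cmd/spmi_read_cmd/spmi_write_cmd wrappers above, and the foo_* names, the "acme,foo-spmi" compatible string and the platform binding are placeholders rather than a real driver.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spmi.h>

struct foo_spmi {
	void __iomem *base;	/* imaginary register window */
};

static int foo_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
{
	/* issue a non-data command (RESET/SLEEP/WAKEUP/SHUTDOWN) here */
	return 0;
}

static int foo_read_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid,
			u16 addr, u8 *buf, size_t len)
{
	/* run the read on the bus and copy the result into buf */
	return 0;
}

static int foo_write_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid,
			 u16 addr, const u8 *buf, size_t len)
{
	/* push buf out on the bus */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct spmi_controller *ctrl;
	struct foo_spmi *foo;

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*foo));
	if (!ctrl)
		return -ENOMEM;

	foo = spmi_controller_get_drvdata(ctrl);
	foo->base = NULL;	/* a real driver would ioremap its registers */

	ctrl->cmd = foo_cmd;
	ctrl->read_cmd = foo_read_cmd;
	ctrl->write_cmd = foo_write_cmd;

	platform_set_drvdata(pdev, ctrl);
	/* registers the controller and creates children from its DT node */
	return spmi_controller_add(ctrl);
}

static int foo_remove(struct platform_device *pdev)
{
	struct spmi_controller *ctrl = platform_get_drvdata(pdev);

	spmi_controller_remove(ctrl);
	spmi_controller_put(ctrl);
	return 0;
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo-spmi" },
	{ }
};

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name = "foo-spmi",
		.of_match_table = foo_match,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL v2");
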
546/**
547 * spmi_driver_register() - Register client driver with SPMI core
548 * @sdrv: client driver to be associated with client-device.
549 *
550 * This API will register the client driver with the SPMI framework.
551 * It is typically called from the driver's module-init function.
552 */
553int spmi_driver_register(struct spmi_driver *sdrv)
554{
555 sdrv->driver.bus = &spmi_bus_type;
556 return driver_register(&sdrv->driver);
557}
558EXPORT_SYMBOL_GPL(spmi_driver_register);
559
560static void __exit spmi_exit(void)
561{
562 bus_unregister(&spmi_bus_type);
563}
564module_exit(spmi_exit);
565
566static int __init spmi_init(void)
567{
568 return bus_register(&spmi_bus_type);
569}
570postcore_initcall(spmi_init);
571
572MODULE_LICENSE("GPL v2");
573MODULE_DESCRIPTION("SPMI module");
574MODULE_ALIAS("platform:spmi");
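
For completeness, a hedged sketch of a client driver binding against the bus code above; the acme_* names and the "acme,pmic" compatible string are invented, and the probe/remove prototypes follow the calls made in spmi_drv_probe() and spmi_drv_remove() above.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spmi.h>

static int acme_probe(struct spmi_device *sdev)
{
	/* sdev->usid and sdev->ctrl were filled in by the SPMI core */
	dev_info(&sdev->dev, "bound to usid %u\n", sdev->usid);
	return 0;
}

static void acme_remove(struct spmi_device *sdev)
{
	/* undo whatever probe set up */
}

static const struct of_device_id acme_match[] = {
	{ .compatible = "acme,pmic" },
	{ }
};
MODULE_DEVICE_TABLE(of, acme_match);

static struct spmi_driver acme_driver = {
	.probe = acme_probe,
	.remove = acme_remove,
	.driver = {
		.name = "acme-pmic",
		.of_match_table = acme_match,
	},
};

static int __init acme_init(void)
{
	return spmi_driver_register(&acme_driver);
}
module_init(acme_init);

static void __exit acme_exit(void)
{
	/* the core only exposes spmi_driver_register() here, so unwind
	 * through the generic driver-model call */
	driver_unregister(&acme_driver.driver);
}
module_exit(acme_exit);

MODULE_LICENSE("GPL v2");
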
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index b269dbd47fc4..b1d7ee6e40b7 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -29,7 +29,6 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/extcon.h> 31#include <linux/extcon.h>
32#include <linux/extcon/of_extcon.h>
33#include <linux/regulator/consumer.h> 32#include <linux/regulator/consumer.h>
34 33
35#include <linux/usb/otg.h> 34#include <linux/usb/otg.h>
@@ -522,7 +521,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
522 dwc3_omap_enable_irqs(omap); 521 dwc3_omap_enable_irqs(omap);
523 522
524 if (of_property_read_bool(node, "extcon")) { 523 if (of_property_read_bool(node, "extcon")) {
525 edev = of_extcon_get_extcon_dev(dev, 0); 524 edev = extcon_get_edev_by_phandle(dev, 0);
526 if (IS_ERR(edev)) { 525 if (IS_ERR(edev)) {
527 dev_vdbg(dev, "couldn't get extcon device\n"); 526 dev_vdbg(dev, "couldn't get extcon device\n");
528 ret = -EPROBE_DEFER; 527 ret = -EPROBE_DEFER;
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index 130708f96430..e23392ec5af3 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -42,6 +42,7 @@
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/fb.h> 43#include <linux/fb.h>
44#include <linux/pci.h> 44#include <linux/pci.h>
45#include <linux/efi.h>
45 46
46#include <linux/hyperv.h> 47#include <linux/hyperv.h>
47 48
@@ -212,6 +213,7 @@ struct synthvid_msg {
212 213
213struct hvfb_par { 214struct hvfb_par {
214 struct fb_info *info; 215 struct fb_info *info;
216 struct resource mem;
215 bool fb_ready; /* fb device is ready */ 217 bool fb_ready; /* fb device is ready */
216 struct completion wait; 218 struct completion wait;
217 u32 synthvid_version; 219 u32 synthvid_version;
@@ -460,13 +462,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
460 goto error; 462 goto error;
461 } 463 }
462 464
463 if (par->synthvid_version == SYNTHVID_VERSION_WIN7) { 465 if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
464 screen_depth = SYNTHVID_DEPTH_WIN7; 466 screen_depth = SYNTHVID_DEPTH_WIN7;
465 screen_fb_size = SYNTHVID_FB_SIZE_WIN7; 467 else
466 } else {
467 screen_depth = SYNTHVID_DEPTH_WIN8; 468 screen_depth = SYNTHVID_DEPTH_WIN8;
468 screen_fb_size = SYNTHVID_FB_SIZE_WIN8; 469
469 } 470 screen_fb_size = hdev->channel->offermsg.offer.
471 mmio_megabytes * 1024 * 1024;
470 472
471 return 0; 473 return 0;
472 474
@@ -627,26 +629,46 @@ static void hvfb_get_option(struct fb_info *info)
627/* Get framebuffer memory from Hyper-V video pci space */ 629/* Get framebuffer memory from Hyper-V video pci space */
628static int hvfb_getmem(struct fb_info *info) 630static int hvfb_getmem(struct fb_info *info)
629{ 631{
630 struct pci_dev *pdev; 632 struct hvfb_par *par = info->par;
631 ulong fb_phys; 633 struct pci_dev *pdev = NULL;
632 void __iomem *fb_virt; 634 void __iomem *fb_virt;
635 int gen2vm = efi_enabled(EFI_BOOT);
636 int ret;
633 637
634 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, 638 par->mem.name = KBUILD_MODNAME;
639 par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
640 if (gen2vm) {
641 ret = allocate_resource(&hyperv_mmio, &par->mem,
642 screen_fb_size,
643 0, -1,
644 screen_fb_size,
645 NULL, NULL);
646 if (ret != 0) {
647 pr_err("Unable to allocate framebuffer memory\n");
648 return -ENODEV;
649 }
650 } else {
651 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
635 PCI_DEVICE_ID_HYPERV_VIDEO, NULL); 652 PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
636 if (!pdev) { 653 if (!pdev) {
637 pr_err("Unable to find PCI Hyper-V video\n"); 654 pr_err("Unable to find PCI Hyper-V video\n");
638 return -ENODEV; 655 return -ENODEV;
639 } 656 }
640 657
641 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 658 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
642 pci_resource_len(pdev, 0) < screen_fb_size) 659 pci_resource_len(pdev, 0) < screen_fb_size)
643 goto err1; 660 goto err1;
644 661
645 fb_phys = pci_resource_end(pdev, 0) - screen_fb_size + 1; 662 par->mem.end = pci_resource_end(pdev, 0);
646 if (!request_mem_region(fb_phys, screen_fb_size, KBUILD_MODNAME)) 663 par->mem.start = par->mem.end - screen_fb_size + 1;
647 goto err1; 664 ret = request_resource(&pdev->resource[0], &par->mem);
665 if (ret != 0) {
666 pr_err("Unable to request framebuffer memory\n");
667 goto err1;
668 }
669 }
648 670
649 fb_virt = ioremap(fb_phys, screen_fb_size); 671 fb_virt = ioremap(par->mem.start, screen_fb_size);
650 if (!fb_virt) 672 if (!fb_virt)
651 goto err2; 673 goto err2;
652 674
@@ -654,30 +676,44 @@ static int hvfb_getmem(struct fb_info *info)
654 if (!info->apertures) 676 if (!info->apertures)
655 goto err3; 677 goto err3;
656 678
657 info->apertures->ranges[0].base = pci_resource_start(pdev, 0); 679 if (gen2vm) {
658 info->apertures->ranges[0].size = pci_resource_len(pdev, 0); 680 info->apertures->ranges[0].base = screen_info.lfb_base;
659 info->fix.smem_start = fb_phys; 681 info->apertures->ranges[0].size = screen_info.lfb_size;
682 remove_conflicting_framebuffers(info->apertures,
683 KBUILD_MODNAME, false);
684 } else {
685 info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
686 info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
687 }
688
689 info->fix.smem_start = par->mem.start;
660 info->fix.smem_len = screen_fb_size; 690 info->fix.smem_len = screen_fb_size;
661 info->screen_base = fb_virt; 691 info->screen_base = fb_virt;
662 info->screen_size = screen_fb_size; 692 info->screen_size = screen_fb_size;
663 693
664 pci_dev_put(pdev); 694 if (!gen2vm)
695 pci_dev_put(pdev);
696
665 return 0; 697 return 0;
666 698
667err3: 699err3:
668 iounmap(fb_virt); 700 iounmap(fb_virt);
669err2: 701err2:
670 release_mem_region(fb_phys, screen_fb_size); 702 release_resource(&par->mem);
671err1: 703err1:
672 pci_dev_put(pdev); 704 if (!gen2vm)
705 pci_dev_put(pdev);
706
673 return -ENOMEM; 707 return -ENOMEM;
674} 708}
675 709
676/* Release the framebuffer */ 710/* Release the framebuffer */
677static void hvfb_putmem(struct fb_info *info) 711static void hvfb_putmem(struct fb_info *info)
678{ 712{
713 struct hvfb_par *par = info->par;
714
679 iounmap(info->screen_base); 715 iounmap(info->screen_base);
680 release_mem_region(info->fix.smem_start, screen_fb_size); 716 release_resource(&par->mem);
681} 717}
682 718
683 719
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 256fba7f4641..1f38445014c1 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -190,7 +190,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
190 uvfb_tasks[seq] = task; 190 uvfb_tasks[seq] = task;
191 mutex_unlock(&uvfb_lock); 191 mutex_unlock(&uvfb_lock);
192 192
193 err = cn_netlink_send(m, 0, GFP_KERNEL); 193 err = cn_netlink_send(m, 0, 0, GFP_KERNEL);
194 if (err == -ESRCH) { 194 if (err == -ESRCH) {
195 /* 195 /*
196 * Try to start the userspace helper if sending 196 * Try to start the userspace helper if sending
@@ -204,7 +204,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
204 "helper is installed and executable\n"); 204 "helper is installed and executable\n");
205 } else { 205 } else {
206 v86d_started = 1; 206 v86d_started = 1;
207 err = cn_netlink_send(m, 0, gfp_any()); 207 err = cn_netlink_send(m, 0, 0, gfp_any());
208 if (err == -ENOBUFS) 208 if (err == -ENOBUFS)
209 err = 0; 209 err = 0;
210 } 210 }
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 1b5d48c578e1..bfb2d3f06738 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -869,14 +869,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
869 869
870 spin_lock(&image->lock); 870 spin_lock(&image->lock);
871 871
872 /* The following code handles VME address alignment problem 872 /* The following code handles VME address alignment. We cannot use
873 * in order to assure the maximal data width cycle. 873 * memcpy_xxx here because it may cut data transfers in to 8-bit
874 * We cannot use memcpy_xxx directly here because it 874 * cycles when D16 or D32 cycles are required on the VME bus.
875 * may cut data transfer in 8-bits cycles, thus making 875 * On the other hand, the bridge itself assures that the maximum data
876 * D16 cycle impossible. 876 * cycle configured for the transfer is used and splits it
877 * From the other hand, the bridge itself assures that 877 * automatically for non-aligned addresses, so we don't want the
878 * maximal configured data cycle is used and splits it 878 * overhead of needlessly forcing small transfers for the entire cycle.
879 * automatically for non-aligned addresses.
880 */ 879 */
881 if ((uintptr_t)addr & 0x1) { 880 if ((uintptr_t)addr & 0x1) {
882 *(u8 *)buf = ioread8(addr); 881 *(u8 *)buf = ioread8(addr);
@@ -896,9 +895,9 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
896 } 895 }
897 896
898 count32 = (count - done) & ~0x3; 897 count32 = (count - done) & ~0x3;
899 if (count32 > 0) { 898 while (done < count32) {
900 memcpy_fromio(buf + done, addr + done, (unsigned int)count); 899 *(u32 *)(buf + done) = ioread32(addr + done);
901 done += count32; 900 done += 4;
902 } 901 }
903 902
904 if ((count - done) & 0x2) { 903 if ((count - done) & 0x2) {
@@ -930,7 +929,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
930 spin_lock(&image->lock); 929 spin_lock(&image->lock);
931 930
932 /* Here we apply the same strategy we do in the master_read 931 /* Here we apply the same strategy we do in the master_read
933 * function in order to assure D16 cycle when required. 932 * function in order to assure the correct cycles.
934 */ 933 */
935 if ((uintptr_t)addr & 0x1) { 934 if ((uintptr_t)addr & 0x1) {
936 iowrite8(*(u8 *)buf, addr); 935 iowrite8(*(u8 *)buf, addr);
@@ -950,9 +949,9 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
950 } 949 }
951 950
952 count32 = (count - done) & ~0x3; 951 count32 = (count - done) & ~0x3;
953 if (count32 > 0) { 952 while (done < count32) {
954 memcpy_toio(addr + done, buf + done, count32); 953 iowrite32(*(u32 *)(buf + done), addr + done);
955 done += count32; 954 done += 4;
956 } 955 }
957 956
958 if ((count - done) & 0x2) { 957 if ((count - done) & 0x2) {
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 9911cd5fddb5..06990c6a1a69 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1276,8 +1276,8 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1276 spin_lock(&image->lock); 1276 spin_lock(&image->lock);
1277 1277
1278 /* The following code handles VME address alignment. We cannot use 1278 /* The following code handles VME address alignment. We cannot use
1279 * memcpy_xxx directly here because it may cut small data transfers in 1279 * memcpy_xxx here because it may cut data transfers in to 8-bit
1280 * to 8-bit cycles, thus making D16 cycle impossible. 1280 * cycles when D16 or D32 cycles are required on the VME bus.
1281 * On the other hand, the bridge itself assures that the maximum data 1281 * On the other hand, the bridge itself assures that the maximum data
1282 * cycle configured for the transfer is used and splits it 1282 * cycle configured for the transfer is used and splits it
1283 * automatically for non-aligned addresses, so we don't want the 1283 * automatically for non-aligned addresses, so we don't want the
@@ -1301,9 +1301,9 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1301 } 1301 }
1302 1302
1303 count32 = (count - done) & ~0x3; 1303 count32 = (count - done) & ~0x3;
1304 if (count32 > 0) { 1304 while (done < count32) {
1305 memcpy_fromio(buf + done, addr + done, count32); 1305 *(u32 *)(buf + done) = ioread32(addr + done);
1306 done += count32; 1306 done += 4;
1307 } 1307 }
1308 1308
1309 if ((count - done) & 0x2) { 1309 if ((count - done) & 0x2) {
@@ -1363,7 +1363,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1363 spin_lock(&image->lock); 1363 spin_lock(&image->lock);
1364 1364
1365 /* Here we apply the same strategy we do in the master_read 1365 /* Here we apply the same strategy we do in the master_read
1366 * function in order to assure D16 cycle when required. 1366 * function in order to assure the correct cycles.
1367 */ 1367 */
1368 if ((uintptr_t)addr & 0x1) { 1368 if ((uintptr_t)addr & 0x1) {
1369 iowrite8(*(u8 *)buf, addr); 1369 iowrite8(*(u8 *)buf, addr);
@@ -1383,9 +1383,9 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1383 } 1383 }
1384 1384
1385 count32 = (count - done) & ~0x3; 1385 count32 = (count - done) & ~0x3;
1386 if (count32 > 0) { 1386 while (done < count32) {
1387 memcpy_toio(addr + done, buf + done, count32); 1387 iowrite32(*(u32 *)(buf + done), addr + done);
1388 done += count32; 1388 done += 4;
1389 } 1389 }
1390 1390
1391 if ((count - done) & 0x2) { 1391 if ((count - done) & 0x2) {
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index efc7f075fcbe..1708b2300c7a 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -36,13 +36,12 @@ config W1_MASTER_DS2482
36 36
37config W1_MASTER_MXC 37config W1_MASTER_MXC
38 tristate "Freescale MXC 1-wire busmaster" 38 tristate "Freescale MXC 1-wire busmaster"
39 depends on W1 && ARCH_MXC 39 depends on ARCH_MXC || COMPILE_TEST
40 help 40 help
41 Say Y here to enable MXC 1-wire host 41 Say Y here to enable MXC 1-wire host
42 42
43config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
44 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
45 depends on W1
46 help 45 help
47 Say Y here to enable the DS1WM 1-wire driver, such as that 46 Say Y here to enable the DS1WM 1-wire driver, such as that
48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 47 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 4f7e1d770f81..7404ad3062b7 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * dscore.c 2 * ds2490.c USB to one wire bridge
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
@@ -28,6 +28,10 @@
28#include "../w1_int.h" 28#include "../w1_int.h"
29#include "../w1.h" 29#include "../w1.h"
30 30
31/* USB Standard */
32/* USB Control request vendor type */
33#define VENDOR 0x40
34
31/* COMMAND TYPE CODES */ 35/* COMMAND TYPE CODES */
32#define CONTROL_CMD 0x00 36#define CONTROL_CMD 0x00
33#define COMM_CMD 0x01 37#define COMM_CMD 0x01
@@ -107,6 +111,8 @@
107#define ST_HALT 0x10 /* DS2490 is currently halted */ 111#define ST_HALT 0x10 /* DS2490 is currently halted */
108#define ST_IDLE 0x20 /* DS2490 is currently idle */ 112#define ST_IDLE 0x20 /* DS2490 is currently idle */
109#define ST_EPOF 0x80 113#define ST_EPOF 0x80
114/* Status transfer size, 16 bytes status, 16 byte result flags */
115#define ST_SIZE 0x20
110 116
111/* Result Register flags */ 117/* Result Register flags */
112#define RR_DETECT 0xA5 /* New device detected */ 118#define RR_DETECT 0xA5 /* New device detected */
@@ -198,7 +204,7 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
198 int err; 204 int err;
199 205
200 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 206 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
201 CONTROL_CMD, 0x40, value, index, NULL, 0, 1000); 207 CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
202 if (err < 0) { 208 if (err < 0) {
203 printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n", 209 printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n",
204 value, index, err); 210 value, index, err);
@@ -213,7 +219,7 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
213 int err; 219 int err;
214 220
215 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 221 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
216 MODE_CMD, 0x40, value, index, NULL, 0, 1000); 222 MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
217 if (err < 0) { 223 if (err < 0) {
218 printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n", 224 printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n",
219 value, index, err); 225 value, index, err);
@@ -228,7 +234,7 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
228 int err; 234 int err;
229 235
230 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 236 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
231 COMM_CMD, 0x40, value, index, NULL, 0, 1000); 237 COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
232 if (err < 0) { 238 if (err < 0) {
233 printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n", 239 printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n",
234 value, index, err); 240 value, index, err);
@@ -246,7 +252,8 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
246 memset(st, 0, sizeof(*st)); 252 memset(st, 0, sizeof(*st));
247 253
248 count = 0; 254 count = 0;
249 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100); 255 err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
256 dev->ep[EP_STATUS]), buf, size, &count, 100);
250 if (err < 0) { 257 if (err < 0) {
251 printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err); 258 printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err);
252 return err; 259 return err;
@@ -353,7 +360,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
353 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), 360 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
354 buf, size, &count, 1000); 361 buf, size, &count, 1000);
355 if (err < 0) { 362 if (err < 0) {
356 u8 buf[0x20]; 363 u8 buf[ST_SIZE];
357 int count; 364 int count;
358 365
359 printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); 366 printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
@@ -398,7 +405,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
398{ 405{
399 struct ds_status st; 406 struct ds_status st;
400 int count = 0, err = 0; 407 int count = 0, err = 0;
401 u8 buf[0x20]; 408 u8 buf[ST_SIZE];
402 409
403 do { 410 do {
404 err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0); 411 err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -450,10 +457,11 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
450 457
451static int ds_wait_status(struct ds_device *dev, struct ds_status *st) 458static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
452{ 459{
453 u8 buf[0x20]; 460 u8 buf[ST_SIZE];
454 int err, count = 0; 461 int err, count = 0;
455 462
456 do { 463 do {
464 st->status = 0;
457 err = ds_recv_status_nodump(dev, st, buf, sizeof(buf)); 465 err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
458#if 0 466#if 0
459 if (err >= 0) { 467 if (err >= 0) {
@@ -464,7 +472,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
464 printk("\n"); 472 printk("\n");
465 } 473 }
466#endif 474#endif
467 } while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100); 475 } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
468 476
469 if (err >= 16 && st->status & ST_EPOF) { 477 if (err >= 16 && st->status & ST_EPOF) {
470 printk(KERN_INFO "Resetting device after ST_EPOF.\n"); 478 printk(KERN_INFO "Resetting device after ST_EPOF.\n");
@@ -690,37 +698,106 @@ static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
690 return !(err == len); 698 return !(err == len);
691} 699}
692 700
693#if 0 701static void ds9490r_search(void *data, struct w1_master *master,
694 702 u8 search_type, w1_slave_found_callback callback)
695static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search)
696{ 703{
704 /* When starting with an existing id, the first id returned will
705 * be that device (if it is still on the bus most likely).
706 *
707 * If the number of devices found is less than or equal to the
708 * search_limit, that number of IDs will be returned. If there are
709 * more, search_limit IDs will be returned followed by a non-zero
710 * discrepency value.
711 */
712 struct ds_device *dev = data;
697 int err; 713 int err;
698 u16 value, index; 714 u16 value, index;
699 struct ds_status st; 715 struct ds_status st;
716 u8 st_buf[ST_SIZE];
717 int search_limit;
718 int found = 0;
719 int i;
700 720
701 memset(buf, 0, sizeof(buf)); 721 /* DS18b20 spec, 13.16 ms per device, 75 per second, sleep for
722 * discovering 8 devices (1 bulk transfer and 1/2 FIFO size) at a time.
723 */
724 const unsigned long jtime = msecs_to_jiffies(1000*8/75);
725 /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
726 * packet size.
727 */
728 u64 buf[2*64/8];
702 729
703 err = ds_send_data(ds_dev, (unsigned char *)&init, 8); 730 mutex_lock(&master->bus_mutex);
704 if (err)
705 return err;
706 731
707 ds_wait_status(ds_dev, &st); 732 /* address to start searching at */
733 if (ds_send_data(dev, (u8 *)&master->search_id, 8) < 0)
734 goto search_out;
735 master->search_id = 0;
708 736
709 value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS; 737 value = COMM_SEARCH_ACCESS | COMM_IM | COMM_RST | COMM_SM | COMM_F |
710 index = (conditional_search ? 0xEC : 0xF0) | (id_number << 8); 738 COMM_RTS;
711 err = ds_send_control(ds_dev, value, index); 739 search_limit = master->max_slave_count;
712 if (err) 740 if (search_limit > 255)
713 return err; 741 search_limit = 0;
742 index = search_type | (search_limit << 8);
743 if (ds_send_control(dev, value, index) < 0)
744 goto search_out;
714 745
715 ds_wait_status(ds_dev, &st); 746 do {
747 schedule_timeout(jtime);
716 748
717 err = ds_recv_data(ds_dev, (unsigned char *)buf, 8*id_number); 749 if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
718 if (err < 0) 750 sizeof(st)) {
719 return err; 751 break;
752 }
720 753
721 return err/8; 754 if (st.data_in_buffer_status) {
755 /* Bulk in can receive partial ids, but when it does
756 * they fail crc and will be discarded anyway.
757 * That has only been seen when status in buffer
758 * is 0 and bulk is read anyway, so don't read
759 * bulk without first checking if status says there
760 * is data to read.
761 */
762 err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
763 if (err < 0)
764 break;
765 for (i = 0; i < err/8; ++i) {
766 ++found;
767 if (found <= search_limit)
768 callback(master, buf[i]);
769 /* can't know if there will be a discrepancy
770 * value after this id until the next one is read */
771 if (found == search_limit)
772 master->search_id = buf[i];
773 }
774 }
775
776 if (test_bit(W1_ABORT_SEARCH, &master->flags))
777 break;
778 } while (!(st.status & (ST_IDLE | ST_HALT)));
779
780 /* only continue the search if some weren't found */
781 if (found <= search_limit) {
782 master->search_id = 0;
783 } else if (!test_bit(W1_WARN_MAX_COUNT, &master->flags)) {
784 /* Only max_slave_count will be scanned in a search,
785 * but it will start where it left off next search
786 * until all ids are identified and then it will start
787 * over. A continued search will report the previous
788 * last id as the first id (provided it is still on the
789 * bus).
790 */
791 dev_info(&dev->udev->dev, "%s: max_slave_count %d reached, "
792 "will continue next search.\n", __func__,
793 master->max_slave_count);
794 set_bit(W1_WARN_MAX_COUNT, &master->flags);
795 }
796search_out:
797 mutex_unlock(&master->bus_mutex);
722} 798}
723 799
800#if 0
724static int ds_match_access(struct ds_device *dev, u64 init) 801static int ds_match_access(struct ds_device *dev, u64 init)
725{ 802{
726 int err; 803 int err;
@@ -894,6 +971,7 @@ static int ds_w1_init(struct ds_device *dev)
894 dev->master.write_block = &ds9490r_write_block; 971 dev->master.write_block = &ds9490r_write_block;
895 dev->master.reset_bus = &ds9490r_reset; 972 dev->master.reset_bus = &ds9490r_reset;
896 dev->master.set_pullup = &ds9490r_set_pullup; 973 dev->master.set_pullup = &ds9490r_set_pullup;
974 dev->master.search = &ds9490r_search;
897 975
898 return w1_add_master_device(&dev->master); 976 return w1_add_master_device(&dev->master);
899} 977}
@@ -910,15 +988,13 @@ static int ds_probe(struct usb_interface *intf,
910 struct usb_endpoint_descriptor *endpoint; 988 struct usb_endpoint_descriptor *endpoint;
911 struct usb_host_interface *iface_desc; 989 struct usb_host_interface *iface_desc;
912 struct ds_device *dev; 990 struct ds_device *dev;
913 int i, err; 991 int i, err, alt;
914 992
915 dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL); 993 dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
916 if (!dev) { 994 if (!dev) {
917 printk(KERN_INFO "Failed to allocate new DS9490R structure.\n"); 995 printk(KERN_INFO "Failed to allocate new DS9490R structure.\n");
918 return -ENOMEM; 996 return -ENOMEM;
919 } 997 }
920 dev->spu_sleep = 0;
921 dev->spu_bit = 0;
922 dev->udev = usb_get_dev(udev); 998 dev->udev = usb_get_dev(udev);
923 if (!dev->udev) { 999 if (!dev->udev) {
924 err = -ENOMEM; 1000 err = -ENOMEM;
@@ -928,20 +1004,25 @@ static int ds_probe(struct usb_interface *intf,
928 1004
929 usb_set_intfdata(intf, dev); 1005 usb_set_intfdata(intf, dev);
930 1006
931 err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3); 1007 err = usb_reset_configuration(dev->udev);
932 if (err) { 1008 if (err) {
933 printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n", 1009 dev_err(&dev->udev->dev,
934 intf->altsetting[0].desc.bInterfaceNumber, err); 1010 "Failed to reset configuration: err=%d.\n", err);
935 goto err_out_clear; 1011 goto err_out_clear;
936 } 1012 }
937 1013
938 err = usb_reset_configuration(dev->udev); 1014 /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
1015 alt = 3;
1016 err = usb_set_interface(dev->udev,
1017 intf->altsetting[alt].desc.bInterfaceNumber, alt);
939 if (err) { 1018 if (err) {
940 printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err); 1019 dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
1020 "for %d interface: err=%d.\n", alt,
1021 intf->altsetting[alt].desc.bInterfaceNumber, err);
941 goto err_out_clear; 1022 goto err_out_clear;
942 } 1023 }
943 1024
944 iface_desc = &intf->altsetting[0]; 1025 iface_desc = &intf->altsetting[alt];
945 if (iface_desc->desc.bNumEndpoints != NUM_EP-1) { 1026 if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
946 printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints); 1027 printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints);
947 err = -EINVAL; 1028 err = -EINVAL;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 1e5d94c5afc9..67b067a3e2ab 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -10,24 +10,16 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 */ 13 */
19 14
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/clk.h> 15#include <linux/clk.h>
24#include <linux/slab.h>
25#include <linux/delay.h> 16#include <linux/delay.h>
26#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
27 20
28#include "../w1.h" 21#include "../w1.h"
29#include "../w1_int.h" 22#include "../w1_int.h"
30#include "../w1_log.h"
31 23
32/* According to the mx27 Datasheet the reset procedure should take up to about 24/* According to the mx27 Datasheet the reset procedure should take up to about
33 * 1350us. We set the timeout to 500*100us = 50ms for sure */ 25 * 1350us. We set the timeout to 500*100us = 50ms for sure */
@@ -36,13 +28,13 @@
36/* 28/*
37 * MXC W1 Register offsets 29 * MXC W1 Register offsets
38 */ 30 */
39#define MXC_W1_CONTROL 0x00 31#define MXC_W1_CONTROL 0x00
40#define MXC_W1_TIME_DIVIDER 0x02 32# define MXC_W1_CONTROL_RDST BIT(3)
41#define MXC_W1_RESET 0x04 33# define MXC_W1_CONTROL_WR(x) BIT(5 - (x))
42#define MXC_W1_COMMAND 0x06 34# define MXC_W1_CONTROL_PST BIT(6)
43#define MXC_W1_TXRX 0x08 35# define MXC_W1_CONTROL_RPP BIT(7)
44#define MXC_W1_INTERRUPT 0x0A 36#define MXC_W1_TIME_DIVIDER 0x02
45#define MXC_W1_INTERRUPT_EN 0x0C 37#define MXC_W1_RESET 0x04
46 38
47struct mxc_w1_device { 39struct mxc_w1_device {
48 void __iomem *regs; 40 void __iomem *regs;
@@ -61,12 +53,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
61 unsigned int timeout_cnt = 0; 53 unsigned int timeout_cnt = 0;
62 struct mxc_w1_device *dev = data; 54 struct mxc_w1_device *dev = data;
63 55
64 __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL)); 56 writeb(MXC_W1_CONTROL_RPP, (dev->regs + MXC_W1_CONTROL));
65 57
66 while (1) { 58 while (1) {
67 reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL); 59 reg_val = readb(dev->regs + MXC_W1_CONTROL);
68 60
69 if (((reg_val >> 7) & 0x1) == 0 || 61 if (!(reg_val & MXC_W1_CONTROL_RPP) ||
70 timeout_cnt > MXC_W1_RESET_TIMEOUT) 62 timeout_cnt > MXC_W1_RESET_TIMEOUT)
71 break; 63 break;
72 else 64 else
@@ -74,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
74 66
75 udelay(100); 67 udelay(100);
76 } 68 }
77 return (reg_val >> 7) & 0x1; 69 return !!(reg_val & MXC_W1_CONTROL_PST);
78} 70}
79 71
80/* 72/*
@@ -90,16 +82,16 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
90 * datasheet. 82 * datasheet.
91 */ 83 */
92 84
93 __raw_writeb((1 << (5 - bit)), ctrl_addr); 85 writeb(MXC_W1_CONTROL_WR(bit), ctrl_addr);
94 86
95 while (timeout_cnt--) { 87 while (timeout_cnt--) {
96 if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1)) 88 if (!(readb(ctrl_addr) & MXC_W1_CONTROL_WR(bit)))
97 break; 89 break;
98 90
99 udelay(1); 91 udelay(1);
100 } 92 }
101 93
102 return ((__raw_readb(ctrl_addr)) >> 3) & 0x1; 94 return !!(readb(ctrl_addr) & MXC_W1_CONTROL_RDST);
103} 95}
104 96
105static int mxc_w1_probe(struct platform_device *pdev) 97static int mxc_w1_probe(struct platform_device *pdev)
@@ -139,7 +131,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
139 if (err) 131 if (err)
140 return err; 132 return err;
141 133
142 __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER); 134 writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
143 135
144 mdev->bus_master.data = mdev; 136 mdev->bus_master.data = mdev;
145 mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus; 137 mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
@@ -177,6 +169,7 @@ MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
177static struct platform_driver mxc_w1_driver = { 169static struct platform_driver mxc_w1_driver = {
178 .driver = { 170 .driver = {
179 .name = "mxc_w1", 171 .name = "mxc_w1",
172 .owner = THIS_MODULE,
180 .of_match_table = mxc_w1_dt_ids, 173 .of_match_table = mxc_w1_dt_ids,
181 }, 174 },
182 .probe = mxc_w1_probe, 175 .probe = mxc_w1_probe,
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 9709b8b484ba..1d111e56c8c8 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -89,11 +89,22 @@ static int w1_gpio_probe_dt(struct platform_device *pdev)
89 pdata->is_open_drain = 1; 89 pdata->is_open_drain = 1;
90 90
91 gpio = of_get_gpio(np, 0); 91 gpio = of_get_gpio(np, 0);
92 if (gpio < 0) 92 if (gpio < 0) {
93 if (gpio != -EPROBE_DEFER)
94 dev_err(&pdev->dev,
95 "Failed to parse gpio property for data pin (%d)\n",
96 gpio);
97
93 return gpio; 98 return gpio;
99 }
94 pdata->pin = gpio; 100 pdata->pin = gpio;
95 101
96 pdata->ext_pullup_enable_pin = of_get_gpio(np, 1); 102 gpio = of_get_gpio(np, 1);
103 if (gpio == -EPROBE_DEFER)
104 return gpio;
105 /* ignore other errors as the pullup gpio is optional */
106 pdata->ext_pullup_enable_pin = gpio;
107
97 pdev->dev.platform_data = pdata; 108 pdev->dev.platform_data = pdata;
98 109
99 return 0; 110 return 0;
@@ -107,10 +118,8 @@ static int w1_gpio_probe(struct platform_device *pdev)
107 118
108 if (of_have_populated_dt()) { 119 if (of_have_populated_dt()) {
109 err = w1_gpio_probe_dt(pdev); 120 err = w1_gpio_probe_dt(pdev);
110 if (err < 0) { 121 if (err < 0)
111 dev_err(&pdev->dev, "Failed to parse DT\n");
112 return err; 122 return err;
113 }
114 } 123 }
115 124
116 pdata = dev_get_platdata(&pdev->dev); 125 pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 5e6a3c9e510b..1cdce80b6abf 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -72,7 +72,6 @@ config W1_SLAVE_DS2433_CRC
72 72
73config W1_SLAVE_DS2760 73config W1_SLAVE_DS2760
74 tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)" 74 tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
75 depends on W1
76 help 75 help
77 If you enable this you will have the DS2760 battery monitor 76 If you enable this you will have the DS2760 battery monitor
78 chip support. 77 chip support.
@@ -85,7 +84,6 @@ config W1_SLAVE_DS2760
85 84
86config W1_SLAVE_DS2780 85config W1_SLAVE_DS2780
87 tristate "Dallas 2780 battery monitor chip" 86 tristate "Dallas 2780 battery monitor chip"
88 depends on W1
89 help 87 help
90 If you enable this you will have the DS2780 battery monitor 88 If you enable this you will have the DS2780 battery monitor
91 chip support. 89 chip support.
@@ -98,7 +96,6 @@ config W1_SLAVE_DS2780
98 96
99config W1_SLAVE_DS2781 97config W1_SLAVE_DS2781
100 tristate "Dallas 2781 battery monitor chip" 98 tristate "Dallas 2781 battery monitor chip"
101 depends on W1
102 help 99 help
103 If you enable this you will have the DS2781 battery monitor 100 If you enable this you will have the DS2781 battery monitor
104 chip support. 101 chip support.
@@ -111,7 +108,6 @@ config W1_SLAVE_DS2781
111 108
112config W1_SLAVE_DS28E04 109config W1_SLAVE_DS28E04
113 tristate "4096-Bit Addressable 1-Wire EEPROM with PIO (DS28E04-100)" 110 tristate "4096-Bit Addressable 1-Wire EEPROM with PIO (DS28E04-100)"
114 depends on W1
115 select CRC16 111 select CRC16
116 help 112 help
117 If you enable this you will have the DS28E04-100 113 If you enable this you will have the DS28E04-100
@@ -124,7 +120,6 @@ config W1_SLAVE_DS28E04
124 120
125config W1_SLAVE_BQ27000 121config W1_SLAVE_BQ27000
126 tristate "BQ27000 slave support" 122 tristate "BQ27000 slave support"
127 depends on W1
128 help 123 help
129 Say Y here if you want to use a hdq 124 Say Y here if you want to use a hdq
130 bq27000 slave support. 125 bq27000 slave support.
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 8b5ff33f72cf..1f11a20a8ab9 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/slab.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31 32
32#include "../w1.h" 33#include "../w1.h"
@@ -58,6 +59,19 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
58static int w1_strong_pullup = 1; 59static int w1_strong_pullup = 1;
59module_param_named(strong_pullup, w1_strong_pullup, int, 0); 60module_param_named(strong_pullup, w1_strong_pullup, int, 0);
60 61
62static int w1_therm_add_slave(struct w1_slave *sl)
63{
64 sl->family_data = kzalloc(9, GFP_KERNEL);
65 if (!sl->family_data)
66 return -ENOMEM;
67 return 0;
68}
69
70static void w1_therm_remove_slave(struct w1_slave *sl)
71{
72 kfree(sl->family_data);
73 sl->family_data = NULL;
74}
61 75
62static ssize_t w1_slave_show(struct device *device, 76static ssize_t w1_slave_show(struct device *device,
63 struct device_attribute *attr, char *buf); 77 struct device_attribute *attr, char *buf);
@@ -71,6 +85,8 @@ static struct attribute *w1_therm_attrs[] = {
71ATTRIBUTE_GROUPS(w1_therm); 85ATTRIBUTE_GROUPS(w1_therm);
72 86
73static struct w1_family_ops w1_therm_fops = { 87static struct w1_family_ops w1_therm_fops = {
88 .add_slave = w1_therm_add_slave,
89 .remove_slave = w1_therm_remove_slave,
74 .groups = w1_therm_groups, 90 .groups = w1_therm_groups,
75}; 91};
76 92
@@ -253,12 +269,13 @@ static ssize_t w1_slave_show(struct device *device,
253 c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n", 269 c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
254 crc, (verdict) ? "YES" : "NO"); 270 crc, (verdict) ? "YES" : "NO");
255 if (verdict) 271 if (verdict)
256 memcpy(sl->rom, rom, sizeof(sl->rom)); 272 memcpy(sl->family_data, rom, sizeof(rom));
257 else 273 else
258 dev_warn(device, "Read failed CRC check\n"); 274 dev_warn(device, "Read failed CRC check\n");
259 275
260 for (i = 0; i < 9; ++i) 276 for (i = 0; i < 9; ++i)
261 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]); 277 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
278 ((u8 *)sl->family_data)[i]);
262 279
263 c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n", 280 c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
264 w1_convert_temp(rom, sl->family->fid)); 281 w1_convert_temp(rom, sl->family->fid));
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 66efa96c4603..b96f61b15dc6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -46,18 +46,29 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); 46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
47 47
48static int w1_timeout = 10; 48static int w1_timeout = 10;
49int w1_max_slave_count = 10; 49int w1_max_slave_count = 64;
50int w1_max_slave_ttl = 10; 50int w1_max_slave_ttl = 10;
51 51
52module_param_named(timeout, w1_timeout, int, 0); 52module_param_named(timeout, w1_timeout, int, 0);
53MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
54/* A search stops when w1_max_slave_count devices have been found in that
55 * search. The next search will start over and detect the same set of devices
56 * on a static 1-wire bus. Memory is not allocated based on this number, just
57 * on the number of devices known to the kernel. Having a high number does not
58 * consume additional resources. As a special case, if there is only one
59 * device on the network and w1_max_slave_count is set to 1, the device id can
60 * be read directly skipping the normal slower search process.
61 */
53module_param_named(max_slave_count, w1_max_slave_count, int, 0); 62module_param_named(max_slave_count, w1_max_slave_count, int, 0);
63MODULE_PARM_DESC(max_slave_count,
64 "maximum number of slaves detected in a search");
54module_param_named(slave_ttl, w1_max_slave_ttl, int, 0); 65module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
66MODULE_PARM_DESC(slave_ttl,
67 "Number of searches not seeing a slave before it will be removed");
55 68
56DEFINE_MUTEX(w1_mlock); 69DEFINE_MUTEX(w1_mlock);
57LIST_HEAD(w1_masters); 70LIST_HEAD(w1_masters);
58 71
59static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
60
61static int w1_master_match(struct device *dev, struct device_driver *drv) 72static int w1_master_match(struct device *dev, struct device_driver *drv)
62{ 73{
63 return 1; 74 return 1;
@@ -81,19 +92,10 @@ static void w1_slave_release(struct device *dev)
81{ 92{
82 struct w1_slave *sl = dev_to_w1_slave(dev); 93 struct w1_slave *sl = dev_to_w1_slave(dev);
83 94
84 dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name); 95 dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl);
85
86 while (atomic_read(&sl->refcnt)) {
87 dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n",
88 sl->name, atomic_read(&sl->refcnt));
89 if (msleep_interruptible(1000))
90 flush_signals(current);
91 }
92 96
93 w1_family_put(sl->family); 97 w1_family_put(sl->family);
94 sl->master->slave_count--; 98 sl->master->slave_count--;
95
96 complete(&sl->released);
97} 99}
98 100
99static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) 101static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -243,7 +245,9 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
243 mutex_lock(&md->mutex); 245 mutex_lock(&md->mutex);
244 md->search_count = tmp; 246 md->search_count = tmp;
245 mutex_unlock(&md->mutex); 247 mutex_unlock(&md->mutex);
246 wake_up_process(md->thread); 248 /* Only wake if it is going to be searching. */
249 if (tmp)
250 wake_up_process(md->thread);
247 251
248 return count; 252 return count;
249} 253}
@@ -277,7 +281,6 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
277 mutex_lock(&md->mutex); 281 mutex_lock(&md->mutex);
278 md->enable_pullup = tmp; 282 md->enable_pullup = tmp;
279 mutex_unlock(&md->mutex); 283 mutex_unlock(&md->mutex);
280 wake_up_process(md->thread);
281 284
282 return count; 285 return count;
283} 286}
@@ -314,6 +317,24 @@ static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct devic
314 return count; 317 return count;
315} 318}
316 319
320static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
321 struct device_attribute *attr, const char *buf, size_t count)
322{
323 int tmp;
324 struct w1_master *md = dev_to_w1_master(dev);
325
326 if (kstrtoint(buf, 0, &tmp) == -EINVAL || tmp < 1)
327 return -EINVAL;
328
329 mutex_lock(&md->mutex);
330 md->max_slave_count = tmp;
331 /* allow each time the max_slave_count is updated */
332 clear_bit(W1_WARN_MAX_COUNT, &md->flags);
333 mutex_unlock(&md->mutex);
334
335 return count;
336}
337
317static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf) 338static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf)
318{ 339{
319 struct w1_master *md = dev_to_w1_master(dev); 340 struct w1_master *md = dev_to_w1_master(dev);
@@ -352,23 +373,20 @@ static ssize_t w1_master_attribute_show_slaves(struct device *dev,
352{ 373{
353 struct w1_master *md = dev_to_w1_master(dev); 374 struct w1_master *md = dev_to_w1_master(dev);
354 int c = PAGE_SIZE; 375 int c = PAGE_SIZE;
376 struct list_head *ent, *n;
377 struct w1_slave *sl = NULL;
355 378
356 mutex_lock(&md->mutex); 379 mutex_lock(&md->list_mutex);
357
358 if (md->slave_count == 0)
359 c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
360 else {
361 struct list_head *ent, *n;
362 struct w1_slave *sl;
363 380
364 list_for_each_safe(ent, n, &md->slist) { 381 list_for_each_safe(ent, n, &md->slist) {
365 sl = list_entry(ent, struct w1_slave, w1_slave_entry); 382 sl = list_entry(ent, struct w1_slave, w1_slave_entry);
366 383
367 c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); 384 c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
368 }
369 } 385 }
386 if (!sl)
387 c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
370 388
371 mutex_unlock(&md->mutex); 389 mutex_unlock(&md->list_mutex);
372 390
373 return PAGE_SIZE - c; 391 return PAGE_SIZE - c;
374} 392}
@@ -422,19 +440,22 @@ static int w1_atoreg_num(struct device *dev, const char *buf, size_t count,
422} 440}
423 441
424/* Searches the slaves in the w1_master and returns a pointer or NULL. 442/* Searches the slaves in the w1_master and returns a pointer or NULL.
425 * Note: must hold the mutex 443 * Note: must not hold list_mutex
426 */ 444 */
427static struct w1_slave *w1_slave_search_device(struct w1_master *dev, 445struct w1_slave *w1_slave_search_device(struct w1_master *dev,
428 struct w1_reg_num *rn) 446 struct w1_reg_num *rn)
429{ 447{
430 struct w1_slave *sl; 448 struct w1_slave *sl;
449 mutex_lock(&dev->list_mutex);
431 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 450 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
432 if (sl->reg_num.family == rn->family && 451 if (sl->reg_num.family == rn->family &&
433 sl->reg_num.id == rn->id && 452 sl->reg_num.id == rn->id &&
434 sl->reg_num.crc == rn->crc) { 453 sl->reg_num.crc == rn->crc) {
454 mutex_unlock(&dev->list_mutex);
435 return sl; 455 return sl;
436 } 456 }
437 } 457 }
458 mutex_unlock(&dev->list_mutex);
438 return NULL; 459 return NULL;
439} 460}
440 461
@@ -491,7 +512,10 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
491 mutex_lock(&md->mutex); 512 mutex_lock(&md->mutex);
492 sl = w1_slave_search_device(md, &rn); 513 sl = w1_slave_search_device(md, &rn);
493 if (sl) { 514 if (sl) {
494 w1_slave_detach(sl); 515 result = w1_slave_detach(sl);
516 /* refcnt 0 means it was detached in the call */
517 if (result == 0)
518 result = count;
495 } else { 519 } else {
496 dev_info(dev, "Device %02x-%012llx doesn't exist\n", rn.family, 520
497 (unsigned long long)rn.id); 521 (unsigned long long)rn.id);
@@ -516,7 +540,7 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
516static W1_MASTER_ATTR_RO(name, S_IRUGO); 540static W1_MASTER_ATTR_RO(name, S_IRUGO);
517static W1_MASTER_ATTR_RO(slaves, S_IRUGO); 541static W1_MASTER_ATTR_RO(slaves, S_IRUGO);
518static W1_MASTER_ATTR_RO(slave_count, S_IRUGO); 542static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
519static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO); 543static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
520static W1_MASTER_ATTR_RO(attempts, S_IRUGO); 544static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
521static W1_MASTER_ATTR_RO(timeout, S_IRUGO); 545static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
522static W1_MASTER_ATTR_RO(pointer, S_IRUGO); 546static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
@@ -686,12 +710,14 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
686 dev_set_uevent_suppress(&sl->dev, false); 710 dev_set_uevent_suppress(&sl->dev, false);
687 kobject_uevent(&sl->dev.kobj, KOBJ_ADD); 711 kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
688 712
713 mutex_lock(&sl->master->list_mutex);
689 list_add_tail(&sl->w1_slave_entry, &sl->master->slist); 714 list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
715 mutex_unlock(&sl->master->list_mutex);
690 716
691 return 0; 717 return 0;
692} 718}
693 719
694static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) 720int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
695{ 721{
696 struct w1_slave *sl; 722 struct w1_slave *sl;
697 struct w1_family *f; 723 struct w1_family *f;
@@ -713,8 +739,8 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
713 739
714 memset(&msg, 0, sizeof(msg)); 740 memset(&msg, 0, sizeof(msg));
715 memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); 741 memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
716 atomic_set(&sl->refcnt, 0); 742 atomic_set(&sl->refcnt, 1);
717 init_completion(&sl->released); 743 atomic_inc(&sl->master->refcnt);
718 744
719 /* slave modules need to be loaded in a context with unlocked mutex */ 745 /* slave modules need to be loaded in a context with unlocked mutex */
720 mutex_unlock(&dev->mutex); 746 mutex_unlock(&dev->mutex);
@@ -754,23 +780,48 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
754 return 0; 780 return 0;
755} 781}
756 782
757void w1_slave_detach(struct w1_slave *sl) 783int w1_unref_slave(struct w1_slave *sl)
758{ 784{
759 struct w1_netlink_msg msg; 785 struct w1_master *dev = sl->master;
760 786 int refcnt;
761 dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl); 787 mutex_lock(&dev->list_mutex);
762 788 refcnt = atomic_sub_return(1, &sl->refcnt);
763 list_del(&sl->w1_slave_entry); 789 if (refcnt == 0) {
764 790 struct w1_netlink_msg msg;
765 memset(&msg, 0, sizeof(msg)); 791
766 memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); 792 dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__,
767 msg.type = W1_SLAVE_REMOVE; 793 sl->name, sl);
768 w1_netlink_send(sl->master, &msg); 794
769 795 list_del(&sl->w1_slave_entry);
770 device_unregister(&sl->dev); 796
797 memset(&msg, 0, sizeof(msg));
798 memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
799 msg.type = W1_SLAVE_REMOVE;
800 w1_netlink_send(sl->master, &msg);
801
802 device_unregister(&sl->dev);
803 #ifdef DEBUG
804 memset(sl, 0, sizeof(*sl));
805 #endif
806 kfree(sl);
807 }
808 atomic_dec(&dev->refcnt);
809 mutex_unlock(&dev->list_mutex);
810 return refcnt;
811}
771 812
772 wait_for_completion(&sl->released); 813int w1_slave_detach(struct w1_slave *sl)
773 kfree(sl); 814{
815 /* Only detach a slave once as it decreases the refcnt each time. */
816 int destroy_now;
817 mutex_lock(&sl->master->list_mutex);
818 destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags);
819 set_bit(W1_SLAVE_DETACH, &sl->flags);
820 mutex_unlock(&sl->master->list_mutex);
821
822 if (destroy_now)
823 destroy_now = !w1_unref_slave(sl);
824 return destroy_now ? 0 : -EBUSY;
774} 825}
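With this change a slave's refcnt starts at 1 when attached, w1_slave_detach() may drop that attach reference only once (guarded by the W1_SLAVE_DETACH bit), and any other holder releases its reference through w1_unref_slave(). A minimal, hypothetical caller built only on the w1_search_slave()/w1_unref_slave() pair exported through w1.h:

#include <linux/errno.h>
#include "w1.h"

/* hypothetical: borrow a slave by id, then drop the references
 * (one on the slave, one on its master) that w1_search_slave() took */
static int example_use_slave(struct w1_reg_num *id)
{
	struct w1_slave *sl = w1_search_slave(id);

	if (!sl)
		return -ENODEV;

	/* ... talk to the device under sl->master->bus_mutex ... */

	w1_unref_slave(sl);	/* frees sl if this was the last reference */
	return 0;
}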
775 826
776struct w1_master *w1_search_master_id(u32 id) 827struct w1_master *w1_search_master_id(u32 id)
@@ -799,7 +850,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
799 850
800 mutex_lock(&w1_mlock); 851 mutex_lock(&w1_mlock);
801 list_for_each_entry(dev, &w1_masters, w1_master_entry) { 852 list_for_each_entry(dev, &w1_masters, w1_master_entry) {
802 mutex_lock(&dev->mutex); 853 mutex_lock(&dev->list_mutex);
803 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 854 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
804 if (sl->reg_num.family == id->family && 855 if (sl->reg_num.family == id->family &&
805 sl->reg_num.id == id->id && 856 sl->reg_num.id == id->id &&
@@ -810,7 +861,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
810 break; 861 break;
811 } 862 }
812 } 863 }
813 mutex_unlock(&dev->mutex); 864 mutex_unlock(&dev->list_mutex);
814 865
815 if (found) 866 if (found)
816 break; 867 break;
@@ -830,6 +881,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
830 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 881 dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
831 "for family %02x.\n", dev->name, f->fid); 882 "for family %02x.\n", dev->name, f->fid);
832 mutex_lock(&dev->mutex); 883 mutex_lock(&dev->mutex);
884 mutex_lock(&dev->list_mutex);
833 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 885 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
834 /* If it is a new family, slaves with the default 886 /* If it is a new family, slaves with the default
835 * family driver and are that family will be 887 * family driver and are that family will be
@@ -841,14 +893,19 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
841 (!attach && sl->family->fid == f->fid)) { 893 (!attach && sl->family->fid == f->fid)) {
842 struct w1_reg_num rn; 894 struct w1_reg_num rn;
843 895
896 mutex_unlock(&dev->list_mutex);
844 memcpy(&rn, &sl->reg_num, sizeof(rn)); 897 memcpy(&rn, &sl->reg_num, sizeof(rn));
845 w1_slave_detach(sl); 898 /* If it was already in use let the automatic
846 899 * scan pick it up again later.
847 w1_attach_slave_device(dev, &rn); 900 */
901 if (!w1_slave_detach(sl))
902 w1_attach_slave_device(dev, &rn);
903 mutex_lock(&dev->list_mutex);
848 } 904 }
849 } 905 }
850 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 906 dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
851 "has been finished.\n", dev->name); 907 "has been finished.\n", dev->name);
908 mutex_unlock(&dev->list_mutex);
852 mutex_unlock(&dev->mutex); 909 mutex_unlock(&dev->mutex);
853 } 910 }
854 mutex_unlock(&w1_mlock); 911 mutex_unlock(&w1_mlock);
@@ -876,7 +933,12 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
876} 933}
877 934
878/** 935/**
879 * Performs a ROM Search & registers any devices found. 936 * w1_search() - Performs a ROM Search & registers any devices found.
937 * @dev: The master device to search
938 * @search_type: W1_SEARCH to search all devices, or W1_ALARM_SEARCH
939 * to return only devices in the alarmed state
940 * @cb: Function to call when a device is found
941 *
880 * The 1-wire search is a simple binary tree search. 942 * The 1-wire search is a simple binary tree search.
881 * For each bit of the address, we read two bits and write one bit. 943 * For each bit of the address, we read two bits and write one bit.
 882 * The bit written will put to sleep all devices that don't match that bit. 944
@@ -886,8 +948,6 @@ void w1_slave_found(struct w1_master *dev, u64 rn)
886 * 948 *
887 * See "Application note 187 1-wire search algorithm" at www.maxim-ic.com 949 * See "Application note 187 1-wire search algorithm" at www.maxim-ic.com
888 * 950 *
889 * @dev The master device to search
890 * @cb Function to call when a device is found
891 */ 951 */
892void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb) 952void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
893{ 953{
@@ -898,7 +958,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
898 u8 triplet_ret = 0; 958 u8 triplet_ret = 0;
899 959
900 search_bit = 0; 960 search_bit = 0;
901 rn = last_rn = 0; 961 rn = dev->search_id;
962 last_rn = 0;
902 last_device = 0; 963 last_device = 0;
903 last_zero = -1; 964 last_zero = -1;
904 965
@@ -945,7 +1006,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
945 else 1006 else
946 search_bit = ((last_rn >> i) & 0x1); 1007 search_bit = ((last_rn >> i) & 0x1);
947 1008
948 /** Read two bits and write one bit */ 1009 /* Read two bits and write one bit */
949 triplet_ret = w1_triplet(dev, search_bit); 1010 triplet_ret = w1_triplet(dev, search_bit);
950 1011
951 /* quit if no device responded */ 1012 /* quit if no device responded */
@@ -960,8 +1021,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
960 tmp64 = (triplet_ret >> 2); 1021 tmp64 = (triplet_ret >> 2);
961 rn |= (tmp64 << i); 1022 rn |= (tmp64 << i);
962 1023
963 /* ensure we're called from kthread and not by netlink callback */ 1024 if (test_bit(W1_ABORT_SEARCH, &dev->flags)) {
964 if (!dev->priv && kthread_should_stop()) {
965 mutex_unlock(&dev->bus_mutex); 1025 mutex_unlock(&dev->bus_mutex);
966 dev_dbg(&dev->dev, "Abort w1_search\n"); 1026 dev_dbg(&dev->dev, "Abort w1_search\n");
967 return; 1027 return;
@@ -970,11 +1030,30 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
970 mutex_unlock(&dev->bus_mutex); 1030 mutex_unlock(&dev->bus_mutex);
971 1031
972 if ( (triplet_ret & 0x03) != 0x03 ) { 1032 if ( (triplet_ret & 0x03) != 0x03 ) {
973 if ( (desc_bit == last_zero) || (last_zero < 0)) 1033 if ((desc_bit == last_zero) || (last_zero < 0)) {
974 last_device = 1; 1034 last_device = 1;
1035 dev->search_id = 0;
1036 } else {
1037 dev->search_id = rn;
1038 }
975 desc_bit = last_zero; 1039 desc_bit = last_zero;
976 cb(dev, rn); 1040 cb(dev, rn);
977 } 1041 }
1042
1043 if (!last_device && slave_count == dev->max_slave_count &&
1044 !test_bit(W1_WARN_MAX_COUNT, &dev->flags)) {
 1045 /* Only max_slave_count will be scanned in a search,
 1046 * but the next search will start where this one left off
 1047 * until all ids are identified, and then it will start
 1048 * over. A continued search will report the previous
 1049 * last id as the first id (provided it is still on the
 1050 * bus).
1051 */
1052 dev_info(&dev->dev, "%s: max_slave_count %d reached, "
1053 "will continue next search.\n", __func__,
1054 dev->max_slave_count);
1055 set_bit(W1_WARN_MAX_COUNT, &dev->flags);
1056 }
978 } 1057 }
979} 1058}
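The loop above consumes w1_triplet()'s return value according to the documented bit layout (bit 0 = id bit, bit 1 = complement, bit 2 = direction taken). Two helpers restating that decode, shown only as an illustration (neither is part of the patch):

#include <linux/types.h>

/* id bit and its complement both read back as 1: no slave answered */
static inline bool example_triplet_no_response(u8 triplet_ret)
{
	return (triplet_ret & 0x03) == 0x03;
}

/* bit 2 is the direction actually written on the bus; fold it into the
 * ROM id being assembled at bit position i */
static inline u64 example_triplet_apply(u64 rn, u8 triplet_ret, int i)
{
	return rn | ((u64)((triplet_ret >> 2) & 0x1) << i);
}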
980 1059
@@ -983,17 +1062,24 @@ void w1_search_process_cb(struct w1_master *dev, u8 search_type,
983{ 1062{
984 struct w1_slave *sl, *sln; 1063 struct w1_slave *sl, *sln;
985 1064
1065 mutex_lock(&dev->list_mutex);
986 list_for_each_entry(sl, &dev->slist, w1_slave_entry) 1066 list_for_each_entry(sl, &dev->slist, w1_slave_entry)
987 clear_bit(W1_SLAVE_ACTIVE, &sl->flags); 1067 clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
1068 mutex_unlock(&dev->list_mutex);
988 1069
989 w1_search_devices(dev, search_type, cb); 1070 w1_search_devices(dev, search_type, cb);
990 1071
1072 mutex_lock(&dev->list_mutex);
991 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 1073 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
992 if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) 1074 if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) {
1075 mutex_unlock(&dev->list_mutex);
993 w1_slave_detach(sl); 1076 w1_slave_detach(sl);
1077 mutex_lock(&dev->list_mutex);
1078 }
994 else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags)) 1079 else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
995 sl->ttl = dev->slave_ttl; 1080 sl->ttl = dev->slave_ttl;
996 } 1081 }
1082 mutex_unlock(&dev->list_mutex);
997 1083
998 if (dev->search_count > 0) 1084 if (dev->search_count > 0)
999 dev->search_count--; 1085 dev->search_count--;
@@ -1004,6 +1090,32 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
1004 w1_search_process_cb(dev, search_type, w1_slave_found); 1090 w1_search_process_cb(dev, search_type, w1_slave_found);
1005} 1091}
1006 1092
1093/**
1094 * w1_process_callbacks() - execute each dev->async_list callback entry
1095 * @dev: w1_master device
1096 *
 1097 * Return: 1 if there were commands to execute, 0 otherwise
1098 */
1099int w1_process_callbacks(struct w1_master *dev)
1100{
1101 int ret = 0;
1102 struct w1_async_cmd *async_cmd, *async_n;
1103
1104 /* The list can be added to in another thread, loop until it is empty */
1105 while (!list_empty(&dev->async_list)) {
1106 list_for_each_entry_safe(async_cmd, async_n, &dev->async_list,
1107 async_entry) {
 1108 /* drop the lock; if it is a search it can take a long
 1109 * time */
1110 mutex_unlock(&dev->list_mutex);
1111 async_cmd->cb(dev, async_cmd);
1112 ret = 1;
1113 mutex_lock(&dev->list_mutex);
1114 }
1115 }
1116 return ret;
1117}
1118
1007int w1_process(void *data) 1119int w1_process(void *data)
1008{ 1120{
1009 struct w1_master *dev = (struct w1_master *) data; 1121 struct w1_master *dev = (struct w1_master *) data;
@@ -1011,23 +1123,46 @@ int w1_process(void *data)
1011 * time can be calculated in jiffies once. 1123 * time can be calculated in jiffies once.
1012 */ 1124 */
1013 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); 1125 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000);
1126 /* remainder if it woke up early */
1127 unsigned long jremain = 0;
1014 1128
1015 while (!kthread_should_stop()) { 1129 for (;;) {
1016 if (dev->search_count) { 1130
1131 if (!jremain && dev->search_count) {
1017 mutex_lock(&dev->mutex); 1132 mutex_lock(&dev->mutex);
1018 w1_search_process(dev, W1_SEARCH); 1133 w1_search_process(dev, W1_SEARCH);
1019 mutex_unlock(&dev->mutex); 1134 mutex_unlock(&dev->mutex);
1020 } 1135 }
1021 1136
1137 mutex_lock(&dev->list_mutex);
 1138 /* Note: w1_process_callbacks() drops the lock while processing,
1139 * but locks it again before returning.
1140 */
1141 if (!w1_process_callbacks(dev) && jremain) {
 1142 /* a wakeup is either to stop the thread, process
 1143 * callbacks, or search; it wasn't process callbacks, so
 1144 * schedule a search.
1145 */
1146 jremain = 1;
1147 }
1148
1022 try_to_freeze(); 1149 try_to_freeze();
1023 __set_current_state(TASK_INTERRUPTIBLE); 1150 __set_current_state(TASK_INTERRUPTIBLE);
1024 1151
 1152 /* hold list_mutex until after setting TASK_INTERRUPTIBLE to avoid losing
 1153 * the wakeup signal when an async_cmd is added.
1154 */
1155 mutex_unlock(&dev->list_mutex);
1156
1025 if (kthread_should_stop()) 1157 if (kthread_should_stop())
1026 break; 1158 break;
1027 1159
1028 /* Only sleep when the search is active. */ 1160 /* Only sleep when the search is active. */
1029 if (dev->search_count) 1161 if (dev->search_count) {
1030 schedule_timeout(jtime); 1162 if (!jremain)
1163 jremain = jtime;
1164 jremain = schedule_timeout(jremain);
1165 }
1031 else 1166 else
1032 schedule(); 1167 schedule();
1033 } 1168 }
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index ca8081a101d6..734dab7fc687 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -22,6 +22,13 @@
22#ifndef __W1_H 22#ifndef __W1_H
23#define __W1_H 23#define __W1_H
24 24
25/**
26 * struct w1_reg_num - broken out slave device id
27 *
28 * @family: identifies the type of device
29 * @id: along with family is the unique device id
30 * @crc: checksum of the other bytes
31 */
25struct w1_reg_num 32struct w1_reg_num
26{ 33{
27#if defined(__LITTLE_ENDIAN_BITFIELD) 34#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -58,7 +65,24 @@ struct w1_reg_num
58#define W1_RESUME_CMD 0xA5 65#define W1_RESUME_CMD 0xA5
59 66
60#define W1_SLAVE_ACTIVE 0 67#define W1_SLAVE_ACTIVE 0
68#define W1_SLAVE_DETACH 1
61 69
70/**
71 * struct w1_slave - holds a single slave device on the bus
72 *
73 * @owner: Points to the one wire "wire" kernel module.
 74 * @name: Device id in ASCII.
75 * @w1_slave_entry: data for the linked list
76 * @reg_num: the slave id in binary
77 * @refcnt: reference count, delete when 0
78 * @flags: bit flags for W1_SLAVE_ACTIVE W1_SLAVE_DETACH
 79 * @ttl: decremented each search this slave isn't found, detach at 0
80 * @master: bus which this slave is on
81 * @family: module for device family type
82 * @family_data: pointer for use by the family module
83 * @dev: kernel device identifier
84 *
85 */
62struct w1_slave 86struct w1_slave
63{ 87{
64 struct module *owner; 88 struct module *owner;
@@ -66,7 +90,6 @@ struct w1_slave
66 struct list_head w1_slave_entry; 90 struct list_head w1_slave_entry;
67 struct w1_reg_num reg_num; 91 struct w1_reg_num reg_num;
68 atomic_t refcnt; 92 atomic_t refcnt;
69 u8 rom[9];
70 int ttl; 93 int ttl;
71 unsigned long flags; 94 unsigned long flags;
72 95
@@ -74,99 +97,146 @@ struct w1_slave
74 struct w1_family *family; 97 struct w1_family *family;
75 void *family_data; 98 void *family_data;
76 struct device dev; 99 struct device dev;
77 struct completion released;
78}; 100};
79 101
80typedef void (*w1_slave_found_callback)(struct w1_master *, u64); 102typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
81 103
82 104
83/** 105/**
106 * struct w1_bus_master - operations available on a bus master
107 *
108 * @data: the first parameter in all the functions below
109 *
110 * @read_bit: Sample the line level @return the level read (0 or 1)
111 *
112 * @write_bit: Sets the line level
113 *
114 * @touch_bit: the lowest-level function for devices that really support the
115 * 1-wire protocol.
116 * touch_bit(0) = write-0 cycle
117 * touch_bit(1) = write-1 / read cycle
118 * @return the bit read (0 or 1)
119 *
 120 * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls.
121 * @return the byte read
122 *
123 * @write_byte: Writes a byte. Same as 8 touch_bit(x) calls.
124 *
125 * @read_block: Same as a series of read_byte() calls
126 * @return the number of bytes read
127 *
128 * @write_block: Same as a series of write_byte() calls
129 *
130 * @triplet: Combines two reads and a smart write for ROM searches
131 * @return bit0=Id bit1=comp_id bit2=dir_taken
132 *
133 * @reset_bus: long write-0 with a read for the presence pulse detection
134 * @return -1=Error, 0=Device present, 1=No device present
135 *
136 * @set_pullup: Put out a strong pull-up pulse of the specified duration.
137 * @return -1=Error, 0=completed
138 *
 139 * @search: Really nice hardware can handle the different types of ROM search
140 * w1_master* is passed to the slave found callback.
141 * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
142 *
84 * Note: read_bit and write_bit are very low level functions and should only 143 * Note: read_bit and write_bit are very low level functions and should only
85 * be used with hardware that doesn't really support 1-wire operations, 144 * be used with hardware that doesn't really support 1-wire operations,
86 * like a parallel/serial port. 145 * like a parallel/serial port.
87 * Either define read_bit and write_bit OR define, at minimum, touch_bit and 146 * Either define read_bit and write_bit OR define, at minimum, touch_bit and
88 * reset_bus. 147 * reset_bus.
148 *
89 */ 149 */
90struct w1_bus_master 150struct w1_bus_master
91{ 151{
92 /** the first parameter in all the functions below */
93 void *data; 152 void *data;
94 153
95 /**
96 * Sample the line level
97 * @return the level read (0 or 1)
98 */
99 u8 (*read_bit)(void *); 154 u8 (*read_bit)(void *);
100 155
101 /** Sets the line level */
102 void (*write_bit)(void *, u8); 156 void (*write_bit)(void *, u8);
103 157
104 /**
105 * touch_bit is the lowest-level function for devices that really
106 * support the 1-wire protocol.
107 * touch_bit(0) = write-0 cycle
108 * touch_bit(1) = write-1 / read cycle
109 * @return the bit read (0 or 1)
110 */
111 u8 (*touch_bit)(void *, u8); 158 u8 (*touch_bit)(void *, u8);
112 159
113 /**
114 * Reads a bytes. Same as 8 touch_bit(1) calls.
115 * @return the byte read
116 */
117 u8 (*read_byte)(void *); 160 u8 (*read_byte)(void *);
118 161
119 /**
120 * Writes a byte. Same as 8 touch_bit(x) calls.
121 */
122 void (*write_byte)(void *, u8); 162 void (*write_byte)(void *, u8);
123 163
124 /**
125 * Same as a series of read_byte() calls
126 * @return the number of bytes read
127 */
128 u8 (*read_block)(void *, u8 *, int); 164 u8 (*read_block)(void *, u8 *, int);
129 165
130 /** Same as a series of write_byte() calls */
131 void (*write_block)(void *, const u8 *, int); 166 void (*write_block)(void *, const u8 *, int);
132 167
133 /**
134 * Combines two reads and a smart write for ROM searches
135 * @return bit0=Id bit1=comp_id bit2=dir_taken
136 */
137 u8 (*triplet)(void *, u8); 168 u8 (*triplet)(void *, u8);
138 169
139 /**
140 * long write-0 with a read for the presence pulse detection
141 * @return -1=Error, 0=Device present, 1=No device present
142 */
143 u8 (*reset_bus)(void *); 170 u8 (*reset_bus)(void *);
144 171
145 /**
146 * Put out a strong pull-up pulse of the specified duration.
147 * @return -1=Error, 0=completed
148 */
149 u8 (*set_pullup)(void *, int); 172 u8 (*set_pullup)(void *, int);
150 173
151 /** Really nice hardware can handles the different types of ROM search
152 * w1_master* is passed to the slave found callback.
153 */
154 void (*search)(void *, struct w1_master *, 174 void (*search)(void *, struct w1_master *,
155 u8, w1_slave_found_callback); 175 u8, w1_slave_found_callback);
156}; 176};
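Per the note above, a bus-master driver needs to supply at least touch_bit and reset_bus (or the raw read_bit/write_bit pair) and register the structure. A skeletal, hypothetical registration following that rule; the hardware callbacks are stubs and the registration declarations are assumed to come from the driver's w1_int.h:

#include <linux/module.h>
#include "w1.h"
#include "w1_int.h"

static u8 example_touch_bit(void *data, u8 bit)
{
	/* drive the line for a write-0, or release it and sample the level
	 * for bit == 1; stubbed out in this sketch */
	return bit;
}

static u8 example_reset_bus(void *data)
{
	return 0;	/* 0 = device present, 1 = no device present */
}

static struct w1_bus_master example_bus_master = {
	.touch_bit = example_touch_bit,
	.reset_bus = example_reset_bus,
};

static int __init example_w1_init(void)
{
	return w1_add_master_device(&example_bus_master);
}

static void __exit example_w1_exit(void)
{
	w1_remove_master_device(&example_bus_master);
}

module_init(example_w1_init);
module_exit(example_w1_exit);
MODULE_LICENSE("GPL");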
157 177
178/**
179 * enum w1_master_flags - bitfields used in w1_master.flags
180 * @W1_ABORT_SEARCH: abort searching early on shutdown
181 * @W1_WARN_MAX_COUNT: limit warning when the maximum count is reached
182 */
183enum w1_master_flags {
184 W1_ABORT_SEARCH = 0,
185 W1_WARN_MAX_COUNT = 1,
186};
187
188/**
189 * struct w1_master - one per bus master
190 * @w1_master_entry: master linked list
191 * @owner: module owner
 192 * @name: dynamically allocated bus name
193 * @list_mutex: protect slist and async_list
194 * @slist: linked list of slaves
195 * @async_list: linked list of netlink commands to execute
196 * @max_slave_count: maximum number of slaves to search for at a time
197 * @slave_count: current number of slaves known
 198 * @attempts: number of searches run
199 * @slave_ttl: number of searches before a slave is timed out
200 * @initialized: prevent init/removal race conditions
201 * @id: w1 bus number
202 * @search_count: number of automatic searches to run, -1 unlimited
203 * @search_id: allows continuing a search
204 * @refcnt: reference count
205 * @priv: private data storage
206 * @priv_size: size allocated
207 * @enable_pullup: allows a strong pullup
208 * @pullup_duration: time for the next strong pullup
209 * @flags: one of w1_master_flags
210 * @thread: thread for bus search and netlink commands
211 * @mutex: protect most of w1_master
 212 * @bus_mutex: protect concurrent bus access
213 * @driver: sysfs driver
214 * @dev: sysfs device
215 * @bus_master: io operations available
216 * @seq: sequence number used for netlink broadcasts
217 * @portid: destination for the current netlink command
218 */
158struct w1_master 219struct w1_master
159{ 220{
160 struct list_head w1_master_entry; 221 struct list_head w1_master_entry;
161 struct module *owner; 222 struct module *owner;
162 unsigned char name[W1_MAXNAMELEN]; 223 unsigned char name[W1_MAXNAMELEN];
224 /* list_mutex protects just slist and async_list so slaves can be
225 * searched for and async commands added while the master has
226 * w1_master.mutex locked and is operating on the bus.
227 * lock order w1_mlock, w1_master.mutex, w1_master.list_mutex
228 */
229 struct mutex list_mutex;
163 struct list_head slist; 230 struct list_head slist;
231 struct list_head async_list;
164 int max_slave_count, slave_count; 232 int max_slave_count, slave_count;
165 unsigned long attempts; 233 unsigned long attempts;
166 int slave_ttl; 234 int slave_ttl;
167 int initialized; 235 int initialized;
168 u32 id; 236 u32 id;
169 int search_count; 237 int search_count;
238 /* id to start searching on, to continue a search or 0 to restart */
239 u64 search_id;
170 240
171 atomic_t refcnt; 241 atomic_t refcnt;
172 242
@@ -178,6 +248,8 @@ struct w1_master
178 /** 5V strong pullup duration in milliseconds, zero disabled. */ 248 /** 5V strong pullup duration in milliseconds, zero disabled. */
179 int pullup_duration; 249 int pullup_duration;
180 250
251 long flags;
252
181 struct task_struct *thread; 253 struct task_struct *thread;
182 struct mutex mutex; 254 struct mutex mutex;
183 struct mutex bus_mutex; 255 struct mutex bus_mutex;
@@ -188,16 +260,41 @@ struct w1_master
188 struct w1_bus_master *bus_master; 260 struct w1_bus_master *bus_master;
189 261
190 u32 seq; 262 u32 seq;
263 /* port id to send netlink responses to. The value is temporarily
264 * stored here while processing a message, set after locking the
265 * mutex, zero before unlocking the mutex.
266 */
267 u32 portid;
268};
269
270/**
271 * struct w1_async_cmd - execute callback from the w1_process kthread
272 * @async_entry: link entry
 273 * @cb: callback function, must list_del and destroy this list entry before
274 * returning
275 *
276 * When inserted into the w1_master async_list, w1_process will execute
277 * the callback. Embed this into the structure with the command details.
278 */
279struct w1_async_cmd {
280 struct list_head async_entry;
281 void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
191}; 282};
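As the comment says, a user embeds struct w1_async_cmd in its own command structure; the w1_process() kthread then runs the callback with list_mutex released, and the callback unlinks and frees the entry itself (w1_netlink.c's w1_cb_node below does exactly this). A stripped-down, hypothetical version of that pattern:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "w1.h"

struct example_cmd {
	struct w1_async_cmd async;
	int arg;	/* hypothetical payload */
};

static void example_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct example_cmd *cmd = container_of(async_cmd, struct example_cmd, async);

	/* do the work; list_mutex is not held here */

	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);
	kfree(cmd);
}

static int example_queue(struct w1_master *dev, int arg)
{
	struct example_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;
	cmd->async.cb = example_cb;
	cmd->arg = arg;

	mutex_lock(&dev->list_mutex);
	list_add_tail(&cmd->async.async_entry, &dev->async_list);
	wake_up_process(dev->thread);
	mutex_unlock(&dev->list_mutex);
	return 0;
}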
192 283
193int w1_create_master_attributes(struct w1_master *); 284int w1_create_master_attributes(struct w1_master *);
194void w1_destroy_master_attributes(struct w1_master *master); 285void w1_destroy_master_attributes(struct w1_master *master);
195void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 286void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
196void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 287void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
288/* call w1_unref_slave to release the reference counts w1_search_slave added */
197struct w1_slave *w1_search_slave(struct w1_reg_num *id); 289struct w1_slave *w1_search_slave(struct w1_reg_num *id);
290/* decrements the reference on sl->master and sl, cleans up if zero, and
 291 * returns the reference count after it has been decremented */
292int w1_unref_slave(struct w1_slave *sl);
198void w1_slave_found(struct w1_master *dev, u64 rn); 293void w1_slave_found(struct w1_master *dev, u64 rn);
199void w1_search_process_cb(struct w1_master *dev, u8 search_type, 294void w1_search_process_cb(struct w1_master *dev, u8 search_type,
200 w1_slave_found_callback cb); 295 w1_slave_found_callback cb);
296struct w1_slave *w1_slave_search_device(struct w1_master *dev,
297 struct w1_reg_num *rn);
201struct w1_master *w1_search_master_id(u32 id); 298struct w1_master *w1_search_master_id(u32 id);
202 299
203/* Disconnect and reconnect devices in the given family. Used for finding 300/* Disconnect and reconnect devices in the given family. Used for finding
@@ -206,7 +303,9 @@ struct w1_master *w1_search_master_id(u32 id);
206 * has just been registered, to 0 when it has been unregistered. 303 * has just been registered, to 0 when it has been unregistered.
207 */ 304 */
208void w1_reconnect_slaves(struct w1_family *f, int attach); 305void w1_reconnect_slaves(struct w1_family *f, int attach);
209void w1_slave_detach(struct w1_slave *sl); 306int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
307/* 0 success, otherwise EBUSY */
308int w1_slave_detach(struct w1_slave *sl);
210 309
211u8 w1_triplet(struct w1_master *dev, int bdir); 310u8 w1_triplet(struct w1_master *dev, int bdir);
212void w1_write_8(struct w1_master *, u8); 311void w1_write_8(struct w1_master *, u8);
@@ -242,6 +341,7 @@ extern int w1_max_slave_ttl;
242extern struct list_head w1_masters; 341extern struct list_head w1_masters;
243extern struct mutex w1_mlock; 342extern struct mutex w1_mlock;
244 343
344extern int w1_process_callbacks(struct w1_master *dev);
245extern int w1_process(void *); 345extern int w1_process(void *);
246 346
247#endif /* __KERNEL__ */ 347#endif /* __KERNEL__ */
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index e9309778ee72..3bff6b37b472 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -31,6 +31,10 @@
31DEFINE_SPINLOCK(w1_flock); 31DEFINE_SPINLOCK(w1_flock);
32static LIST_HEAD(w1_families); 32static LIST_HEAD(w1_families);
33 33
34/**
35 * w1_register_family() - register a device family driver
36 * @newf: family to register
37 */
34int w1_register_family(struct w1_family *newf) 38int w1_register_family(struct w1_family *newf)
35{ 39{
36 struct list_head *ent, *n; 40 struct list_head *ent, *n;
@@ -59,6 +63,10 @@ int w1_register_family(struct w1_family *newf)
59 return ret; 63 return ret;
60} 64}
61 65
66/**
67 * w1_unregister_family() - unregister a device family driver
68 * @fent: family to unregister
69 */
62void w1_unregister_family(struct w1_family *fent) 70void w1_unregister_family(struct w1_family *fent)
63{ 71{
64 struct list_head *ent, *n; 72 struct list_head *ent, *n;
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 4ad0e81b6404..26ca1343055b 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -48,6 +48,12 @@
48 48
49struct w1_slave; 49struct w1_slave;
50 50
51/**
52 * struct w1_family_ops - operations for a family type
 53 * @add_slave: called when a slave of this family is attached
 54 * @remove_slave: called when that slave is detached
55 * @groups: sysfs group
56 */
51struct w1_family_ops 57struct w1_family_ops
52{ 58{
53 int (* add_slave)(struct w1_slave *); 59 int (* add_slave)(struct w1_slave *);
@@ -55,6 +61,13 @@ struct w1_family_ops
55 const struct attribute_group **groups; 61 const struct attribute_group **groups;
56}; 62};
57 63
64/**
65 * struct w1_family - reference counted family structure.
66 * @family_entry: family linked list
67 * @fid: 8 bit family identifier
68 * @fops: operations for this family
69 * @refcnt: reference counter
70 */
58struct w1_family 71struct w1_family
59{ 72{
60 struct list_head family_entry; 73 struct list_head family_entry;
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 590bd8a7cd1b..9b084db739c7 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -75,8 +75,10 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
75 atomic_set(&dev->refcnt, 2); 75 atomic_set(&dev->refcnt, 2);
76 76
77 INIT_LIST_HEAD(&dev->slist); 77 INIT_LIST_HEAD(&dev->slist);
78 INIT_LIST_HEAD(&dev->async_list);
78 mutex_init(&dev->mutex); 79 mutex_init(&dev->mutex);
79 mutex_init(&dev->bus_mutex); 80 mutex_init(&dev->bus_mutex);
81 mutex_init(&dev->list_mutex);
80 82
81 memcpy(&dev->dev, device, sizeof(struct device)); 83 memcpy(&dev->dev, device, sizeof(struct device));
82 dev_set_name(&dev->dev, "w1_bus_master%u", dev->id); 84 dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
@@ -103,6 +105,10 @@ static void w1_free_dev(struct w1_master *dev)
103 device_unregister(&dev->dev); 105 device_unregister(&dev->dev);
104} 106}
105 107
108/**
109 * w1_add_master_device() - registers a new master device
110 * @master: master bus device to register
111 */
106int w1_add_master_device(struct w1_bus_master *master) 112int w1_add_master_device(struct w1_bus_master *master)
107{ 113{
108 struct w1_master *dev, *entry; 114 struct w1_master *dev, *entry;
@@ -172,6 +178,7 @@ int w1_add_master_device(struct w1_bus_master *master)
172 178
173#if 0 /* Thread cleanup code, not required currently. */ 179#if 0 /* Thread cleanup code, not required currently. */
174err_out_kill_thread: 180err_out_kill_thread:
181 set_bit(W1_ABORT_SEARCH, &dev->flags);
175 kthread_stop(dev->thread); 182 kthread_stop(dev->thread);
176#endif 183#endif
177err_out_rm_attr: 184err_out_rm_attr:
@@ -187,16 +194,22 @@ void __w1_remove_master_device(struct w1_master *dev)
187 struct w1_netlink_msg msg; 194 struct w1_netlink_msg msg;
188 struct w1_slave *sl, *sln; 195 struct w1_slave *sl, *sln;
189 196
190 kthread_stop(dev->thread);
191
192 mutex_lock(&w1_mlock); 197 mutex_lock(&w1_mlock);
193 list_del(&dev->w1_master_entry); 198 list_del(&dev->w1_master_entry);
194 mutex_unlock(&w1_mlock); 199 mutex_unlock(&w1_mlock);
195 200
201 set_bit(W1_ABORT_SEARCH, &dev->flags);
202 kthread_stop(dev->thread);
203
196 mutex_lock(&dev->mutex); 204 mutex_lock(&dev->mutex);
197 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) 205 mutex_lock(&dev->list_mutex);
206 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
207 mutex_unlock(&dev->list_mutex);
198 w1_slave_detach(sl); 208 w1_slave_detach(sl);
209 mutex_lock(&dev->list_mutex);
210 }
199 w1_destroy_master_attributes(dev); 211 w1_destroy_master_attributes(dev);
212 mutex_unlock(&dev->list_mutex);
200 mutex_unlock(&dev->mutex); 213 mutex_unlock(&dev->mutex);
201 atomic_dec(&dev->refcnt); 214 atomic_dec(&dev->refcnt);
202 215
@@ -206,7 +219,9 @@ void __w1_remove_master_device(struct w1_master *dev)
206 219
207 if (msleep_interruptible(1000)) 220 if (msleep_interruptible(1000))
208 flush_signals(current); 221 flush_signals(current);
222 w1_process_callbacks(dev);
209 } 223 }
224 w1_process_callbacks(dev);
210 225
211 memset(&msg, 0, sizeof(msg)); 226 memset(&msg, 0, sizeof(msg));
212 msg.id.mst.id = dev->id; 227 msg.id.mst.id = dev->id;
@@ -216,6 +231,10 @@ void __w1_remove_master_device(struct w1_master *dev)
216 w1_free_dev(dev); 231 w1_free_dev(dev);
217} 232}
218 233
234/**
235 * w1_remove_master_device() - unregister a master device
236 * @bm: master bus device to remove
237 */
219void w1_remove_master_device(struct w1_bus_master *bm) 238void w1_remove_master_device(struct w1_bus_master *bm)
220{ 239{
221 struct w1_master *dev, *found = NULL; 240 struct w1_master *dev, *found = NULL;
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index e10acc237733..282092421cc9 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -62,7 +62,9 @@ static void w1_write_bit(struct w1_master *dev, int bit);
62static u8 w1_read_bit(struct w1_master *dev); 62static u8 w1_read_bit(struct w1_master *dev);
63 63
64/** 64/**
65 * Generates a write-0 or write-1 cycle and samples the level. 65 * w1_touch_bit() - Generates a write-0 or write-1 cycle and samples the level.
66 * @dev: the master device
67 * @bit: 0 - write a 0, 1 - write a 0 read the level
66 */ 68 */
67static u8 w1_touch_bit(struct w1_master *dev, int bit) 69static u8 w1_touch_bit(struct w1_master *dev, int bit)
68{ 70{
@@ -77,7 +79,10 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
77} 79}
78 80
79/** 81/**
80 * Generates a write-0 or write-1 cycle. 82 * w1_write_bit() - Generates a write-0 or write-1 cycle.
83 * @dev: the master device
84 * @bit: bit to write
85 *
81 * Only call if dev->bus_master->touch_bit is NULL 86 * Only call if dev->bus_master->touch_bit is NULL
82 */ 87 */
83static void w1_write_bit(struct w1_master *dev, int bit) 88static void w1_write_bit(struct w1_master *dev, int bit)
@@ -102,11 +107,12 @@ static void w1_write_bit(struct w1_master *dev, int bit)
102} 107}
103 108
104/** 109/**
110 * w1_pre_write() - pre-write operations
111 * @dev: the master device
112 *
105 * Pre-write operation, currently only supporting strong pullups. 113 * Pre-write operation, currently only supporting strong pullups.
106 * Program the hardware for a strong pullup, if one has been requested and 114 * Program the hardware for a strong pullup, if one has been requested and
107 * the hardware supports it. 115 * the hardware supports it.
108 *
109 * @param dev the master device
110 */ 116 */
111static void w1_pre_write(struct w1_master *dev) 117static void w1_pre_write(struct w1_master *dev)
112{ 118{
@@ -118,11 +124,12 @@ static void w1_pre_write(struct w1_master *dev)
118} 124}
119 125
120/** 126/**
127 * w1_post_write() - post-write options
128 * @dev: the master device
129 *
121 * Post-write operation, currently only supporting strong pullups. 130 * Post-write operation, currently only supporting strong pullups.
122 * If a strong pullup was requested, clear it if the hardware supports 131 * If a strong pullup was requested, clear it if the hardware supports
123 * them, or execute the delay otherwise, in either case clear the request. 132 * them, or execute the delay otherwise, in either case clear the request.
124 *
125 * @param dev the master device
126 */ 133 */
127static void w1_post_write(struct w1_master *dev) 134static void w1_post_write(struct w1_master *dev)
128{ 135{
@@ -136,10 +143,9 @@ static void w1_post_write(struct w1_master *dev)
136} 143}
137 144
138/** 145/**
139 * Writes 8 bits. 146 * w1_write_8() - Writes 8 bits.
140 * 147 * @dev: the master device
141 * @param dev the master device 148 * @byte: the byte to write
142 * @param byte the byte to write
143 */ 149 */
144void w1_write_8(struct w1_master *dev, u8 byte) 150void w1_write_8(struct w1_master *dev, u8 byte)
145{ 151{
@@ -161,7 +167,9 @@ EXPORT_SYMBOL_GPL(w1_write_8);
161 167
162 168
163/** 169/**
164 * Generates a write-1 cycle and samples the level. 170 * w1_read_bit() - Generates a write-1 cycle and samples the level.
171 * @dev: the master device
172 *
165 * Only call if dev->bus_master->touch_bit is NULL 173 * Only call if dev->bus_master->touch_bit is NULL
166 */ 174 */
167static u8 w1_read_bit(struct w1_master *dev) 175static u8 w1_read_bit(struct w1_master *dev)
@@ -185,16 +193,17 @@ static u8 w1_read_bit(struct w1_master *dev)
185} 193}
186 194
187/** 195/**
 188 * Does a triplet - used for searching ROM addresses. 196 * w1_triplet() - Does a triplet - used for searching ROM addresses.
197 * @dev: the master device
198 * @bdir: the bit to write if both id_bit and comp_bit are 0
199 *
189 * Return bits: 200 * Return bits:
190 * bit 0 = id_bit 201 * bit 0 = id_bit
191 * bit 1 = comp_bit 202 * bit 1 = comp_bit
192 * bit 2 = dir_taken 203 * bit 2 = dir_taken
193 * If both bits 0 & 1 are set, the search should be restarted. 204 * If both bits 0 & 1 are set, the search should be restarted.
194 * 205 *
195 * @param dev the master device 206 * Return: bit fields - see above
196 * @param bdir the bit to write if both id_bit and comp_bit are 0
197 * @return bit fields - see above
198 */ 207 */
199u8 w1_triplet(struct w1_master *dev, int bdir) 208u8 w1_triplet(struct w1_master *dev, int bdir)
200{ 209{
@@ -226,10 +235,10 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
226} 235}
227 236
228/** 237/**
229 * Reads 8 bits. 238 * w1_read_8() - Reads 8 bits.
239 * @dev: the master device
230 * 240 *
231 * @param dev the master device 241 * Return: the byte read
232 * @return the byte read
233 */ 242 */
234u8 w1_read_8(struct w1_master *dev) 243u8 w1_read_8(struct w1_master *dev)
235{ 244{
@@ -247,11 +256,10 @@ u8 w1_read_8(struct w1_master *dev)
247EXPORT_SYMBOL_GPL(w1_read_8); 256EXPORT_SYMBOL_GPL(w1_read_8);
248 257
249/** 258/**
250 * Writes a series of bytes. 259 * w1_write_block() - Writes a series of bytes.
251 * 260 * @dev: the master device
252 * @param dev the master device 261 * @buf: pointer to the data to write
253 * @param buf pointer to the data to write 262 * @len: the number of bytes to write
254 * @param len the number of bytes to write
255 */ 263 */
256void w1_write_block(struct w1_master *dev, const u8 *buf, int len) 264void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
257{ 265{
@@ -269,11 +277,10 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
269EXPORT_SYMBOL_GPL(w1_write_block); 277EXPORT_SYMBOL_GPL(w1_write_block);
270 278
271/** 279/**
272 * Touches a series of bytes. 280 * w1_touch_block() - Touches a series of bytes.
273 * 281 * @dev: the master device
274 * @param dev the master device 282 * @buf: pointer to the data to write
275 * @param buf pointer to the data to write 283 * @len: the number of bytes to write
276 * @param len the number of bytes to write
277 */ 284 */
278void w1_touch_block(struct w1_master *dev, u8 *buf, int len) 285void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
279{ 286{
@@ -294,12 +301,11 @@ void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
294EXPORT_SYMBOL_GPL(w1_touch_block); 301EXPORT_SYMBOL_GPL(w1_touch_block);
295 302
296/** 303/**
297 * Reads a series of bytes. 304 * w1_read_block() - Reads a series of bytes.
298 * 305 * @dev: the master device
299 * @param dev the master device 306 * @buf: pointer to the buffer to fill
300 * @param buf pointer to the buffer to fill 307 * @len: the number of bytes to read
301 * @param len the number of bytes to read 308 * Return: the number of bytes read
302 * @return the number of bytes read
303 */ 309 */
304u8 w1_read_block(struct w1_master *dev, u8 *buf, int len) 310u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
305{ 311{
@@ -319,10 +325,9 @@ u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
319EXPORT_SYMBOL_GPL(w1_read_block); 325EXPORT_SYMBOL_GPL(w1_read_block);
320 326
321/** 327/**
322 * Issues a reset bus sequence. 328 * w1_reset_bus() - Issues a reset bus sequence.
323 * 329 * @dev: the master device
324 * @param dev The bus master pointer 330 * Return: 0=Device present, 1=No device present or error
325 * @return 0=Device present, 1=No device present or error
326 */ 331 */
327int w1_reset_bus(struct w1_master *dev) 332int w1_reset_bus(struct w1_master *dev)
328{ 333{
@@ -383,12 +388,15 @@ void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_cal
383} 388}
384 389
385/** 390/**
391 * w1_reset_select_slave() - reset and select a slave
392 * @sl: the slave to select
393 *
386 * Resets the bus and then selects the slave by sending either a skip rom 394 * Resets the bus and then selects the slave by sending either a skip rom
387 * or a rom match. 395 * or a rom match. A skip rom is issued if there is only one device
396 * registered on the bus.
388 * The w1 master lock must be held. 397 * The w1 master lock must be held.
389 * 398 *
390 * @param sl the slave to select 399 * Return: 0=success, anything else=error
391 * @return 0=success, anything else=error
392 */ 400 */
393int w1_reset_select_slave(struct w1_slave *sl) 401int w1_reset_select_slave(struct w1_slave *sl)
394{ 402{
@@ -409,6 +417,9 @@ int w1_reset_select_slave(struct w1_slave *sl)
409EXPORT_SYMBOL_GPL(w1_reset_select_slave); 417EXPORT_SYMBOL_GPL(w1_reset_select_slave);
410 418
411/** 419/**
420 * w1_reset_resume_command() - resume instead of another match ROM
421 * @dev: the master device
422 *
412 * When the workflow with a slave amongst many requires several 423 * When the workflow with a slave amongst many requires several
413 * successive commands a reset between each, this function is similar 424 * successive commands a reset between each, this function is similar
414 * to doing a reset then a match ROM for the last matched ROM. The 425 * to doing a reset then a match ROM for the last matched ROM. The
@@ -420,8 +431,6 @@ EXPORT_SYMBOL_GPL(w1_reset_select_slave);
420 * doesn't work of course, but the resume command is the next best thing. 431 * doesn't work of course, but the resume command is the next best thing.
421 * 432 *
422 * The w1 master lock must be held. 433 * The w1 master lock must be held.
423 *
424 * @param dev the master device
425 */ 434 */
426int w1_reset_resume_command(struct w1_master *dev) 435int w1_reset_resume_command(struct w1_master *dev)
427{ 436{
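The workflow described above, sketched as a hypothetical slave-driver helper: select the slave once with a match ROM, then address it again after the next reset with w1_reset_resume_command() instead of a second match. The two command bytes are placeholders, and the "master lock" is taken here as bus_mutex, the lock the in-tree slave drivers use for bus I/O:

#include "w1.h"

static int example_two_commands(struct w1_slave *sl)
{
	struct w1_master *dev = sl->master;
	int err;

	mutex_lock(&dev->bus_mutex);

	err = w1_reset_select_slave(sl);	/* reset + match (or skip) ROM */
	if (!err) {
		w1_write_8(dev, 0x44);		/* placeholder: first command */

		/* reset, then resume instead of a second match ROM */
		err = w1_reset_resume_command(dev);
		if (!err)
			w1_write_8(dev, 0xBE);	/* placeholder: second command */
	}

	mutex_unlock(&dev->bus_mutex);
	return err;
}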
@@ -435,6 +444,10 @@ int w1_reset_resume_command(struct w1_master *dev)
435EXPORT_SYMBOL_GPL(w1_reset_resume_command); 444EXPORT_SYMBOL_GPL(w1_reset_resume_command);
436 445
437/** 446/**
447 * w1_next_pullup() - register for a strong pullup
448 * @dev: the master device
449 * @delay: time in milliseconds
450 *
438 * Put out a strong pull-up of the specified duration after the next write 451 * Put out a strong pull-up of the specified duration after the next write
439 * operation. Not all hardware supports strong pullups. Hardware that 452 * operation. Not all hardware supports strong pullups. Hardware that
440 * doesn't support strong pullups will sleep for the given time after the 453 * doesn't support strong pullups will sleep for the given time after the
@@ -442,8 +455,7 @@ EXPORT_SYMBOL_GPL(w1_reset_resume_command);
442 * the next write, specifying zero will clear a previous request. 455 * the next write, specifying zero will clear a previous request.
443 * The w1 master lock must be held. 456 * The w1 master lock must be held.
444 * 457 *
445 * @param delay time in milliseconds 458 * Return: 0=success, anything else=error
446 * @return 0=success, anything else=error
447 */ 459 */
448void w1_next_pullup(struct w1_master *dev, int delay) 460void w1_next_pullup(struct w1_master *dev, int delay)
449{ 461{
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 40788c925d1c..5234964fe001 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -45,7 +45,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
45 45
46 memcpy(w, msg, sizeof(struct w1_netlink_msg)); 46 memcpy(w, msg, sizeof(struct w1_netlink_msg));
47 47
48 cn_netlink_send(m, 0, GFP_KERNEL); 48 cn_netlink_send(m, dev->portid, 0, GFP_KERNEL);
49} 49}
50 50
51static void w1_send_slave(struct w1_master *dev, u64 rn) 51static void w1_send_slave(struct w1_master *dev, u64 rn)
@@ -54,53 +54,95 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
54 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); 54 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); 55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
56 int avail; 56 int avail;
57 57 u64 *data;
58 /* update kernel slave list */
59 w1_slave_found(dev, rn);
60 58
61 avail = dev->priv_size - cmd->len; 59 avail = dev->priv_size - cmd->len;
62 60
63 if (avail > 8) { 61 if (avail < 8) {
64 u64 *data = (void *)(cmd + 1) + cmd->len; 62 msg->ack++;
63 cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
65 64
66 *data = rn; 65 msg->len = sizeof(struct w1_netlink_msg) +
67 cmd->len += 8; 66 sizeof(struct w1_netlink_cmd);
68 hdr->len += 8; 67 hdr->len = sizeof(struct w1_netlink_cmd);
69 msg->len += 8; 68 cmd->len = 0;
70 return;
71 } 69 }
72 70
73 msg->ack++; 71 data = (void *)(cmd + 1) + cmd->len;
74 cn_netlink_send(msg, 0, GFP_KERNEL);
75 72
76 msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); 73 *data = rn;
77 hdr->len = sizeof(struct w1_netlink_cmd); 74 cmd->len += 8;
78 cmd->len = 0; 75 hdr->len += 8;
76 msg->len += 8;
79} 77}
80 78
81static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg, 79static void w1_found_send_slave(struct w1_master *dev, u64 rn)
82 unsigned int avail)
83{ 80{
84 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); 81 /* update kernel slave list */
85 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); 82 w1_slave_found(dev, rn);
86 int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH;
87 83
88 dev->priv = msg; 84 w1_send_slave(dev, rn);
89 dev->priv_size = avail; 85}
86
87/* Get the current slave list, or search (with or without alarm) */
88static int w1_get_slaves(struct w1_master *dev,
89 struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
90 struct w1_netlink_cmd *req_cmd)
91{
92 struct cn_msg *msg;
93 struct w1_netlink_msg *hdr;
94 struct w1_netlink_cmd *cmd;
95 struct w1_slave *sl;
96
97 msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
98 if (!msg)
99 return -ENOMEM;
100
101 msg->id = req_msg->id;
102 msg->seq = req_msg->seq;
103 msg->ack = 0;
104 msg->len = sizeof(struct w1_netlink_msg) +
105 sizeof(struct w1_netlink_cmd);
106
107 hdr = (struct w1_netlink_msg *)(msg + 1);
108 cmd = (struct w1_netlink_cmd *)(hdr + 1);
109
110 hdr->type = W1_MASTER_CMD;
111 hdr->id = req_hdr->id;
112 hdr->len = sizeof(struct w1_netlink_cmd);
113
114 cmd->cmd = req_cmd->cmd;
115 cmd->len = 0;
90 116
91 w1_search_process_cb(dev, search_type, w1_send_slave); 117 dev->priv = msg;
118 dev->priv_size = PAGE_SIZE - msg->len - sizeof(struct cn_msg);
119
120 if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
121 __u64 rn;
122 mutex_lock(&dev->list_mutex);
123 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
124 memcpy(&rn, &sl->reg_num, sizeof(rn));
125 w1_send_slave(dev, rn);
126 }
127 mutex_unlock(&dev->list_mutex);
128 } else {
129 w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ?
130 W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
131 }
92 132
93 msg->ack = 0; 133 msg->ack = 0;
94 cn_netlink_send(msg, 0, GFP_KERNEL); 134 cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL);
95 135
96 dev->priv = NULL; 136 dev->priv = NULL;
97 dev->priv_size = 0; 137 dev->priv_size = 0;
98 138
139 kfree(msg);
140
99 return 0; 141 return 0;
100} 142}
101 143
102static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr, 144static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
103 struct w1_netlink_cmd *cmd) 145 struct w1_netlink_cmd *cmd, u32 portid)
104{ 146{
105 void *data; 147 void *data;
106 struct w1_netlink_msg *h; 148 struct w1_netlink_msg *h;
@@ -131,7 +173,7 @@ static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
131 173
132 memcpy(c->data, cmd->data, c->len); 174 memcpy(c->data, cmd->data, c->len);
133 175
134 err = cn_netlink_send(cm, 0, GFP_KERNEL); 176 err = cn_netlink_send(cm, portid, 0, GFP_KERNEL);
135 177
136 kfree(data); 178 kfree(data);
137 179
@@ -146,11 +188,11 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
146 switch (cmd->cmd) { 188 switch (cmd->cmd) {
147 case W1_CMD_TOUCH: 189 case W1_CMD_TOUCH:
148 w1_touch_block(dev, cmd->data, cmd->len); 190 w1_touch_block(dev, cmd->data, cmd->len);
149 w1_send_read_reply(msg, hdr, cmd); 191 w1_send_read_reply(msg, hdr, cmd, dev->portid);
150 break; 192 break;
151 case W1_CMD_READ: 193 case W1_CMD_READ:
152 w1_read_block(dev, cmd->data, cmd->len); 194 w1_read_block(dev, cmd->data, cmd->len);
153 w1_send_read_reply(msg, hdr, cmd); 195 w1_send_read_reply(msg, hdr, cmd, dev->portid);
154 break; 196 break;
155 case W1_CMD_WRITE: 197 case W1_CMD_WRITE:
156 w1_write_block(dev, cmd->data, cmd->len); 198 w1_write_block(dev, cmd->data, cmd->len);
@@ -163,38 +205,57 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
163 return err; 205 return err;
164} 206}
165 207
166static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg, 208static int w1_process_command_addremove(struct w1_master *dev,
167 struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd) 209 struct cn_msg *msg, struct w1_netlink_msg *hdr,
210 struct w1_netlink_cmd *cmd)
168{ 211{
169 int err = -EINVAL; 212 struct w1_slave *sl;
170 struct cn_msg *msg; 213 int err = 0;
171 struct w1_netlink_msg *hdr; 214 struct w1_reg_num *id;
172 struct w1_netlink_cmd *cmd;
173 215
174 msg = kzalloc(PAGE_SIZE, GFP_KERNEL); 216 if (cmd->len != 8)
175 if (!msg) 217 return -EINVAL;
176 return -ENOMEM;
177 218
178 msg->id = req_msg->id; 219 id = (struct w1_reg_num *)cmd->data;
179 msg->seq = req_msg->seq;
180 msg->ack = 0;
181 msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
182 220
183 hdr = (struct w1_netlink_msg *)(msg + 1); 221 sl = w1_slave_search_device(dev, id);
184 cmd = (struct w1_netlink_cmd *)(hdr + 1); 222 switch (cmd->cmd) {
223 case W1_CMD_SLAVE_ADD:
224 if (sl)
225 err = -EINVAL;
226 else
227 err = w1_attach_slave_device(dev, id);
228 break;
229 case W1_CMD_SLAVE_REMOVE:
230 if (sl)
231 w1_slave_detach(sl);
232 else
233 err = -EINVAL;
234 break;
235 default:
236 err = -EINVAL;
237 break;
238 }
185 239
186 hdr->type = W1_MASTER_CMD; 240 return err;
187 hdr->id = req_hdr->id; 241}
188 hdr->len = sizeof(struct w1_netlink_cmd);
189 242
190 cmd->cmd = req_cmd->cmd; 243static int w1_process_command_master(struct w1_master *dev,
191 cmd->len = 0; 244 struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr,
245 struct w1_netlink_cmd *req_cmd)
246{
247 int err = -EINVAL;
192 248
 193 switch (cmd->cmd) { 249 /* drop bus_mutex for search (does its own locking), and add/remove
250 * which doesn't use the bus
251 */
252 switch (req_cmd->cmd) {
194 case W1_CMD_SEARCH: 253 case W1_CMD_SEARCH:
195 case W1_CMD_ALARM_SEARCH: 254 case W1_CMD_ALARM_SEARCH:
196 err = w1_process_search_command(dev, msg, 255 case W1_CMD_LIST_SLAVES:
197 PAGE_SIZE - msg->len - sizeof(struct cn_msg)); 256 mutex_unlock(&dev->bus_mutex);
257 err = w1_get_slaves(dev, req_msg, req_hdr, req_cmd);
258 mutex_lock(&dev->bus_mutex);
198 break; 259 break;
199 case W1_CMD_READ: 260 case W1_CMD_READ:
200 case W1_CMD_WRITE: 261 case W1_CMD_WRITE:
@@ -204,12 +265,20 @@ static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_m
204 case W1_CMD_RESET: 265 case W1_CMD_RESET:
205 err = w1_reset_bus(dev); 266 err = w1_reset_bus(dev);
206 break; 267 break;
268 case W1_CMD_SLAVE_ADD:
269 case W1_CMD_SLAVE_REMOVE:
270 mutex_unlock(&dev->bus_mutex);
271 mutex_lock(&dev->mutex);
272 err = w1_process_command_addremove(dev, req_msg, req_hdr,
273 req_cmd);
274 mutex_unlock(&dev->mutex);
275 mutex_lock(&dev->bus_mutex);
276 break;
207 default: 277 default:
208 err = -EINVAL; 278 err = -EINVAL;
209 break; 279 break;
210 } 280 }
211 281
212 kfree(msg);
213 return err; 282 return err;
214} 283}
215 284
@@ -223,7 +292,8 @@ static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
223 return w1_process_command_io(sl->master, msg, hdr, cmd); 292 return w1_process_command_io(sl->master, msg, hdr, cmd);
224} 293}
225 294
226static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd) 295static int w1_process_command_root(struct cn_msg *msg,
296 struct w1_netlink_msg *mcmd, u32 portid)
227{ 297{
228 struct w1_master *m; 298 struct w1_master *m;
229 struct cn_msg *cn; 299 struct cn_msg *cn;
@@ -256,7 +326,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
256 mutex_lock(&w1_mlock); 326 mutex_lock(&w1_mlock);
257 list_for_each_entry(m, &w1_masters, w1_master_entry) { 327 list_for_each_entry(m, &w1_masters, w1_master_entry) {
258 if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { 328 if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
259 cn_netlink_send(cn, 0, GFP_KERNEL); 329 cn_netlink_send(cn, portid, 0, GFP_KERNEL);
260 cn->ack++; 330 cn->ack++;
261 cn->len = sizeof(struct w1_netlink_msg); 331 cn->len = sizeof(struct w1_netlink_msg);
262 w->len = 0; 332 w->len = 0;
@@ -269,7 +339,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
269 id++; 339 id++;
270 } 340 }
271 cn->ack = 0; 341 cn->ack = 0;
272 cn_netlink_send(cn, 0, GFP_KERNEL); 342 cn_netlink_send(cn, portid, 0, GFP_KERNEL);
273 mutex_unlock(&w1_mlock); 343 mutex_unlock(&w1_mlock);
274 344
275 kfree(cn); 345 kfree(cn);
@@ -277,7 +347,7 @@ static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mc
277} 347}
278 348
279static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg, 349static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
280 struct w1_netlink_cmd *rcmd, int error) 350 struct w1_netlink_cmd *rcmd, int portid, int error)
281{ 351{
282 struct cn_msg *cmsg; 352 struct cn_msg *cmsg;
283 struct w1_netlink_msg *msg; 353 struct w1_netlink_msg *msg;
@@ -304,35 +374,147 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
304 cmsg->len += sizeof(*cmd); 374 cmsg->len += sizeof(*cmd);
305 } 375 }
306 376
307 error = cn_netlink_send(cmsg, 0, GFP_KERNEL); 377 error = cn_netlink_send(cmsg, portid, 0, GFP_KERNEL);
308 kfree(cmsg); 378 kfree(cmsg);
309 379
310 return error; 380 return error;
311} 381}
312 382
383/* Bundle together a reference count, the full message, and broken out
384 * commands to be executed on each w1 master kthread in one memory allocation.
385 */
386struct w1_cb_block {
387 atomic_t refcnt;
388 u32 portid; /* Sending process port ID */
389 struct cn_msg msg;
390 /* cn_msg data */
391 /* one or more variable length struct w1_cb_node */
392};
393struct w1_cb_node {
394 struct w1_async_cmd async;
395 /* pointers within w1_cb_block and msg data */
396 struct w1_cb_block *block;
397 struct w1_netlink_msg *m;
398 struct w1_slave *sl;
399 struct w1_master *dev;
400};
401
402static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
403{
404 struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
405 async);
406 u16 mlen = node->m->len;
407 u8 *cmd_data = node->m->data;
408 int err = 0;
409 struct w1_slave *sl = node->sl;
410 struct w1_netlink_cmd *cmd = NULL;
411
412 mutex_lock(&dev->bus_mutex);
413 dev->portid = node->block->portid;
414 if (sl && w1_reset_select_slave(sl))
415 err = -ENODEV;
416
417 while (mlen && !err) {
418 cmd = (struct w1_netlink_cmd *)cmd_data;
419
420 if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
421 err = -E2BIG;
422 break;
423 }
424
425 if (sl)
426 err = w1_process_command_slave(sl, &node->block->msg,
427 node->m, cmd);
428 else
429 err = w1_process_command_master(dev, &node->block->msg,
430 node->m, cmd);
431
432 w1_netlink_send_error(&node->block->msg, node->m, cmd,
433 node->block->portid, err);
434 err = 0;
435
436 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
437 mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
438 }
439
440 if (!cmd || err)
441 w1_netlink_send_error(&node->block->msg, node->m, cmd,
442 node->block->portid, err);
443
444 if (sl)
445 w1_unref_slave(sl);
446 else
447 atomic_dec(&dev->refcnt);
448 dev->portid = 0;
449 mutex_unlock(&dev->bus_mutex);
450
451 mutex_lock(&dev->list_mutex);
452 list_del(&async_cmd->async_entry);
453 mutex_unlock(&dev->list_mutex);
454
455 if (atomic_sub_return(1, &node->block->refcnt) == 0)
456 kfree(node->block);
457}
458
313static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) 459static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
314{ 460{
315 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); 461 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
316 struct w1_netlink_cmd *cmd;
317 struct w1_slave *sl; 462 struct w1_slave *sl;
318 struct w1_master *dev; 463 struct w1_master *dev;
464 u16 msg_len;
319 int err = 0; 465 int err = 0;
466 struct w1_cb_block *block = NULL;
467 struct w1_cb_node *node = NULL;
468 int node_count = 0;
469
470 /* Count the number of master or slave commands there are to allocate
471 * space for one cb_node each.
472 */
473 msg_len = msg->len;
474 while (msg_len && !err) {
475 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
476 err = -E2BIG;
477 break;
478 }
479
480 if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD)
481 ++node_count;
320 482
321 while (msg->len && !err) { 483 msg_len -= sizeof(struct w1_netlink_msg) + m->len;
484 m = (struct w1_netlink_msg *)(((u8 *)m) +
485 sizeof(struct w1_netlink_msg) + m->len);
486 }
487 m = (struct w1_netlink_msg *)(msg + 1);
488 if (node_count) {
489 /* msg->len doesn't include itself */
490 long size = sizeof(struct w1_cb_block) + msg->len +
491 node_count*sizeof(struct w1_cb_node);
492 block = kmalloc(size, GFP_KERNEL);
493 if (!block) {
494 w1_netlink_send_error(msg, m, NULL, nsp->portid,
495 -ENOMEM);
496 return;
497 }
498 atomic_set(&block->refcnt, 1);
499 block->portid = nsp->portid;
500 memcpy(&block->msg, msg, sizeof(*msg) + msg->len);
501 node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
502 }
503
504 msg_len = msg->len;
505 while (msg_len && !err) {
322 struct w1_reg_num id; 506 struct w1_reg_num id;
323 u16 mlen = m->len; 507 u16 mlen = m->len;
324 u8 *cmd_data = m->data;
325 508
326 dev = NULL; 509 dev = NULL;
327 sl = NULL; 510 sl = NULL;
328 cmd = NULL;
329 511
330 memcpy(&id, m->id.id, sizeof(id)); 512 memcpy(&id, m->id.id, sizeof(id));
331#if 0 513#if 0
332 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n", 514 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
333 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len); 515 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
334#endif 516#endif
335 if (m->len + sizeof(struct w1_netlink_msg) > msg->len) { 517 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
336 err = -E2BIG; 518 err = -E2BIG;
337 break; 519 break;
338 } 520 }
@@ -344,7 +526,7 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
344 if (sl) 526 if (sl)
345 dev = sl->master; 527 dev = sl->master;
346 } else { 528 } else {
347 err = w1_process_command_root(msg, m); 529 err = w1_process_command_root(msg, m, nsp->portid);
348 goto out_cont; 530 goto out_cont;
349 } 531 }
350 532
@@ -357,41 +539,24 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
357 if (!mlen) 539 if (!mlen)
358 goto out_cont; 540 goto out_cont;
359 541
360 mutex_lock(&dev->mutex); 542 atomic_inc(&block->refcnt);
361 543 node->async.cb = w1_process_cb;
362 if (sl && w1_reset_select_slave(sl)) { 544 node->block = block;
363 err = -ENODEV; 545 node->m = (struct w1_netlink_msg *)((u8 *)&block->msg +
364 goto out_up; 546 (size_t)((u8 *)m - (u8 *)msg));
365 } 547 node->sl = sl;
548 node->dev = dev;
366 549
367 while (mlen) { 550 mutex_lock(&dev->list_mutex);
368 cmd = (struct w1_netlink_cmd *)cmd_data; 551 list_add_tail(&node->async.async_entry, &dev->async_list);
552 wake_up_process(dev->thread);
553 mutex_unlock(&dev->list_mutex);
554 ++node;
369 555
370 if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
371 err = -E2BIG;
372 break;
373 }
374
375 if (sl)
376 err = w1_process_command_slave(sl, msg, m, cmd);
377 else
378 err = w1_process_command_master(dev, msg, m, cmd);
379
380 w1_netlink_send_error(msg, m, cmd, err);
381 err = 0;
382
383 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
384 mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
385 }
386out_up:
387 atomic_dec(&dev->refcnt);
388 if (sl)
389 atomic_dec(&sl->refcnt);
390 mutex_unlock(&dev->mutex);
391out_cont: 556out_cont:
392 if (!cmd || err) 557 if (err)
393 w1_netlink_send_error(msg, m, cmd, err); 558 w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
394 msg->len -= sizeof(struct w1_netlink_msg) + m->len; 559 msg_len -= sizeof(struct w1_netlink_msg) + m->len;
395 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); 560 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
396 561
397 /* 562 /*
@@ -400,6 +565,8 @@ out_cont:
400 if (err == -ENODEV) 565 if (err == -ENODEV)
401 err = 0; 566 err = 0;
402 } 567 }
568 if (block && atomic_sub_return(1, &block->refcnt) == 0)
569 kfree(block);
403} 570}
404 571
405int w1_init_netlink(void) 572int w1_init_netlink(void)
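
The new w1_cb_block/w1_cb_node scheme above keeps the reference count, a copy of the incoming cn_msg payload, and one w1_cb_node per master or slave command in a single kmalloc, so the per-master kthread work never has to track separate allocations. Below is a minimal C sketch of that layout and the pointer arithmetic that carves it up, mirroring the hunk above; alloc_cb_block() is a hypothetical helper written only for illustration and assumes the structures introduced by this patch plus the usual kernel headers.

	static struct w1_cb_node *alloc_cb_block(struct cn_msg *msg, u32 portid,
						 int node_count,
						 struct w1_cb_block **out)
	{
		/* msg->len counts only the payload, not struct cn_msg itself */
		size_t size = sizeof(struct w1_cb_block) + msg->len +
			      node_count * sizeof(struct w1_cb_node);
		struct w1_cb_block *block = kmalloc(size, GFP_KERNEL);

		if (!block)
			return NULL;

		/* one reference held by the submitter; each queued node adds one */
		atomic_set(&block->refcnt, 1);
		block->portid = portid;
		memcpy(&block->msg, msg, sizeof(*msg) + msg->len);

		*out = block;
		/* the w1_cb_node array starts right after the copied message payload */
		return (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
	}

Each node queued on a master's async_list then takes its own reference with atomic_inc(&block->refcnt); whoever drops the count to zero (w1_process_cb() after the last command, or w1_cn_callback() on its error path) frees the block.
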
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index b0922dc29658..1e9504e67650 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -27,6 +27,18 @@
27 27
28#include "w1.h" 28#include "w1.h"
29 29
30/**
31 * enum w1_netlink_message_types - message type
32 *
33 * @W1_SLAVE_ADD: notification that a slave device was added
34 * @W1_SLAVE_REMOVE: notification that a slave device was removed
35 * @W1_MASTER_ADD: notification that a new bus master was added
 36 * @W1_MASTER_REMOVE: notification that a bus master was removed
37 * @W1_MASTER_CMD: initiate operations on a specific master
38 * @W1_SLAVE_CMD: sends reset, selects the slave, then does a read/write/touch
39 * operation
40 * @W1_LIST_MASTERS: used to determine the bus master identifiers
41 */
30enum w1_netlink_message_types { 42enum w1_netlink_message_types {
31 W1_SLAVE_ADD = 0, 43 W1_SLAVE_ADD = 0,
32 W1_SLAVE_REMOVE, 44 W1_SLAVE_REMOVE,
@@ -52,6 +64,22 @@ struct w1_netlink_msg
52 __u8 data[0]; 64 __u8 data[0];
53}; 65};
54 66
67/**
68 * enum w1_commands - commands available for master or slave operations
69 * @W1_CMD_READ: read len bytes
70 * @W1_CMD_WRITE: write len bytes
71 * @W1_CMD_SEARCH: initiate a standard search, returns only the slave
72 * devices found during that search
73 * @W1_CMD_ALARM_SEARCH: search for devices that are currently alarming
74 * @W1_CMD_TOUCH: Touches a series of bytes.
75 * @W1_CMD_RESET: sends a bus reset on the given master
76 * @W1_CMD_SLAVE_ADD: adds a slave to the given master,
77 * 8 byte slave id at data[0]
 78 * @W1_CMD_SLAVE_REMOVE: removes a slave from the given master,
79 * 8 byte slave id at data[0]
80 * @W1_CMD_LIST_SLAVES: list of slaves registered on this master
81 * @W1_CMD_MAX: number of available commands
82 */
55enum w1_commands { 83enum w1_commands {
56 W1_CMD_READ = 0, 84 W1_CMD_READ = 0,
57 W1_CMD_WRITE, 85 W1_CMD_WRITE,
@@ -59,7 +87,10 @@ enum w1_commands {
59 W1_CMD_ALARM_SEARCH, 87 W1_CMD_ALARM_SEARCH,
60 W1_CMD_TOUCH, 88 W1_CMD_TOUCH,
61 W1_CMD_RESET, 89 W1_CMD_RESET,
62 W1_CMD_MAX, 90 W1_CMD_SLAVE_ADD,
91 W1_CMD_SLAVE_REMOVE,
92 W1_CMD_LIST_SLAVES,
93 W1_CMD_MAX
63}; 94};
64 95
65struct w1_netlink_cmd 96struct w1_netlink_cmd
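
The kerneldoc added above spells out the on-the-wire nesting: a connector cn_msg carries one or more w1_netlink_msg headers, and a W1_MASTER_CMD or W1_SLAVE_CMD message in turn carries one or more w1_netlink_cmd entries, with every len field covering only the payload that follows its own header. The following is a rough sketch of what a sender has to assemble for a single W1_CMD_RESET aimed at a bus master, assuming the CN_W1_IDX/CN_W1_VAL identifiers from linux/connector.h and the field layout of w1_netlink.h; build_reset() and its buffer handling are illustrative only, and a real userspace client still wraps the result in an nlmsghdr on a NETLINK_CONNECTOR socket.

	/* Fill @buf with cn_msg -> w1_netlink_msg -> w1_netlink_cmd for a bus
	 * reset on the master with identifier @master_id; returns total size.
	 */
	static size_t build_reset(void *buf, __u32 master_id, __u32 seq)
	{
		struct cn_msg *cn = buf;
		struct w1_netlink_msg *m = (struct w1_netlink_msg *)cn->data;
		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)m->data;

		memset(buf, 0, sizeof(*cn) + sizeof(*m) + sizeof(*cmd));

		cmd->cmd = W1_CMD_RESET;
		cmd->len = 0;				/* no command payload */

		m->type = W1_MASTER_CMD;
		m->id.mst.id = master_id;
		m->len = sizeof(*cmd) + cmd->len;	/* commands that follow */

		cn->id.idx = CN_W1_IDX;			/* connector w1 index/value */
		cn->id.val = CN_W1_VAL;
		cn->seq = seq;
		cn->len = sizeof(*m) + m->len;		/* w1 messages that follow */

		return sizeof(*cn) + cn->len;
	}

The same length bookkeeping is what w1_cn_callback() verifies on receive: each header's len must fit inside the enclosing length, otherwise the message is rejected with -E2BIG.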