diff options
Diffstat (limited to 'drivers/misc')
64 files changed, 6532 insertions, 642 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index df1f86b5c83e..0d0d625fece2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -13,6 +13,20 @@ menuconfig MISC_DEVICES | |||
13 | 13 | ||
14 | if MISC_DEVICES | 14 | if MISC_DEVICES |
15 | 15 | ||
16 | config AD525X_DPOT | ||
17 | tristate "Analog Devices AD525x Digital Potentiometers" | ||
18 | depends on I2C && SYSFS | ||
19 | help | ||
20 | If you say yes here, you get support for the Analog Devices | ||
21 | AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255 | ||
22 | digital potentiometer chips. | ||
23 | |||
24 | See Documentation/misc-devices/ad525x_dpot.txt for the | ||
25 | userspace interface. | ||
26 | |||
27 | This driver can also be built as a module. If so, the module | ||
28 | will be called ad525x_dpot. | ||
29 | |||
16 | config ATMEL_PWM | 30 | config ATMEL_PWM |
17 | tristate "Atmel AT32/AT91 PWM support" | 31 | tristate "Atmel AT32/AT91 PWM support" |
18 | depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 | 32 | depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 |
@@ -173,6 +187,40 @@ config SGI_XP | |||
173 | this feature will allow for direct communication between SSIs | 187 | this feature will allow for direct communication between SSIs |
174 | based on a network adapter and DMA messaging. | 188 | based on a network adapter and DMA messaging. |
175 | 189 | ||
190 | config CS5535_MFGPT | ||
191 | tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" | ||
192 | depends on PCI | ||
193 | depends on X86 | ||
194 | default n | ||
195 | help | ||
196 | This driver provides access to MFGPT functionality for other | ||
197 | drivers that need timers. MFGPTs are available in the CS5535 and | ||
198 | CS5536 companion chips that are found in AMD Geode and several | ||
199 | other platforms. They have a better resolution and max interval | ||
200 | than the generic PIT, and are suitable for use as high-res timers. | ||
201 | You probably don't want to enable this manually; other drivers that | ||
202 | make use of it should enable it. | ||
203 | |||
204 | config CS5535_MFGPT_DEFAULT_IRQ | ||
205 | int | ||
206 | depends on CS5535_MFGPT | ||
207 | default 7 | ||
208 | help | ||
209 | MFGPTs on the CS5535 require an interrupt. The selected IRQ | ||
210 | can be overridden as a module option as well as by drivers that | ||
211 | use the cs5535_mfgpt_ API; however, different architectures might | ||
212 | want to use a different IRQ by default. This is here for | ||
213 | architectures to set as necessary. | ||
214 | |||
215 | config CS5535_CLOCK_EVENT_SRC | ||
216 | tristate "CS5535/CS5536 high-res timer (MFGPT) events" | ||
217 | depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT | ||
218 | help | ||
219 | This driver provides a clock event source based on the MFGPT | ||
220 | timer(s) in the CS5535 and CS5536 companion chips. | ||
221 | MFGPTs have a better resolution and max interval than the | ||
222 | generic PIT, and are suitable for use as high-res timers. | ||
223 | |||
176 | config HP_ILO | 224 | config HP_ILO |
177 | tristate "Channel interface driver for HP iLO/iLO2 processor" | 225 | tristate "Channel interface driver for HP iLO/iLO2 processor" |
178 | depends on PCI | 226 | depends on PCI |
@@ -210,19 +258,6 @@ config SGI_GRU_DEBUG | |||
210 | This option enables additional debugging code for the SGI GRU driver. If | 258 | This option enables additional debugging code for the SGI GRU driver. If |
211 | you are unsure, say N. | 259 | you are unsure, say N. |
212 | 260 | ||
213 | config DELL_LAPTOP | ||
214 | tristate "Dell Laptop Extras (EXPERIMENTAL)" | ||
215 | depends on X86 | ||
216 | depends on DCDBAS | ||
217 | depends on EXPERIMENTAL | ||
218 | depends on BACKLIGHT_CLASS_DEVICE | ||
219 | depends on RFKILL | ||
220 | depends on POWER_SUPPLY | ||
221 | default n | ||
222 | ---help--- | ||
223 | This driver adds support for rfkill and backlight control to Dell | ||
224 | laptops. | ||
225 | |||
226 | config ISL29003 | 261 | config ISL29003 |
227 | tristate "Intersil ISL29003 ambient light sensor" | 262 | tristate "Intersil ISL29003 ambient light sensor" |
228 | depends on I2C && SYSFS | 263 | depends on I2C && SYSFS |
@@ -233,6 +268,16 @@ config ISL29003 | |||
233 | This driver can also be built as a module. If so, the module | 268 | This driver can also be built as a module. If so, the module |
234 | will be called isl29003. | 269 | will be called isl29003. |
235 | 270 | ||
271 | config SENSORS_TSL2550 | ||
272 | tristate "Taos TSL2550 ambient light sensor" | ||
273 | depends on I2C && SYSFS | ||
274 | help | ||
275 | If you say yes here you get support for the Taos TSL2550 | ||
276 | ambient light sensor. | ||
277 | |||
278 | This driver can also be built as a module. If so, the module | ||
279 | will be called tsl2550. | ||
280 | |||
236 | config EP93XX_PWM | 281 | config EP93XX_PWM |
237 | tristate "EP93xx PWM support" | 282 | tristate "EP93xx PWM support" |
238 | depends on ARCH_EP93XX | 283 | depends on ARCH_EP93XX |
@@ -246,8 +291,45 @@ config EP93XX_PWM | |||
246 | To compile this driver as a module, choose M here: the module will | 291 | To compile this driver as a module, choose M here: the module will |
247 | be called ep93xx_pwm. | 292 | be called ep93xx_pwm. |
248 | 293 | ||
294 | config DS1682 | ||
295 | tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" | ||
296 | depends on I2C && EXPERIMENTAL | ||
297 | help | ||
298 | If you say yes here you get support for Dallas Semiconductor | ||
299 | DS1682 Total Elapsed Time Recorder. | ||
300 | |||
301 | This driver can also be built as a module. If so, the module | ||
302 | will be called ds1682. | ||
303 | |||
304 | config TI_DAC7512 | ||
305 | tristate "Texas Instruments DAC7512" | ||
306 | depends on SPI && SYSFS | ||
307 | help | ||
308 | If you say yes here you get support for the Texas Instruments | ||
309 | DAC7512 16-bit digital-to-analog converter. | ||
310 | |||
311 | This driver can also be built as a module. If so, the module | ||
311 | will be called ti_dac7512. | ||
313 | |||
314 | config VMWARE_BALLOON | ||
315 | tristate "VMware Balloon Driver" | ||
316 | depends on X86 | ||
317 | help | ||
318 | This is VMware physical memory management driver which acts | ||
319 | like a "balloon" that can be inflated to reclaim physical pages | ||
320 | by reserving them in the guest and invalidating them in the | ||
321 | monitor, freeing up the underlying machine pages so they can | ||
322 | be allocated to other guests. The balloon can also be deflated | ||
323 | to allow the guest to use more physical memory. | ||
324 | |||
325 | If unsure, say N. | ||
326 | |||
327 | To compile this driver as a module, choose M here: the | ||
328 | module will be called vmware_balloon. | ||
329 | |||
249 | source "drivers/misc/c2port/Kconfig" | 330 | source "drivers/misc/c2port/Kconfig" |
250 | source "drivers/misc/eeprom/Kconfig" | 331 | source "drivers/misc/eeprom/Kconfig" |
251 | source "drivers/misc/cb710/Kconfig" | 332 | source "drivers/misc/cb710/Kconfig" |
333 | source "drivers/misc/iwmc3200top/Kconfig" | ||
252 | 334 | ||
253 | endif # MISC_DEVICES | 335 | endif # MISC_DEVICES |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f982d2ecfde7..7b6f7eefdf8d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_IBM_ASM) += ibmasm/ | 5 | obj-$(CONFIG_IBM_ASM) += ibmasm/ |
6 | obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ | 6 | obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ |
7 | obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o | ||
7 | obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o | 8 | obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o |
8 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o | 9 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o |
9 | obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o | 10 | obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o |
@@ -17,9 +18,15 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o | |||
17 | obj-$(CONFIG_KGDB_TESTS) += kgdbts.o | 18 | obj-$(CONFIG_KGDB_TESTS) += kgdbts.o |
18 | obj-$(CONFIG_SGI_XP) += sgi-xp/ | 19 | obj-$(CONFIG_SGI_XP) += sgi-xp/ |
19 | obj-$(CONFIG_SGI_GRU) += sgi-gru/ | 20 | obj-$(CONFIG_SGI_GRU) += sgi-gru/ |
21 | obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o | ||
20 | obj-$(CONFIG_HP_ILO) += hpilo.o | 22 | obj-$(CONFIG_HP_ILO) += hpilo.o |
21 | obj-$(CONFIG_ISL29003) += isl29003.o | 23 | obj-$(CONFIG_ISL29003) += isl29003.o |
24 | obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o | ||
22 | obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o | 25 | obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o |
26 | obj-$(CONFIG_DS1682) += ds1682.o | ||
27 | obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o | ||
23 | obj-$(CONFIG_C2PORT) += c2port/ | 28 | obj-$(CONFIG_C2PORT) += c2port/ |
29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ | ||
24 | obj-y += eeprom/ | 30 | obj-y += eeprom/ |
25 | obj-y += cb710/ | 31 | obj-y += cb710/ |
32 | obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o | ||
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c new file mode 100644 index 000000000000..30a59f2bacd2 --- /dev/null +++ b/drivers/misc/ad525x_dpot.c | |||
@@ -0,0 +1,666 @@ | |||
1 | /* | ||
2 | * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers | ||
3 | * Copyright (c) 2009 Analog Devices, Inc. | ||
4 | * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> | ||
5 | * | ||
6 | * DEVID #Wipers #Positions Resistor Options (kOhm) | ||
7 | * AD5258 1 64 1, 10, 50, 100 | ||
8 | * AD5259 1 256 5, 10, 50, 100 | ||
9 | * AD5251 2 64 1, 10, 50, 100 | ||
10 | * AD5252 2 256 1, 10, 50, 100 | ||
11 | * AD5255 3 512 25, 250 | ||
12 | * AD5253 4 64 1, 10, 50, 100 | ||
13 | * AD5254 4 256 1, 10, 50, 100 | ||
14 | * | ||
15 | * See Documentation/misc-devices/ad525x_dpot.txt for more info. | ||
16 | * | ||
17 | * derived from ad5258.c | ||
18 | * Copyright (c) 2009 Cyber Switching, Inc. | ||
19 | * Author: Chris Verges <chrisv@cyberswitching.com> | ||
20 | * | ||
21 | * derived from ad5252.c | ||
22 | * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org> | ||
23 | * | ||
24 | * Licensed under the GPL-2 or later. | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/i2c.h> | ||
33 | #include <linux/delay.h> | ||
34 | |||
35 | #define DRIVER_NAME "ad525x_dpot" | ||
36 | #define DRIVER_VERSION "0.1" | ||
37 | |||
38 | enum dpot_devid { | ||
39 | AD5258_ID, | ||
40 | AD5259_ID, | ||
41 | AD5251_ID, | ||
42 | AD5252_ID, | ||
43 | AD5253_ID, | ||
44 | AD5254_ID, | ||
45 | AD5255_ID, | ||
46 | }; | ||
47 | |||
48 | #define AD5258_MAX_POSITION 64 | ||
49 | #define AD5259_MAX_POSITION 256 | ||
50 | #define AD5251_MAX_POSITION 64 | ||
51 | #define AD5252_MAX_POSITION 256 | ||
52 | #define AD5253_MAX_POSITION 64 | ||
53 | #define AD5254_MAX_POSITION 256 | ||
54 | #define AD5255_MAX_POSITION 512 | ||
55 | |||
56 | #define AD525X_RDAC0 0 | ||
57 | #define AD525X_RDAC1 1 | ||
58 | #define AD525X_RDAC2 2 | ||
59 | #define AD525X_RDAC3 3 | ||
60 | |||
61 | #define AD525X_REG_TOL 0x18 | ||
62 | #define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0) | ||
63 | #define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1) | ||
64 | #define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2) | ||
65 | #define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3) | ||
66 | |||
67 | /* RDAC-to-EEPROM Interface Commands */ | ||
68 | #define AD525X_I2C_RDAC (0x00 << 5) | ||
69 | #define AD525X_I2C_EEPROM (0x01 << 5) | ||
70 | #define AD525X_I2C_CMD (0x80) | ||
71 | |||
72 | #define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3)) | ||
73 | #define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3)) | ||
74 | #define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3)) | ||
75 | #define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3)) | ||
76 | |||
77 | static s32 ad525x_read(struct i2c_client *client, u8 reg); | ||
78 | static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value); | ||
79 | |||
80 | /* | ||
81 | * Client data (each client gets its own) | ||
82 | */ | ||
83 | |||
84 | struct dpot_data { | ||
85 | struct mutex update_lock; | ||
86 | unsigned rdac_mask; | ||
87 | unsigned max_pos; | ||
88 | unsigned devid; | ||
89 | }; | ||
90 | |||
91 | /* sysfs functions */ | ||
92 | |||
93 | static ssize_t sysfs_show_reg(struct device *dev, | ||
94 | struct device_attribute *attr, char *buf, u32 reg) | ||
95 | { | ||
96 | struct i2c_client *client = to_i2c_client(dev); | ||
97 | struct dpot_data *data = i2c_get_clientdata(client); | ||
98 | s32 value; | ||
99 | |||
100 | mutex_lock(&data->update_lock); | ||
101 | value = ad525x_read(client, reg); | ||
102 | mutex_unlock(&data->update_lock); | ||
103 | |||
104 | if (value < 0) | ||
105 | return -EINVAL; | ||
106 | /* | ||
107 | * Let someone else deal with converting this ... | ||
108 | * the tolerance is a two-byte value where the MSB | ||
109 | * is a sign + integer value, and the LSB is a | ||
110 | * decimal value. See page 18 of the AD5258 | ||
111 | * datasheet (Rev. A) for more details. | ||
112 | */ | ||
113 | |||
114 | if (reg & AD525X_REG_TOL) | ||
115 | return sprintf(buf, "0x%04x\n", value & 0xFFFF); | ||
116 | else | ||
117 | return sprintf(buf, "%u\n", value & data->rdac_mask); | ||
118 | } | ||
119 | |||
120 | static ssize_t sysfs_set_reg(struct device *dev, | ||
121 | struct device_attribute *attr, | ||
122 | const char *buf, size_t count, u32 reg) | ||
123 | { | ||
124 | struct i2c_client *client = to_i2c_client(dev); | ||
125 | struct dpot_data *data = i2c_get_clientdata(client); | ||
126 | unsigned long value; | ||
127 | int err; | ||
128 | |||
129 | err = strict_strtoul(buf, 10, &value); | ||
130 | if (err) | ||
131 | return err; | ||
132 | |||
133 | if (value > data->rdac_mask) | ||
134 | value = data->rdac_mask; | ||
135 | |||
136 | mutex_lock(&data->update_lock); | ||
137 | ad525x_write(client, reg, value); | ||
138 | if (reg & AD525X_I2C_EEPROM) | ||
139 | msleep(26); /* Sleep while the EEPROM updates */ | ||
140 | mutex_unlock(&data->update_lock); | ||
141 | |||
142 | return count; | ||
143 | } | ||
144 | |||
145 | static ssize_t sysfs_do_cmd(struct device *dev, | ||
146 | struct device_attribute *attr, | ||
147 | const char *buf, size_t count, u32 reg) | ||
148 | { | ||
149 | struct i2c_client *client = to_i2c_client(dev); | ||
150 | struct dpot_data *data = i2c_get_clientdata(client); | ||
151 | |||
152 | mutex_lock(&data->update_lock); | ||
153 | ad525x_write(client, reg, 0); | ||
154 | mutex_unlock(&data->update_lock); | ||
155 | |||
156 | return count; | ||
157 | } | ||
158 | |||
159 | /* ------------------------------------------------------------------------- */ | ||
160 | |||
161 | static ssize_t show_rdac0(struct device *dev, | ||
162 | struct device_attribute *attr, char *buf) | ||
163 | { | ||
164 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0); | ||
165 | } | ||
166 | |||
167 | static ssize_t set_rdac0(struct device *dev, | ||
168 | struct device_attribute *attr, | ||
169 | const char *buf, size_t count) | ||
170 | { | ||
171 | return sysfs_set_reg(dev, attr, buf, count, | ||
172 | AD525X_I2C_RDAC | AD525X_RDAC0); | ||
173 | } | ||
174 | |||
175 | static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0); | ||
176 | |||
177 | static ssize_t show_eeprom0(struct device *dev, | ||
178 | struct device_attribute *attr, char *buf) | ||
179 | { | ||
180 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0); | ||
181 | } | ||
182 | |||
183 | static ssize_t set_eeprom0(struct device *dev, | ||
184 | struct device_attribute *attr, | ||
185 | const char *buf, size_t count) | ||
186 | { | ||
187 | return sysfs_set_reg(dev, attr, buf, count, | ||
188 | AD525X_I2C_EEPROM | AD525X_RDAC0); | ||
189 | } | ||
190 | |||
191 | static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0); | ||
192 | |||
193 | static ssize_t show_tolerance0(struct device *dev, | ||
194 | struct device_attribute *attr, char *buf) | ||
195 | { | ||
196 | return sysfs_show_reg(dev, attr, buf, | ||
197 | AD525X_I2C_EEPROM | AD525X_TOL_RDAC0); | ||
198 | } | ||
199 | |||
200 | static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL); | ||
201 | |||
202 | /* ------------------------------------------------------------------------- */ | ||
203 | |||
204 | static ssize_t show_rdac1(struct device *dev, | ||
205 | struct device_attribute *attr, char *buf) | ||
206 | { | ||
207 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1); | ||
208 | } | ||
209 | |||
210 | static ssize_t set_rdac1(struct device *dev, | ||
211 | struct device_attribute *attr, | ||
212 | const char *buf, size_t count) | ||
213 | { | ||
214 | return sysfs_set_reg(dev, attr, buf, count, | ||
215 | AD525X_I2C_RDAC | AD525X_RDAC1); | ||
216 | } | ||
217 | |||
218 | static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1); | ||
219 | |||
220 | static ssize_t show_eeprom1(struct device *dev, | ||
221 | struct device_attribute *attr, char *buf) | ||
222 | { | ||
223 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1); | ||
224 | } | ||
225 | |||
226 | static ssize_t set_eeprom1(struct device *dev, | ||
227 | struct device_attribute *attr, | ||
228 | const char *buf, size_t count) | ||
229 | { | ||
230 | return sysfs_set_reg(dev, attr, buf, count, | ||
231 | AD525X_I2C_EEPROM | AD525X_RDAC1); | ||
232 | } | ||
233 | |||
234 | static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1); | ||
235 | |||
236 | static ssize_t show_tolerance1(struct device *dev, | ||
237 | struct device_attribute *attr, char *buf) | ||
238 | { | ||
239 | return sysfs_show_reg(dev, attr, buf, | ||
240 | AD525X_I2C_EEPROM | AD525X_TOL_RDAC1); | ||
241 | } | ||
242 | |||
243 | static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL); | ||
244 | |||
245 | /* ------------------------------------------------------------------------- */ | ||
246 | |||
247 | static ssize_t show_rdac2(struct device *dev, | ||
248 | struct device_attribute *attr, char *buf) | ||
249 | { | ||
250 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2); | ||
251 | } | ||
252 | |||
253 | static ssize_t set_rdac2(struct device *dev, | ||
254 | struct device_attribute *attr, | ||
255 | const char *buf, size_t count) | ||
256 | { | ||
257 | return sysfs_set_reg(dev, attr, buf, count, | ||
258 | AD525X_I2C_RDAC | AD525X_RDAC2); | ||
259 | } | ||
260 | |||
261 | static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2); | ||
262 | |||
263 | static ssize_t show_eeprom2(struct device *dev, | ||
264 | struct device_attribute *attr, char *buf) | ||
265 | { | ||
266 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2); | ||
267 | } | ||
268 | |||
269 | static ssize_t set_eeprom2(struct device *dev, | ||
270 | struct device_attribute *attr, | ||
271 | const char *buf, size_t count) | ||
272 | { | ||
273 | return sysfs_set_reg(dev, attr, buf, count, | ||
274 | AD525X_I2C_EEPROM | AD525X_RDAC2); | ||
275 | } | ||
276 | |||
277 | static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2); | ||
278 | |||
279 | static ssize_t show_tolerance2(struct device *dev, | ||
280 | struct device_attribute *attr, char *buf) | ||
281 | { | ||
282 | return sysfs_show_reg(dev, attr, buf, | ||
283 | AD525X_I2C_EEPROM | AD525X_TOL_RDAC2); | ||
284 | } | ||
285 | |||
286 | static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL); | ||
287 | |||
288 | /* ------------------------------------------------------------------------- */ | ||
289 | |||
290 | static ssize_t show_rdac3(struct device *dev, | ||
291 | struct device_attribute *attr, char *buf) | ||
292 | { | ||
293 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3); | ||
294 | } | ||
295 | |||
296 | static ssize_t set_rdac3(struct device *dev, | ||
297 | struct device_attribute *attr, | ||
298 | const char *buf, size_t count) | ||
299 | { | ||
300 | return sysfs_set_reg(dev, attr, buf, count, | ||
301 | AD525X_I2C_RDAC | AD525X_RDAC3); | ||
302 | } | ||
303 | |||
304 | static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3); | ||
305 | |||
306 | static ssize_t show_eeprom3(struct device *dev, | ||
307 | struct device_attribute *attr, char *buf) | ||
308 | { | ||
309 | return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3); | ||
310 | } | ||
311 | |||
312 | static ssize_t set_eeprom3(struct device *dev, | ||
313 | struct device_attribute *attr, | ||
314 | const char *buf, size_t count) | ||
315 | { | ||
316 | return sysfs_set_reg(dev, attr, buf, count, | ||
317 | AD525X_I2C_EEPROM | AD525X_RDAC3); | ||
318 | } | ||
319 | |||
320 | static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3); | ||
321 | |||
322 | static ssize_t show_tolerance3(struct device *dev, | ||
323 | struct device_attribute *attr, char *buf) | ||
324 | { | ||
325 | return sysfs_show_reg(dev, attr, buf, | ||
326 | AD525X_I2C_EEPROM | AD525X_TOL_RDAC3); | ||
327 | } | ||
328 | |||
329 | static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL); | ||
330 | |||
331 | static struct attribute *ad525x_attributes_wipers[4][4] = { | ||
332 | { | ||
333 | &dev_attr_rdac0.attr, | ||
334 | &dev_attr_eeprom0.attr, | ||
335 | &dev_attr_tolerance0.attr, | ||
336 | NULL | ||
337 | }, { | ||
338 | &dev_attr_rdac1.attr, | ||
339 | &dev_attr_eeprom1.attr, | ||
340 | &dev_attr_tolerance1.attr, | ||
341 | NULL | ||
342 | }, { | ||
343 | &dev_attr_rdac2.attr, | ||
344 | &dev_attr_eeprom2.attr, | ||
345 | &dev_attr_tolerance2.attr, | ||
346 | NULL | ||
347 | }, { | ||
348 | &dev_attr_rdac3.attr, | ||
349 | &dev_attr_eeprom3.attr, | ||
350 | &dev_attr_tolerance3.attr, | ||
351 | NULL | ||
352 | } | ||
353 | }; | ||
354 | |||
355 | static const struct attribute_group ad525x_group_wipers[] = { | ||
356 | {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]}, | ||
357 | {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]}, | ||
358 | {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]}, | ||
359 | {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]}, | ||
360 | }; | ||
361 | |||
362 | /* ------------------------------------------------------------------------- */ | ||
363 | |||
364 | static ssize_t set_inc_all(struct device *dev, | ||
365 | struct device_attribute *attr, | ||
366 | const char *buf, size_t count) | ||
367 | { | ||
368 | return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL); | ||
369 | } | ||
370 | |||
371 | static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all); | ||
372 | |||
373 | static ssize_t set_dec_all(struct device *dev, | ||
374 | struct device_attribute *attr, | ||
375 | const char *buf, size_t count) | ||
376 | { | ||
377 | return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL); | ||
378 | } | ||
379 | |||
380 | static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all); | ||
381 | |||
382 | static ssize_t set_inc_all_6db(struct device *dev, | ||
383 | struct device_attribute *attr, | ||
384 | const char *buf, size_t count) | ||
385 | { | ||
386 | return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB); | ||
387 | } | ||
388 | |||
389 | static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db); | ||
390 | |||
391 | static ssize_t set_dec_all_6db(struct device *dev, | ||
392 | struct device_attribute *attr, | ||
393 | const char *buf, size_t count) | ||
394 | { | ||
395 | return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB); | ||
396 | } | ||
397 | |||
398 | static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db); | ||
399 | |||
400 | static struct attribute *ad525x_attributes_commands[] = { | ||
401 | &dev_attr_inc_all.attr, | ||
402 | &dev_attr_dec_all.attr, | ||
403 | &dev_attr_inc_all_6db.attr, | ||
404 | &dev_attr_dec_all_6db.attr, | ||
405 | NULL | ||
406 | }; | ||
407 | |||
408 | static const struct attribute_group ad525x_group_commands = { | ||
409 | .attrs = ad525x_attributes_commands, | ||
410 | }; | ||
411 | |||
412 | /* ------------------------------------------------------------------------- */ | ||
413 | |||
414 | /* i2c device functions */ | ||
415 | |||
416 | /** | ||
417 | * ad525x_read - return the value contained in the specified register | ||
418 | * on the AD5258 device. | ||
419 | * @client: value returned from i2c_new_device() | ||
420 | * @reg: the register to read | ||
421 | * | ||
422 | * If the tolerance register is specified, 2 bytes are returned. | ||
423 | * Otherwise, 1 byte is returned. A negative value indicates an error | ||
424 | * occurred while reading the register. | ||
425 | */ | ||
426 | static s32 ad525x_read(struct i2c_client *client, u8 reg) | ||
427 | { | ||
428 | struct dpot_data *data = i2c_get_clientdata(client); | ||
429 | |||
430 | if ((reg & AD525X_REG_TOL) || (data->max_pos > 256)) | ||
431 | return i2c_smbus_read_word_data(client, (reg & 0xF8) | | ||
432 | ((reg & 0x7) << 1)); | ||
433 | else | ||
434 | return i2c_smbus_read_byte_data(client, reg); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * ad525x_write - store the given value in the specified register on | ||
439 | * the AD5258 device. | ||
440 | * @client: value returned from i2c_new_device() | ||
441 | * @reg: the register to write | ||
442 | * @value: the byte to store in the register | ||
443 | * | ||
444 | * For certain instructions that do not require a data byte, "NULL" | ||
445 | * should be specified for the "value" parameter. These instructions | ||
446 | * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM. | ||
447 | * | ||
448 | * A negative return value indicates an error occurred while reading | ||
449 | * the register. | ||
450 | */ | ||
451 | static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value) | ||
452 | { | ||
453 | struct dpot_data *data = i2c_get_clientdata(client); | ||
454 | |||
455 | /* Only write the instruction byte for certain commands */ | ||
456 | if (reg & AD525X_I2C_CMD) | ||
457 | return i2c_smbus_write_byte(client, reg); | ||
458 | |||
459 | if (data->max_pos > 256) | ||
460 | return i2c_smbus_write_word_data(client, (reg & 0xF8) | | ||
461 | ((reg & 0x7) << 1), value); | ||
462 | else | ||
463 | /* All other registers require instruction + data bytes */ | ||
464 | return i2c_smbus_write_byte_data(client, reg, value); | ||
465 | } | ||
466 | |||
467 | static int ad525x_probe(struct i2c_client *client, | ||
468 | const struct i2c_device_id *id) | ||
469 | { | ||
470 | struct device *dev = &client->dev; | ||
471 | struct dpot_data *data; | ||
472 | int err = 0; | ||
473 | |||
474 | dev_dbg(dev, "%s\n", __func__); | ||
475 | |||
476 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { | ||
477 | dev_err(dev, "missing I2C functionality for this driver\n"); | ||
478 | goto exit; | ||
479 | } | ||
480 | |||
481 | data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); | ||
482 | if (!data) { | ||
483 | err = -ENOMEM; | ||
484 | goto exit; | ||
485 | } | ||
486 | |||
487 | i2c_set_clientdata(client, data); | ||
488 | mutex_init(&data->update_lock); | ||
489 | |||
490 | switch (id->driver_data) { | ||
491 | case AD5258_ID: | ||
492 | data->max_pos = AD5258_MAX_POSITION; | ||
493 | err = sysfs_create_group(&dev->kobj, | ||
494 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
495 | break; | ||
496 | case AD5259_ID: | ||
497 | data->max_pos = AD5259_MAX_POSITION; | ||
498 | err = sysfs_create_group(&dev->kobj, | ||
499 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
500 | break; | ||
501 | case AD5251_ID: | ||
502 | data->max_pos = AD5251_MAX_POSITION; | ||
503 | err = sysfs_create_group(&dev->kobj, | ||
504 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
505 | err |= sysfs_create_group(&dev->kobj, | ||
506 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
507 | err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); | ||
508 | break; | ||
509 | case AD5252_ID: | ||
510 | data->max_pos = AD5252_MAX_POSITION; | ||
511 | err = sysfs_create_group(&dev->kobj, | ||
512 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
513 | err |= sysfs_create_group(&dev->kobj, | ||
514 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
515 | err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); | ||
516 | break; | ||
517 | case AD5253_ID: | ||
518 | data->max_pos = AD5253_MAX_POSITION; | ||
519 | err = sysfs_create_group(&dev->kobj, | ||
520 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
521 | err |= sysfs_create_group(&dev->kobj, | ||
522 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
523 | err |= sysfs_create_group(&dev->kobj, | ||
524 | &ad525x_group_wipers[AD525X_RDAC2]); | ||
525 | err |= sysfs_create_group(&dev->kobj, | ||
526 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
527 | err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); | ||
528 | break; | ||
529 | case AD5254_ID: | ||
530 | data->max_pos = AD5254_MAX_POSITION; | ||
531 | err = sysfs_create_group(&dev->kobj, | ||
532 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
533 | err |= sysfs_create_group(&dev->kobj, | ||
534 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
535 | err |= sysfs_create_group(&dev->kobj, | ||
536 | &ad525x_group_wipers[AD525X_RDAC2]); | ||
537 | err |= sysfs_create_group(&dev->kobj, | ||
538 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
539 | err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); | ||
540 | break; | ||
541 | case AD5255_ID: | ||
542 | data->max_pos = AD5255_MAX_POSITION; | ||
543 | err = sysfs_create_group(&dev->kobj, | ||
544 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
545 | err |= sysfs_create_group(&dev->kobj, | ||
546 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
547 | err |= sysfs_create_group(&dev->kobj, | ||
548 | &ad525x_group_wipers[AD525X_RDAC2]); | ||
549 | err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); | ||
550 | break; | ||
551 | default: | ||
552 | err = -ENODEV; | ||
553 | goto exit_free; | ||
554 | } | ||
555 | |||
556 | if (err) { | ||
557 | dev_err(dev, "failed to register sysfs hooks\n"); | ||
558 | goto exit_free; | ||
559 | } | ||
560 | |||
561 | data->devid = id->driver_data; | ||
562 | data->rdac_mask = data->max_pos - 1; | ||
563 | |||
564 | dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", | ||
565 | id->name, data->max_pos); | ||
566 | |||
567 | return 0; | ||
568 | |||
569 | exit_free: | ||
570 | kfree(data); | ||
571 | i2c_set_clientdata(client, NULL); | ||
572 | exit: | ||
573 | dev_err(dev, "failed to create client\n"); | ||
574 | return err; | ||
575 | } | ||
576 | |||
577 | static int __devexit ad525x_remove(struct i2c_client *client) | ||
578 | { | ||
579 | struct dpot_data *data = i2c_get_clientdata(client); | ||
580 | struct device *dev = &client->dev; | ||
581 | |||
582 | switch (data->devid) { | ||
583 | case AD5258_ID: | ||
584 | case AD5259_ID: | ||
585 | sysfs_remove_group(&dev->kobj, | ||
586 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
587 | break; | ||
588 | case AD5251_ID: | ||
589 | case AD5252_ID: | ||
590 | sysfs_remove_group(&dev->kobj, | ||
591 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
592 | sysfs_remove_group(&dev->kobj, | ||
593 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
594 | sysfs_remove_group(&dev->kobj, &ad525x_group_commands); | ||
595 | break; | ||
596 | case AD5253_ID: | ||
597 | case AD5254_ID: | ||
598 | sysfs_remove_group(&dev->kobj, | ||
599 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
600 | sysfs_remove_group(&dev->kobj, | ||
601 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
602 | sysfs_remove_group(&dev->kobj, | ||
603 | &ad525x_group_wipers[AD525X_RDAC2]); | ||
604 | sysfs_remove_group(&dev->kobj, | ||
605 | &ad525x_group_wipers[AD525X_RDAC3]); | ||
606 | sysfs_remove_group(&dev->kobj, &ad525x_group_commands); | ||
607 | break; | ||
608 | case AD5255_ID: | ||
609 | sysfs_remove_group(&dev->kobj, | ||
610 | &ad525x_group_wipers[AD525X_RDAC0]); | ||
611 | sysfs_remove_group(&dev->kobj, | ||
612 | &ad525x_group_wipers[AD525X_RDAC1]); | ||
613 | sysfs_remove_group(&dev->kobj, | ||
614 | &ad525x_group_wipers[AD525X_RDAC2]); | ||
615 | sysfs_remove_group(&dev->kobj, &ad525x_group_commands); | ||
616 | break; | ||
617 | } | ||
618 | |||
619 | i2c_set_clientdata(client, NULL); | ||
620 | kfree(data); | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | static const struct i2c_device_id ad525x_idtable[] = { | ||
626 | {"ad5258", AD5258_ID}, | ||
627 | {"ad5259", AD5259_ID}, | ||
628 | {"ad5251", AD5251_ID}, | ||
629 | {"ad5252", AD5252_ID}, | ||
630 | {"ad5253", AD5253_ID}, | ||
631 | {"ad5254", AD5254_ID}, | ||
632 | {"ad5255", AD5255_ID}, | ||
633 | {} | ||
634 | }; | ||
635 | |||
636 | MODULE_DEVICE_TABLE(i2c, ad525x_idtable); | ||
637 | |||
638 | static struct i2c_driver ad525x_driver = { | ||
639 | .driver = { | ||
640 | .owner = THIS_MODULE, | ||
641 | .name = DRIVER_NAME, | ||
642 | }, | ||
643 | .id_table = ad525x_idtable, | ||
644 | .probe = ad525x_probe, | ||
645 | .remove = __devexit_p(ad525x_remove), | ||
646 | }; | ||
647 | |||
648 | static int __init ad525x_init(void) | ||
649 | { | ||
650 | return i2c_add_driver(&ad525x_driver); | ||
651 | } | ||
652 | |||
653 | module_init(ad525x_init); | ||
654 | |||
655 | static void __exit ad525x_exit(void) | ||
656 | { | ||
657 | i2c_del_driver(&ad525x_driver); | ||
658 | } | ||
659 | |||
660 | module_exit(ad525x_exit); | ||
661 | |||
662 | MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " | ||
663 | "Michael Hennerich <hennerich@blackfin.uclinux.org>, "); | ||
664 | MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver"); | ||
665 | MODULE_LICENSE("GPL"); | ||
666 | MODULE_VERSION(DRIVER_VERSION); | ||
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index 558bf3f2c276..4afffe610f99 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/atmel-ssc.h> | 17 | #include <linux/atmel-ssc.h> |
18 | #include <linux/slab.h> | ||
18 | 19 | ||
19 | /* Serialize access to ssc_list and user count */ | 20 | /* Serialize access to ssc_list and user count */ |
20 | static DEFINE_SPINLOCK(user_lock); | 21 | static DEFINE_SPINLOCK(user_lock); |
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c index 6aa5294dfec4..0f3fb4f03bdf 100644 --- a/drivers/misc/atmel_pwm.c +++ b/drivers/misc/atmel_pwm.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <linux/clk.h> | 2 | #include <linux/clk.h> |
3 | #include <linux/err.h> | 3 | #include <linux/err.h> |
4 | #include <linux/slab.h> | ||
4 | #include <linux/io.h> | 5 | #include <linux/io.h> |
5 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
6 | #include <linux/platform_device.h> | 7 | #include <linux/platform_device.h> |
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c index 05dc8a31f280..3891124001f2 100644 --- a/drivers/misc/atmel_tclib.c +++ b/drivers/misc/atmel_tclib.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
9 | #include <linux/slab.h> | ||
9 | 10 | ||
10 | /* Number of bytes to reserve for the iomem resource */ | 11 | /* Number of bytes to reserve for the iomem resource */ |
11 | #define ATMEL_TC_IOMEM_SIZE 256 | 12 | #define ATMEL_TC_IOMEM_SIZE 256 |
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index b5346b4db91a..ed090e77c9cd 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/idr.h> | 21 | #include <linux/idr.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/slab.h> | ||
23 | 24 | ||
24 | #include <linux/c2port.h> | 25 | #include <linux/c2port.h> |
25 | 26 | ||
@@ -912,8 +913,8 @@ struct c2port_device *c2port_device_register(char *name, | |||
912 | 913 | ||
913 | c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, | 914 | c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, |
914 | "c2port%d", id); | 915 | "c2port%d", id); |
915 | if (unlikely(!c2dev->dev)) { | 916 | if (unlikely(IS_ERR(c2dev->dev))) { |
916 | ret = -ENOMEM; | 917 | ret = PTR_ERR(c2dev->dev); |
917 | goto error_device_create; | 918 | goto error_device_create; |
918 | } | 919 | } |
919 | dev_set_drvdata(c2dev->dev, c2dev); | 920 | dev_set_drvdata(c2dev->dev, c2dev); |
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index b14eab0f2ba5..efec4139c3f6 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c | |||
@@ -9,11 +9,11 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | ||
13 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
14 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
15 | #include <linux/idr.h> | 14 | #include <linux/idr.h> |
16 | #include <linux/cb710.h> | 15 | #include <linux/cb710.h> |
16 | #include <linux/gfp.h> | ||
17 | 17 | ||
18 | static DEFINE_IDA(cb710_ida); | 18 | static DEFINE_IDA(cb710_ida); |
19 | static DEFINE_SPINLOCK(cb710_ida_lock); | 19 | static DEFINE_SPINLOCK(cb710_ida_lock); |
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c index 02358d086e03..fcb3b8e30c52 100644 --- a/drivers/misc/cb710/debug.c +++ b/drivers/misc/cb710/debug.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/cb710.h> | 10 | #include <linux/cb710.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
14 | 13 | ||
15 | #define CB710_REG_COUNT 0x80 | 14 | #define CB710_REG_COUNT 0x80 |
16 | 15 | ||
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c new file mode 100644 index 000000000000..9bec24db4d41 --- /dev/null +++ b/drivers/misc/cs5535-mfgpt.c | |||
@@ -0,0 +1,371 @@ | |||
1 | /* | ||
2 | * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT) | ||
3 | * | ||
4 | * Copyright (C) 2006, Advanced Micro Devices, Inc. | ||
5 | * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> | ||
6 | * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of version 2 of the GNU General Public License | ||
10 | * as published by the Free Software Foundation. | ||
11 | * | ||
12 | * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/cs5535.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #define DRV_NAME "cs5535-mfgpt" | ||
24 | #define MFGPT_BAR 2 | ||
25 | |||
26 | static int mfgpt_reset_timers; | ||
27 | module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); | ||
28 | MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; " | ||
29 | "required by some broken BIOSes (ie, TinyBIOS < 0.99)."); | ||
30 | |||
31 | struct cs5535_mfgpt_timer { | ||
32 | struct cs5535_mfgpt_chip *chip; | ||
33 | int nr; | ||
34 | }; | ||
35 | |||
36 | static struct cs5535_mfgpt_chip { | ||
37 | DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); | ||
38 | resource_size_t base; | ||
39 | |||
40 | struct pci_dev *pdev; | ||
41 | spinlock_t lock; | ||
42 | int initialized; | ||
43 | } cs5535_mfgpt_chip; | ||
44 | |||
45 | int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, | ||
46 | int event, int enable) | ||
47 | { | ||
48 | uint32_t msr, mask, value, dummy; | ||
49 | int shift = (cmp == MFGPT_CMP1) ? 0 : 8; | ||
50 | |||
51 | if (!timer) { | ||
52 | WARN_ON(1); | ||
53 | return -EIO; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * The register maps for these are described in sections 6.17.1.x of | ||
58 | * the AMD Geode CS5536 Companion Device Data Book. | ||
59 | */ | ||
60 | switch (event) { | ||
61 | case MFGPT_EVENT_RESET: | ||
62 | /* | ||
63 | * XXX: According to the docs, we cannot reset timers above | ||
64 | * 6; that is, resets for 7 and 8 will be ignored. Is this | ||
65 | * a problem? -dilinger | ||
66 | */ | ||
67 | msr = MSR_MFGPT_NR; | ||
68 | mask = 1 << (timer->nr + 24); | ||
69 | break; | ||
70 | |||
71 | case MFGPT_EVENT_NMI: | ||
72 | msr = MSR_MFGPT_NR; | ||
73 | mask = 1 << (timer->nr + shift); | ||
74 | break; | ||
75 | |||
76 | case MFGPT_EVENT_IRQ: | ||
77 | msr = MSR_MFGPT_IRQ; | ||
78 | mask = 1 << (timer->nr + shift); | ||
79 | break; | ||
80 | |||
81 | default: | ||
82 | return -EIO; | ||
83 | } | ||
84 | |||
85 | rdmsr(msr, value, dummy); | ||
86 | |||
87 | if (enable) | ||
88 | value |= mask; | ||
89 | else | ||
90 | value &= ~mask; | ||
91 | |||
92 | wrmsr(msr, value, dummy); | ||
93 | return 0; | ||
94 | } | ||
95 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event); | ||
96 | |||
97 | int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq, | ||
98 | int enable) | ||
99 | { | ||
100 | uint32_t zsel, lpc, dummy; | ||
101 | int shift; | ||
102 | |||
103 | if (!timer) { | ||
104 | WARN_ON(1); | ||
105 | return -EIO; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA | ||
110 | * is using the same CMP of the timer's Siamese twin, the IRQ is set to | ||
111 | * 2, and we mustn't use nor change it. | ||
112 | * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the | ||
113 | * IRQ of the 1st. This can only happen if forcing an IRQ, calling this | ||
114 | * with *irq==0 is safe. Currently there _are_ no 2 drivers. | ||
115 | */ | ||
116 | rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); | ||
117 | shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4; | ||
118 | if (((zsel >> shift) & 0xF) == 2) | ||
119 | return -EIO; | ||
120 | |||
121 | /* Choose IRQ: if none supplied, keep IRQ already set or use default */ | ||
122 | if (!*irq) | ||
123 | *irq = (zsel >> shift) & 0xF; | ||
124 | if (!*irq) | ||
125 | *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; | ||
126 | |||
127 | /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ | ||
128 | if (*irq < 1 || *irq == 2 || *irq > 15) | ||
129 | return -EIO; | ||
130 | rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); | ||
131 | if (lpc & (1 << *irq)) | ||
132 | return -EIO; | ||
133 | |||
134 | /* All chosen and checked - go for it */ | ||
135 | if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) | ||
136 | return -EIO; | ||
137 | if (enable) { | ||
138 | zsel = (zsel & ~(0xF << shift)) | (*irq << shift); | ||
139 | wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq); | ||
145 | |||
146 | struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) | ||
147 | { | ||
148 | struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip; | ||
149 | struct cs5535_mfgpt_timer *timer = NULL; | ||
150 | unsigned long flags; | ||
151 | int max; | ||
152 | |||
153 | if (!mfgpt->initialized) | ||
154 | goto done; | ||
155 | |||
156 | /* only allocate timers from the working domain if requested */ | ||
157 | if (domain == MFGPT_DOMAIN_WORKING) | ||
158 | max = 6; | ||
159 | else | ||
160 | max = MFGPT_MAX_TIMERS; | ||
161 | |||
162 | if (timer_nr >= max) { | ||
163 | /* programmer error. silly programmers! */ | ||
164 | WARN_ON(1); | ||
165 | goto done; | ||
166 | } | ||
167 | |||
168 | spin_lock_irqsave(&mfgpt->lock, flags); | ||
169 | if (timer_nr < 0) { | ||
170 | unsigned long t; | ||
171 | |||
172 | /* try to find any available timer */ | ||
173 | t = find_first_bit(mfgpt->avail, max); | ||
174 | /* set timer_nr to -1 if no timers available */ | ||
175 | timer_nr = t < max ? (int) t : -1; | ||
176 | } else { | ||
177 | /* check if the requested timer's available */ | ||
178 | if (test_bit(timer_nr, mfgpt->avail)) | ||
179 | timer_nr = -1; | ||
180 | } | ||
181 | |||
182 | if (timer_nr >= 0) | ||
183 | /* if timer_nr is not -1, it's an available timer */ | ||
184 | __clear_bit(timer_nr, mfgpt->avail); | ||
185 | spin_unlock_irqrestore(&mfgpt->lock, flags); | ||
186 | |||
187 | if (timer_nr < 0) | ||
188 | goto done; | ||
189 | |||
190 | timer = kmalloc(sizeof(*timer), GFP_KERNEL); | ||
191 | if (!timer) { | ||
192 | /* aw hell */ | ||
193 | spin_lock_irqsave(&mfgpt->lock, flags); | ||
194 | __set_bit(timer_nr, mfgpt->avail); | ||
195 | spin_unlock_irqrestore(&mfgpt->lock, flags); | ||
196 | goto done; | ||
197 | } | ||
198 | timer->chip = mfgpt; | ||
199 | timer->nr = timer_nr; | ||
200 | dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr); | ||
201 | |||
202 | done: | ||
203 | return timer; | ||
204 | } | ||
205 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); | ||
206 | |||
207 | /* | ||
208 | * XXX: This frees the timer memory, but never resets the actual hardware | ||
209 | * timer. The old geode_mfgpt code did this; it would be good to figure | ||
210 | * out a way to actually release the hardware timer. See comments below. | ||
211 | */ | ||
212 | void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) | ||
213 | { | ||
214 | kfree(timer); | ||
215 | } | ||
216 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); | ||
217 | |||
218 | uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg) | ||
219 | { | ||
220 | return inw(timer->chip->base + reg + (timer->nr * 8)); | ||
221 | } | ||
222 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_read); | ||
223 | |||
224 | void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, | ||
225 | uint16_t value) | ||
226 | { | ||
227 | outw(value, timer->chip->base + reg + (timer->nr * 8)); | ||
228 | } | ||
229 | EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); | ||
230 | |||
231 | /* | ||
232 | * This is a sledgehammer that resets all MFGPT timers. This is required by | ||
233 | * some broken BIOSes which leave the system in an unstable state | ||
234 | * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain as to | ||
235 | * whether or not this secret MSR can be used to release individual timers. | ||
236 | * Jordan tells me that he and Mitch once played w/ it, but it's unclear | ||
237 | * what the results of that were (and they experienced some instability). | ||
238 | */ | ||
239 | static void __init reset_all_timers(void) | ||
240 | { | ||
241 | uint32_t val, dummy; | ||
242 | |||
243 | /* The following undocumented bit resets the MFGPT timers */ | ||
244 | val = 0xFF; dummy = 0; | ||
245 | wrmsr(MSR_MFGPT_SETUP, val, dummy); | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * Check whether any MFGPTs are available for the kernel to use. In most | ||
250 | * cases, firmware that uses AMD's VSA code will claim all timers during | ||
251 | * bootup; we certainly don't want to take them if they're already in use. | ||
252 | * In other cases (such as with VSAless OpenFirmware), the system firmware | ||
253 | * leaves timers available for us to use. | ||
254 | */ | ||
255 | static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) | ||
256 | { | ||
257 | struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; | ||
258 | unsigned long flags; | ||
259 | int timers = 0; | ||
260 | uint16_t val; | ||
261 | int i; | ||
262 | |||
263 | /* bios workaround */ | ||
264 | if (mfgpt_reset_timers) | ||
265 | reset_all_timers(); | ||
266 | |||
267 | /* just to be safe, protect this section w/ lock */ | ||
268 | spin_lock_irqsave(&mfgpt->lock, flags); | ||
269 | for (i = 0; i < MFGPT_MAX_TIMERS; i++) { | ||
270 | timer.nr = i; | ||
271 | val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); | ||
272 | if (!(val & MFGPT_SETUP_SETUP)) { | ||
273 | __set_bit(i, mfgpt->avail); | ||
274 | timers++; | ||
275 | } | ||
276 | } | ||
277 | spin_unlock_irqrestore(&mfgpt->lock, flags); | ||
278 | |||
279 | return timers; | ||
280 | } | ||
281 | |||
282 | static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, | ||
283 | const struct pci_device_id *pci_id) | ||
284 | { | ||
285 | int err, t; | ||
286 | |||
287 | /* There are two ways to get the MFGPT base address; one is by | ||
288 | * fetching it from MSR_LBAR_MFGPT, the other is by reading the | ||
289 | * PCI BAR info. The latter method is easier (especially across | ||
290 | * different architectures), so we'll stick with that for now. If | ||
291 | * it turns out to be unreliable in the face of crappy BIOSes, we | ||
292 | * can always go back to using MSRs.. */ | ||
293 | |||
294 | err = pci_enable_device_io(pdev); | ||
295 | if (err) { | ||
296 | dev_err(&pdev->dev, "can't enable device IO\n"); | ||
297 | goto done; | ||
298 | } | ||
299 | |||
300 | err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); | ||
301 | if (err) { | ||
302 | dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR); | ||
303 | goto done; | ||
304 | } | ||
305 | |||
306 | /* set up the driver-specific struct */ | ||
307 | cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); | ||
308 | cs5535_mfgpt_chip.pdev = pdev; | ||
309 | spin_lock_init(&cs5535_mfgpt_chip.lock); | ||
310 | |||
311 | dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, | ||
312 | (unsigned long long) cs5535_mfgpt_chip.base); | ||
313 | |||
314 | /* detect the available timers */ | ||
315 | t = scan_timers(&cs5535_mfgpt_chip); | ||
316 | dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); | ||
317 | cs5535_mfgpt_chip.initialized = 1; | ||
318 | return 0; | ||
319 | |||
320 | done: | ||
321 | return err; | ||
322 | } | ||
323 | |||
324 | static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { | ||
325 | { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, | ||
326 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, | ||
327 | { 0, }, | ||
328 | }; | ||
329 | MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl); | ||
330 | |||
331 | /* | ||
332 | * Just like with the cs5535-gpio driver, we can't use the standard PCI driver | ||
333 | * registration stuff. It only allows only one driver to bind to each PCI | ||
334 | * device, and we want the GPIO and MFGPT drivers to be able to share a PCI | ||
335 | * device. Instead, we manually scan for the PCI device, request a single | ||
336 | * region, and keep track of the devices that we're using. | ||
337 | */ | ||
338 | |||
339 | static int __init cs5535_mfgpt_scan_pci(void) | ||
340 | { | ||
341 | struct pci_dev *pdev; | ||
342 | int err = -ENODEV; | ||
343 | int i; | ||
344 | |||
345 | for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) { | ||
346 | pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor, | ||
347 | cs5535_mfgpt_pci_tbl[i].device, NULL); | ||
348 | if (pdev) { | ||
349 | err = cs5535_mfgpt_probe(pdev, | ||
350 | &cs5535_mfgpt_pci_tbl[i]); | ||
351 | if (err) | ||
352 | pci_dev_put(pdev); | ||
353 | |||
354 | /* we only support a single CS5535/6 southbridge */ | ||
355 | break; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | return err; | ||
360 | } | ||
361 | |||
362 | static int __init cs5535_mfgpt_init(void) | ||
363 | { | ||
364 | return cs5535_mfgpt_scan_pci(); | ||
365 | } | ||
366 | |||
367 | module_init(cs5535_mfgpt_init); | ||
368 | |||
369 | MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>"); | ||
370 | MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); | ||
371 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c new file mode 100644 index 000000000000..9197cfc55015 --- /dev/null +++ b/drivers/misc/ds1682.c | |||
@@ -0,0 +1,266 @@ | |||
1 | /* | ||
2 | * Dallas Semiconductor DS1682 Elapsed Time Recorder device driver | ||
3 | * | ||
4 | * Written by: Grant Likely <grant.likely@secretlab.ca> | ||
5 | * | ||
6 | * Copyright (C) 2007 Secret Lab Technologies Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * The DS1682 elapsed timer recorder is a simple device that implements | ||
15 | * one elapsed time counter, one event counter, an alarm signal and 10 | ||
16 | * bytes of general purpose EEPROM. | ||
17 | * | ||
18 | * This driver provides access to the DS1682 counters and user data via | ||
19 | * the sysfs. The following attributes are added to the device node: | ||
20 | * elapsed_time (u32): Total elapsed event time in ms resolution | ||
21 | * alarm_time (u32): When elapsed time exceeds the value in alarm_time, | ||
22 | * then the alarm pin is asserted. | ||
23 | * event_count (u16): number of times the event pin has gone low. | ||
24 | * eeprom (u8[10]): general purpose EEPROM | ||
25 | * | ||
26 | * Counter registers and user data are both read/write unless the device | ||
27 | * has been write protected. This driver does not support turning off write | ||
28 | * protection. Once write protection is turned on, it is impossible to | ||
29 | * turn it off again, so I have left the feature out of this driver to avoid | ||
30 | * accidental enabling, but it is trivial to add write protect support. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/i2c.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <linux/list.h> | ||
39 | #include <linux/sysfs.h> | ||
40 | #include <linux/ctype.h> | ||
41 | #include <linux/hwmon-sysfs.h> | ||
42 | |||
43 | /* Device registers */ | ||
44 | #define DS1682_REG_CONFIG 0x00 | ||
45 | #define DS1682_REG_ALARM 0x01 | ||
46 | #define DS1682_REG_ELAPSED 0x05 | ||
47 | #define DS1682_REG_EVT_CNTR 0x09 | ||
48 | #define DS1682_REG_EEPROM 0x0b | ||
49 | #define DS1682_REG_RESET 0x1d | ||
50 | #define DS1682_REG_WRITE_DISABLE 0x1e | ||
51 | #define DS1682_REG_WRITE_MEM_DISABLE 0x1f | ||
52 | |||
53 | #define DS1682_EEPROM_SIZE 10 | ||
54 | |||
55 | /* | ||
56 | * Generic counter attributes | ||
57 | */ | ||
58 | static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr, | ||
59 | char *buf) | ||
60 | { | ||
61 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | ||
62 | struct i2c_client *client = to_i2c_client(dev); | ||
63 | __le32 val = 0; | ||
64 | int rc; | ||
65 | |||
66 | dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name); | ||
67 | |||
68 | /* Read the register */ | ||
69 | rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr, | ||
70 | (u8 *) & val); | ||
71 | if (rc < 0) | ||
72 | return -EIO; | ||
73 | |||
74 | /* Special case: the 32 bit regs are time values with 1/4s | ||
75 | * resolution, scale them up to milliseconds */ | ||
76 | if (sattr->nr == 4) | ||
77 | return sprintf(buf, "%llu\n", | ||
78 | ((unsigned long long)le32_to_cpu(val)) * 250); | ||
79 | |||
80 | /* Format the output string and return # of bytes */ | ||
81 | return sprintf(buf, "%li\n", (long)le32_to_cpu(val)); | ||
82 | } | ||
83 | |||
84 | static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, | ||
85 | const char *buf, size_t count) | ||
86 | { | ||
87 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | ||
88 | struct i2c_client *client = to_i2c_client(dev); | ||
89 | char *endp; | ||
90 | u64 val; | ||
91 | __le32 val_le; | ||
92 | int rc; | ||
93 | |||
94 | dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name); | ||
95 | |||
96 | /* Decode input */ | ||
97 | val = simple_strtoull(buf, &endp, 0); | ||
98 | if (buf == endp) { | ||
99 | dev_dbg(dev, "input string not a number\n"); | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | |||
103 | /* Special case: the 32 bit regs are time values with 1/4s | ||
104 | * resolution, scale input down to quarter-seconds */ | ||
105 | if (sattr->nr == 4) | ||
106 | do_div(val, 250); | ||
107 | |||
108 | /* write out the value */ | ||
109 | val_le = cpu_to_le32(val); | ||
110 | rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr, | ||
111 | (u8 *) & val_le); | ||
112 | if (rc < 0) { | ||
113 | dev_err(dev, "register write failed; reg=0x%x, size=%i\n", | ||
114 | sattr->index, sattr->nr); | ||
115 | return -EIO; | ||
116 | } | ||
117 | |||
118 | return count; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Simple register attributes | ||
123 | */ | ||
124 | static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show, | ||
125 | ds1682_store, 4, DS1682_REG_ELAPSED); | ||
126 | static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show, | ||
127 | ds1682_store, 4, DS1682_REG_ALARM); | ||
128 | static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show, | ||
129 | ds1682_store, 2, DS1682_REG_EVT_CNTR); | ||
130 | |||
131 | static const struct attribute_group ds1682_group = { | ||
132 | .attrs = (struct attribute *[]) { | ||
133 | &sensor_dev_attr_elapsed_time.dev_attr.attr, | ||
134 | &sensor_dev_attr_alarm_time.dev_attr.attr, | ||
135 | &sensor_dev_attr_event_count.dev_attr.attr, | ||
136 | NULL, | ||
137 | }, | ||
138 | }; | ||
139 | |||
140 | /* | ||
141 | * User data attribute | ||
142 | */ | ||
143 | static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr, | ||
144 | char *buf, loff_t off, size_t count) | ||
145 | { | ||
146 | struct i2c_client *client = kobj_to_i2c_client(kobj); | ||
147 | int rc; | ||
148 | |||
149 | dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n", | ||
150 | buf, off, count); | ||
151 | |||
152 | if (off >= DS1682_EEPROM_SIZE) | ||
153 | return 0; | ||
154 | |||
155 | if (off + count > DS1682_EEPROM_SIZE) | ||
156 | count = DS1682_EEPROM_SIZE - off; | ||
157 | |||
158 | rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off, | ||
159 | count, buf); | ||
160 | if (rc < 0) | ||
161 | return -EIO; | ||
162 | |||
163 | return count; | ||
164 | } | ||
165 | |||
166 | static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr, | ||
167 | char *buf, loff_t off, size_t count) | ||
168 | { | ||
169 | struct i2c_client *client = kobj_to_i2c_client(kobj); | ||
170 | |||
171 | dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n", | ||
172 | buf, off, count); | ||
173 | |||
174 | if (off >= DS1682_EEPROM_SIZE) | ||
175 | return -ENOSPC; | ||
176 | |||
177 | if (off + count > DS1682_EEPROM_SIZE) | ||
178 | count = DS1682_EEPROM_SIZE - off; | ||
179 | |||
180 | /* Write out to the device */ | ||
181 | if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off, | ||
182 | count, buf) < 0) | ||
183 | return -EIO; | ||
184 | |||
185 | return count; | ||
186 | } | ||
187 | |||
188 | static struct bin_attribute ds1682_eeprom_attr = { | ||
189 | .attr = { | ||
190 | .name = "eeprom", | ||
191 | .mode = S_IRUGO | S_IWUSR, | ||
192 | }, | ||
193 | .size = DS1682_EEPROM_SIZE, | ||
194 | .read = ds1682_eeprom_read, | ||
195 | .write = ds1682_eeprom_write, | ||
196 | }; | ||
197 | |||
198 | /* | ||
199 | * Called when a ds1682 device is matched with this driver | ||
200 | */ | ||
201 | static int ds1682_probe(struct i2c_client *client, | ||
202 | const struct i2c_device_id *id) | ||
203 | { | ||
204 | int rc; | ||
205 | |||
206 | if (!i2c_check_functionality(client->adapter, | ||
207 | I2C_FUNC_SMBUS_I2C_BLOCK)) { | ||
208 | dev_err(&client->dev, "i2c bus does not support the ds1682\n"); | ||
209 | rc = -ENODEV; | ||
210 | goto exit; | ||
211 | } | ||
212 | |||
213 | rc = sysfs_create_group(&client->dev.kobj, &ds1682_group); | ||
214 | if (rc) | ||
215 | goto exit; | ||
216 | |||
217 | rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr); | ||
218 | if (rc) | ||
219 | goto exit_bin_attr; | ||
220 | |||
221 | return 0; | ||
222 | |||
223 | exit_bin_attr: | ||
224 | sysfs_remove_group(&client->dev.kobj, &ds1682_group); | ||
225 | exit: | ||
226 | return rc; | ||
227 | } | ||
228 | |||
229 | static int ds1682_remove(struct i2c_client *client) | ||
230 | { | ||
231 | sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr); | ||
232 | sysfs_remove_group(&client->dev.kobj, &ds1682_group); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static const struct i2c_device_id ds1682_id[] = { | ||
237 | { "ds1682", 0 }, | ||
238 | { } | ||
239 | }; | ||
240 | MODULE_DEVICE_TABLE(i2c, ds1682_id); | ||
241 | |||
242 | static struct i2c_driver ds1682_driver = { | ||
243 | .driver = { | ||
244 | .name = "ds1682", | ||
245 | }, | ||
246 | .probe = ds1682_probe, | ||
247 | .remove = ds1682_remove, | ||
248 | .id_table = ds1682_id, | ||
249 | }; | ||
250 | |||
251 | static int __init ds1682_init(void) | ||
252 | { | ||
253 | return i2c_add_driver(&ds1682_driver); | ||
254 | } | ||
255 | |||
256 | static void __exit ds1682_exit(void) | ||
257 | { | ||
258 | i2c_del_driver(&ds1682_driver); | ||
259 | } | ||
260 | |||
261 | MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); | ||
262 | MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver"); | ||
263 | MODULE_LICENSE("GPL"); | ||
264 | |||
265 | module_init(ds1682_init); | ||
266 | module_exit(ds1682_exit); | ||
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2cb2736d65aa..db7d0f21b65d 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
@@ -505,6 +505,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
505 | * Export the EEPROM bytes through sysfs, since that's convenient. | 505 | * Export the EEPROM bytes through sysfs, since that's convenient. |
506 | * By default, only root should see the data (maybe passwords etc) | 506 | * By default, only root should see the data (maybe passwords etc) |
507 | */ | 507 | */ |
508 | sysfs_bin_attr_init(&at24->bin); | ||
508 | at24->bin.attr.name = "eeprom"; | 509 | at24->bin.attr.name = "eeprom"; |
509 | at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR; | 510 | at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR; |
510 | at24->bin.read = at24_bin_read; | 511 | at24->bin.read = at24_bin_read; |
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index d902d81dde39..d194212a41f6 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
@@ -347,6 +347,7 @@ static int at25_probe(struct spi_device *spi) | |||
347 | * that's sensitive for read and/or write, like ethernet addresses, | 347 | * that's sensitive for read and/or write, like ethernet addresses, |
348 | * security codes, board-specific manufacturing calibrations, etc. | 348 | * security codes, board-specific manufacturing calibrations, etc. |
349 | */ | 349 | */ |
350 | sysfs_bin_attr_init(&at25->bin); | ||
350 | at25->bin.attr.name = "eeprom"; | 351 | at25->bin.attr.name = "eeprom"; |
351 | at25->bin.attr.mode = S_IRUSR; | 352 | at25->bin.attr.mode = S_IRUSR; |
352 | at25->bin.read = at25_bin_read; | 353 | at25->bin.read = at25_bin_read; |
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 2c27193aeaa0..f939ebc2507c 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c | |||
@@ -32,9 +32,6 @@ | |||
32 | static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, | 32 | static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, |
33 | 0x55, 0x56, 0x57, I2C_CLIENT_END }; | 33 | 0x55, 0x56, 0x57, I2C_CLIENT_END }; |
34 | 34 | ||
35 | /* Insmod parameters */ | ||
36 | I2C_CLIENT_INSMOD_1(eeprom); | ||
37 | |||
38 | 35 | ||
39 | /* Size of EEPROM in bytes */ | 36 | /* Size of EEPROM in bytes */ |
40 | #define EEPROM_SIZE 256 | 37 | #define EEPROM_SIZE 256 |
@@ -135,8 +132,7 @@ static struct bin_attribute eeprom_attr = { | |||
135 | }; | 132 | }; |
136 | 133 | ||
137 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 134 | /* Return 0 if detection is successful, -ENODEV otherwise */ |
138 | static int eeprom_detect(struct i2c_client *client, int kind, | 135 | static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info) |
139 | struct i2c_board_info *info) | ||
140 | { | 136 | { |
141 | struct i2c_adapter *adapter = client->adapter; | 137 | struct i2c_adapter *adapter = client->adapter; |
142 | 138 | ||
@@ -233,7 +229,7 @@ static struct i2c_driver eeprom_driver = { | |||
233 | 229 | ||
234 | .class = I2C_CLASS_DDC | I2C_CLASS_SPD, | 230 | .class = I2C_CLASS_DDC | I2C_CLASS_SPD, |
235 | .detect = eeprom_detect, | 231 | .detect = eeprom_detect, |
236 | .address_data = &addr_data, | 232 | .address_list = normal_i2c, |
237 | }; | 233 | }; |
238 | 234 | ||
239 | static int __init eeprom_init(void) | 235 | static int __init eeprom_init(void) |
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index e9eae4a78402..48c84a58163e 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <linux/slab.h> | ||
30 | 31 | ||
31 | static LIST_HEAD(container_list); | 32 | static LIST_HEAD(container_list); |
32 | static DEFINE_MUTEX(container_list_lock); | 33 | static DEFINE_MUTEX(container_list_lock); |
@@ -391,6 +392,7 @@ static const char *const enclosure_status [] = { | |||
391 | [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", | 392 | [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", |
392 | [ENCLOSURE_STATUS_UNKNOWN] = "unknown", | 393 | [ENCLOSURE_STATUS_UNKNOWN] = "unknown", |
393 | [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", | 394 | [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", |
395 | [ENCLOSURE_STATUS_MAX] = NULL, | ||
394 | }; | 396 | }; |
395 | 397 | ||
396 | static const char *const enclosure_type [] = { | 398 | static const char *const enclosure_type [] = { |
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c index ba4694169d79..46b3439673e9 100644 --- a/drivers/misc/ep93xx_pwm.c +++ b/drivers/misc/ep93xx_pwm.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/slab.h> | ||
22 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
23 | #include <linux/err.h> | 24 | #include <linux/err.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index a92a3a742b43..98ad0120aa9b 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/wait.h> | 26 | #include <linux/wait.h> |
27 | #include <linux/poll.h> | 27 | #include <linux/poll.h> |
28 | #include <linux/slab.h> | ||
28 | #include "hpilo.h" | 29 | #include "hpilo.h" |
29 | 30 | ||
30 | static struct class *ilo_class; | 31 | static struct class *ilo_class; |
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h index 38576050776a..247eb386a973 100644 --- a/drivers/misc/hpilo.h +++ b/drivers/misc/hpilo.h | |||
@@ -44,9 +44,20 @@ struct ilo_hwinfo { | |||
44 | 44 | ||
45 | struct pci_dev *ilo_dev; | 45 | struct pci_dev *ilo_dev; |
46 | 46 | ||
47 | /* | ||
48 | * open_lock serializes ccb_cnt during open and close | ||
49 | * [ irq disabled ] | ||
50 | * -> alloc_lock used when adding/removing/searching ccb_alloc, | ||
51 | * which represents all ccbs open on the device | ||
52 | * --> fifo_lock controls access to fifo queues shared with hw | ||
53 | * | ||
54 | * Locks must be taken in this order, but open_lock and alloc_lock | ||
55 | * are optional, they do not need to be held in order to take a | ||
56 | * lower level lock. | ||
57 | */ | ||
58 | spinlock_t open_lock; | ||
47 | spinlock_t alloc_lock; | 59 | spinlock_t alloc_lock; |
48 | spinlock_t fifo_lock; | 60 | spinlock_t fifo_lock; |
49 | spinlock_t open_lock; | ||
50 | 61 | ||
51 | struct cdev cdev; | 62 | struct cdev cdev; |
52 | }; | 63 | }; |
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c index e2031739aa29..5c766b4fb238 100644 --- a/drivers/misc/ibmasm/command.c +++ b/drivers/misc/ibmasm/command.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
26 | #include <linux/slab.h> | ||
26 | #include "ibmasm.h" | 27 | #include "ibmasm.h" |
27 | #include "lowlevel.h" | 28 | #include "lowlevel.h" |
28 | 29 | ||
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c index 572d41ffc186..76bfda1ffaa9 100644 --- a/drivers/misc/ibmasm/event.c +++ b/drivers/misc/ibmasm/event.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
26 | #include <linux/slab.h> | ||
26 | #include "ibmasm.h" | 27 | #include "ibmasm.h" |
27 | #include "lowlevel.h" | 28 | #include "lowlevel.h" |
28 | 29 | ||
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index aecf40ecb3a4..8844a3f45381 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c | |||
@@ -75,6 +75,7 @@ | |||
75 | 75 | ||
76 | #include <linux/fs.h> | 76 | #include <linux/fs.h> |
77 | #include <linux/pagemap.h> | 77 | #include <linux/pagemap.h> |
78 | #include <linux/slab.h> | ||
78 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
79 | #include <asm/io.h> | 80 | #include <asm/io.h> |
80 | #include "ibmasm.h" | 81 | #include "ibmasm.h" |
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index dc14b0b9cbfa..a234d965243b 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c | |||
@@ -52,6 +52,7 @@ | |||
52 | 52 | ||
53 | #include <linux/pci.h> | 53 | #include <linux/pci.h> |
54 | #include <linux/init.h> | 54 | #include <linux/init.h> |
55 | #include <linux/slab.h> | ||
55 | #include "ibmasm.h" | 56 | #include "ibmasm.h" |
56 | #include "lowlevel.h" | 57 | #include "lowlevel.h" |
57 | #include "remote.h" | 58 | #include "remote.h" |
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 6e43ab4231ae..152e9d93eecb 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c | |||
@@ -26,13 +26,11 @@ | |||
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/log2.h> | 28 | #include <linux/log2.h> |
29 | #include <linux/slab.h> | ||
29 | 30 | ||
30 | /* Addresses to scan */ | 31 | /* Addresses to scan */ |
31 | static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; | 32 | static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; |
32 | 33 | ||
33 | /* Insmod parameters */ | ||
34 | I2C_CLIENT_INSMOD_1(ics932s401); | ||
35 | |||
36 | /* ICS932S401 registers */ | 34 | /* ICS932S401 registers */ |
37 | #define ICS932S401_REG_CFG2 0x01 | 35 | #define ICS932S401_REG_CFG2 0x01 |
38 | #define ICS932S401_CFG1_SPREAD 0x01 | 36 | #define ICS932S401_CFG1_SPREAD 0x01 |
@@ -106,12 +104,12 @@ struct ics932s401_data { | |||
106 | 104 | ||
107 | static int ics932s401_probe(struct i2c_client *client, | 105 | static int ics932s401_probe(struct i2c_client *client, |
108 | const struct i2c_device_id *id); | 106 | const struct i2c_device_id *id); |
109 | static int ics932s401_detect(struct i2c_client *client, int kind, | 107 | static int ics932s401_detect(struct i2c_client *client, |
110 | struct i2c_board_info *info); | 108 | struct i2c_board_info *info); |
111 | static int ics932s401_remove(struct i2c_client *client); | 109 | static int ics932s401_remove(struct i2c_client *client); |
112 | 110 | ||
113 | static const struct i2c_device_id ics932s401_id[] = { | 111 | static const struct i2c_device_id ics932s401_id[] = { |
114 | { "ics932s401", ics932s401 }, | 112 | { "ics932s401", 0 }, |
115 | { } | 113 | { } |
116 | }; | 114 | }; |
117 | MODULE_DEVICE_TABLE(i2c, ics932s401_id); | 115 | MODULE_DEVICE_TABLE(i2c, ics932s401_id); |
@@ -125,7 +123,7 @@ static struct i2c_driver ics932s401_driver = { | |||
125 | .remove = ics932s401_remove, | 123 | .remove = ics932s401_remove, |
126 | .id_table = ics932s401_id, | 124 | .id_table = ics932s401_id, |
127 | .detect = ics932s401_detect, | 125 | .detect = ics932s401_detect, |
128 | .address_data = &addr_data, | 126 | .address_list = normal_i2c, |
129 | }; | 127 | }; |
130 | 128 | ||
131 | static struct ics932s401_data *ics932s401_update_device(struct device *dev) | 129 | static struct ics932s401_data *ics932s401_update_device(struct device *dev) |
@@ -413,36 +411,29 @@ static ssize_t show_spread(struct device *dev, | |||
413 | } | 411 | } |
414 | 412 | ||
415 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 413 | /* Return 0 if detection is successful, -ENODEV otherwise */ |
416 | static int ics932s401_detect(struct i2c_client *client, int kind, | 414 | static int ics932s401_detect(struct i2c_client *client, |
417 | struct i2c_board_info *info) | 415 | struct i2c_board_info *info) |
418 | { | 416 | { |
419 | struct i2c_adapter *adapter = client->adapter; | 417 | struct i2c_adapter *adapter = client->adapter; |
418 | int vendor, device, revision; | ||
420 | 419 | ||
421 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) | 420 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) |
422 | return -ENODEV; | 421 | return -ENODEV; |
423 | 422 | ||
424 | if (kind <= 0) { | 423 | vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV); |
425 | int vendor, device, revision; | 424 | vendor >>= 8; |
426 | 425 | revision = vendor >> ICS932S401_REV_SHIFT; | |
427 | vendor = i2c_smbus_read_word_data(client, | 426 | vendor &= ICS932S401_VENDOR_MASK; |
428 | ICS932S401_REG_VENDOR_REV); | 427 | if (vendor != ICS932S401_VENDOR) |
429 | vendor >>= 8; | 428 | return -ENODEV; |
430 | revision = vendor >> ICS932S401_REV_SHIFT; | 429 | |
431 | vendor &= ICS932S401_VENDOR_MASK; | 430 | device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE); |
432 | if (vendor != ICS932S401_VENDOR) | 431 | device >>= 8; |
433 | return -ENODEV; | 432 | if (device != ICS932S401_DEVICE) |
434 | 433 | return -ENODEV; | |
435 | device = i2c_smbus_read_word_data(client, | 434 | |
436 | ICS932S401_REG_DEVICE); | 435 | if (revision != ICS932S401_REV) |
437 | device >>= 8; | 436 | dev_info(&adapter->dev, "Unknown revision %d\n", revision); |
438 | if (device != ICS932S401_DEVICE) | ||
439 | return -ENODEV; | ||
440 | |||
441 | if (revision != ICS932S401_REV) | ||
442 | dev_info(&adapter->dev, "Unknown revision %d\n", | ||
443 | revision); | ||
444 | } else | ||
445 | dev_dbg(&adapter->dev, "detection forced\n"); | ||
446 | 437 | ||
447 | strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); | 438 | strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); |
448 | 439 | ||
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 60b0b1a4fb3a..193206602d88 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/ioc4.h> | 31 | #include <linux/ioc4.h> |
32 | #include <linux/ktime.h> | 32 | #include <linux/ktime.h> |
33 | #include <linux/slab.h> | ||
33 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
34 | #include <linux/time.h> | 35 | #include <linux/time.h> |
35 | #include <asm/io.h> | 36 | #include <asm/io.h> |
@@ -138,7 +139,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is) | |||
138 | * even though the following code utilizes external interrupt registers | 139 | * even though the following code utilizes external interrupt registers |
139 | * to perform the speed calculation. | 140 | * to perform the speed calculation. |
140 | */ | 141 | */ |
141 | static void | 142 | static void __devinit |
142 | ioc4_clock_calibrate(struct ioc4_driver_data *idd) | 143 | ioc4_clock_calibrate(struct ioc4_driver_data *idd) |
143 | { | 144 | { |
144 | union ioc4_int_out int_out; | 145 | union ioc4_int_out int_out; |
@@ -230,7 +231,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd) | |||
230 | * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. | 231 | * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. |
231 | * If neither is present, it's a PCI-RT. | 232 | * If neither is present, it's a PCI-RT. |
232 | */ | 233 | */ |
233 | static unsigned int | 234 | static unsigned int __devinit |
234 | ioc4_variant(struct ioc4_driver_data *idd) | 235 | ioc4_variant(struct ioc4_driver_data *idd) |
235 | { | 236 | { |
236 | struct pci_dev *pdev = NULL; | 237 | struct pci_dev *pdev = NULL; |
@@ -269,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd) | |||
269 | return IOC4_VARIANT_PCI_RT; | 270 | return IOC4_VARIANT_PCI_RT; |
270 | } | 271 | } |
271 | 272 | ||
272 | static void | 273 | static void __devinit |
273 | ioc4_load_modules(struct work_struct *work) | 274 | ioc4_load_modules(struct work_struct *work) |
274 | { | 275 | { |
275 | /* arg just has to be freed */ | 276 | /* arg just has to be freed */ |
@@ -280,7 +281,7 @@ ioc4_load_modules(struct work_struct *work) | |||
280 | } | 281 | } |
281 | 282 | ||
282 | /* Adds a new instance of an IOC4 card */ | 283 | /* Adds a new instance of an IOC4 card */ |
283 | static int | 284 | static int __devinit |
284 | ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | 285 | ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) |
285 | { | 286 | { |
286 | struct ioc4_driver_data *idd; | 287 | struct ioc4_driver_data *idd; |
@@ -425,7 +426,7 @@ out: | |||
425 | } | 426 | } |
426 | 427 | ||
427 | /* Removes a particular instance of an IOC4 card. */ | 428 | /* Removes a particular instance of an IOC4 card. */ |
428 | static void | 429 | static void __devexit |
429 | ioc4_remove(struct pci_dev *pdev) | 430 | ioc4_remove(struct pci_dev *pdev) |
430 | { | 431 | { |
431 | struct ioc4_submodule *is; | 432 | struct ioc4_submodule *is; |
@@ -476,7 +477,7 @@ static struct pci_driver ioc4_driver = { | |||
476 | .name = "IOC4", | 477 | .name = "IOC4", |
477 | .id_table = ioc4_id_table, | 478 | .id_table = ioc4_id_table, |
478 | .probe = ioc4_probe, | 479 | .probe = ioc4_probe, |
479 | .remove = ioc4_remove, | 480 | .remove = __devexit_p(ioc4_remove), |
480 | }; | 481 | }; |
481 | 482 | ||
482 | MODULE_DEVICE_TABLE(pci, ioc4_id_table); | 483 | MODULE_DEVICE_TABLE(pci, ioc4_id_table); |
@@ -486,14 +487,14 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table); | |||
486 | *********************/ | 487 | *********************/ |
487 | 488 | ||
488 | /* Module load */ | 489 | /* Module load */ |
489 | static int __devinit | 490 | static int __init |
490 | ioc4_init(void) | 491 | ioc4_init(void) |
491 | { | 492 | { |
492 | return pci_register_driver(&ioc4_driver); | 493 | return pci_register_driver(&ioc4_driver); |
493 | } | 494 | } |
494 | 495 | ||
495 | /* Module unload */ | 496 | /* Module unload */ |
496 | static void __devexit | 497 | static void __exit |
497 | ioc4_exit(void) | 498 | ioc4_exit(void) |
498 | { | 499 | { |
499 | /* Ensure ioc4_load_modules() has completed before exiting */ | 500 | /* Ensure ioc4_load_modules() has completed before exiting */ |
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig new file mode 100644 index 000000000000..9e4b88fb57f1 --- /dev/null +++ b/drivers/misc/iwmc3200top/Kconfig | |||
@@ -0,0 +1,20 @@ | |||
1 | config IWMC3200TOP | ||
2 | tristate "Intel Wireless MultiCom Top Driver" | ||
3 | depends on MMC && EXPERIMENTAL | ||
4 | select FW_LOADER | ||
5 | ---help--- | ||
6 | Intel Wireless MultiCom 3200 Top driver is responsible for | ||
7 | for firmware load and enabled coms enumeration | ||
8 | |||
9 | config IWMC3200TOP_DEBUG | ||
10 | bool "Enable full debug output of iwmc3200top Driver" | ||
11 | depends on IWMC3200TOP | ||
12 | ---help--- | ||
13 | Enable full debug output of iwmc3200top Driver | ||
14 | |||
15 | config IWMC3200TOP_DEBUGFS | ||
16 | bool "Enable Debugfs debugging interface for iwmc3200top" | ||
17 | depends on IWMC3200TOP | ||
18 | ---help--- | ||
19 | Enable creation of debugfs files for iwmc3200top | ||
20 | |||
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile new file mode 100644 index 000000000000..fbf53fb4634e --- /dev/null +++ b/drivers/misc/iwmc3200top/Makefile | |||
@@ -0,0 +1,29 @@ | |||
1 | # iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
2 | # drivers/misc/iwmc3200top/Makefile | ||
3 | # | ||
4 | # Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or | ||
7 | # modify it under the terms of the GNU General Public License version | ||
8 | # 2 as published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, | ||
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | # GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License | ||
16 | # along with this program; if not, write to the Free Software | ||
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
18 | # 02110-1301, USA. | ||
19 | # | ||
20 | # | ||
21 | # Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
22 | # - | ||
23 | # | ||
24 | # | ||
25 | |||
26 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o | ||
27 | iwmc3200top-objs := main.o fw-download.o | ||
28 | iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o | ||
29 | iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o | ||
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c new file mode 100644 index 000000000000..e9eda471f6e0 --- /dev/null +++ b/drivers/misc/iwmc3200top/debugfs.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/debufs.c | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/ctype.h> | ||
31 | #include <linux/mmc/sdio_func.h> | ||
32 | #include <linux/mmc/sdio.h> | ||
33 | #include <linux/debugfs.h> | ||
34 | |||
35 | #include "iwmc3200top.h" | ||
36 | #include "fw-msg.h" | ||
37 | #include "log.h" | ||
38 | #include "debugfs.h" | ||
39 | |||
40 | |||
41 | |||
42 | /* Constants definition */ | ||
43 | #define HEXADECIMAL_RADIX 16 | ||
44 | |||
45 | /* Functions definition */ | ||
46 | |||
47 | |||
48 | #define DEBUGFS_ADD(name, parent) do { \ | ||
49 | dbgfs->dbgfs_##parent##_files.file_##name = \ | ||
50 | debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \ | ||
51 | &iwmct_dbgfs_##name##_ops); \ | ||
52 | } while (0) | ||
53 | |||
54 | #define DEBUGFS_RM(name) do { \ | ||
55 | debugfs_remove(name); \ | ||
56 | name = NULL; \ | ||
57 | } while (0) | ||
58 | |||
59 | #define DEBUGFS_READ_FUNC(name) \ | ||
60 | ssize_t iwmct_dbgfs_##name##_read(struct file *file, \ | ||
61 | char __user *user_buf, \ | ||
62 | size_t count, loff_t *ppos); | ||
63 | |||
64 | #define DEBUGFS_WRITE_FUNC(name) \ | ||
65 | ssize_t iwmct_dbgfs_##name##_write(struct file *file, \ | ||
66 | const char __user *user_buf, \ | ||
67 | size_t count, loff_t *ppos); | ||
68 | |||
69 | #define DEBUGFS_READ_FILE_OPS(name) \ | ||
70 | DEBUGFS_READ_FUNC(name) \ | ||
71 | static const struct file_operations iwmct_dbgfs_##name##_ops = { \ | ||
72 | .read = iwmct_dbgfs_##name##_read, \ | ||
73 | .open = iwmct_dbgfs_open_file_generic, \ | ||
74 | }; | ||
75 | |||
76 | #define DEBUGFS_WRITE_FILE_OPS(name) \ | ||
77 | DEBUGFS_WRITE_FUNC(name) \ | ||
78 | static const struct file_operations iwmct_dbgfs_##name##_ops = { \ | ||
79 | .write = iwmct_dbgfs_##name##_write, \ | ||
80 | .open = iwmct_dbgfs_open_file_generic, \ | ||
81 | }; | ||
82 | |||
83 | #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ | ||
84 | DEBUGFS_READ_FUNC(name) \ | ||
85 | DEBUGFS_WRITE_FUNC(name) \ | ||
86 | static const struct file_operations iwmct_dbgfs_##name##_ops = {\ | ||
87 | .write = iwmct_dbgfs_##name##_write, \ | ||
88 | .read = iwmct_dbgfs_##name##_read, \ | ||
89 | .open = iwmct_dbgfs_open_file_generic, \ | ||
90 | }; | ||
91 | |||
92 | |||
93 | /* Debugfs file ops definitions */ | ||
94 | |||
95 | /* | ||
96 | * Create the debugfs files and directories | ||
97 | * | ||
98 | */ | ||
99 | void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name) | ||
100 | { | ||
101 | struct iwmct_debugfs *dbgfs; | ||
102 | |||
103 | dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL); | ||
104 | if (!dbgfs) { | ||
105 | LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", | ||
106 | sizeof(struct iwmct_debugfs)); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | priv->dbgfs = dbgfs; | ||
111 | dbgfs->name = name; | ||
112 | dbgfs->dir_drv = debugfs_create_dir(name, NULL); | ||
113 | if (!dbgfs->dir_drv) { | ||
114 | LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n"); | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | return; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * Remove the debugfs files and directories | ||
123 | * | ||
124 | */ | ||
125 | void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs) | ||
126 | { | ||
127 | if (!dbgfs) | ||
128 | return; | ||
129 | |||
130 | DEBUGFS_RM(dbgfs->dir_drv); | ||
131 | kfree(dbgfs); | ||
132 | dbgfs = NULL; | ||
133 | } | ||
134 | |||
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h new file mode 100644 index 000000000000..71d45759b40f --- /dev/null +++ b/drivers/misc/iwmc3200top/debugfs.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/debufs.h | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __DEBUGFS_H__ | ||
28 | #define __DEBUGFS_H__ | ||
29 | |||
30 | |||
31 | #ifdef CONFIG_IWMC3200TOP_DEBUGFS | ||
32 | |||
33 | struct iwmct_debugfs { | ||
34 | const char *name; | ||
35 | struct dentry *dir_drv; | ||
36 | struct dir_drv_files { | ||
37 | } dbgfs_drv_files; | ||
38 | }; | ||
39 | |||
40 | void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name); | ||
41 | void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs); | ||
42 | |||
43 | #else /* CONFIG_IWMC3200TOP_DEBUGFS */ | ||
44 | |||
45 | struct iwmct_debugfs; | ||
46 | |||
47 | static inline void | ||
48 | iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name) | ||
49 | {} | ||
50 | |||
51 | static inline void | ||
52 | iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs) | ||
53 | {} | ||
54 | |||
55 | #endif /* CONFIG_IWMC3200TOP_DEBUGFS */ | ||
56 | |||
57 | #endif /* __DEBUGFS_H__ */ | ||
58 | |||
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c new file mode 100644 index 000000000000..e27afde6e99f --- /dev/null +++ b/drivers/misc/iwmc3200top/fw-download.c | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/fw-download.c | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/firmware.h> | ||
28 | #include <linux/mmc/sdio_func.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <asm/unaligned.h> | ||
31 | |||
32 | #include "iwmc3200top.h" | ||
33 | #include "log.h" | ||
34 | #include "fw-msg.h" | ||
35 | |||
36 | #define CHECKSUM_BYTES_NUM sizeof(u32) | ||
37 | |||
38 | /** | ||
39 | init parser struct with file | ||
40 | */ | ||
41 | static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file, | ||
42 | size_t file_size, size_t block_size) | ||
43 | { | ||
44 | struct iwmct_parser *parser = &priv->parser; | ||
45 | struct iwmct_fw_hdr *fw_hdr = &parser->versions; | ||
46 | |||
47 | LOG_TRACE(priv, FW_DOWNLOAD, "-->\n"); | ||
48 | |||
49 | LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size); | ||
50 | |||
51 | parser->file = file; | ||
52 | parser->file_size = file_size; | ||
53 | parser->cur_pos = 0; | ||
54 | parser->entry_point = 0; | ||
55 | parser->buf = kzalloc(block_size, GFP_KERNEL); | ||
56 | if (!parser->buf) { | ||
57 | LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n"); | ||
58 | return -ENOMEM; | ||
59 | } | ||
60 | parser->buf_size = block_size; | ||
61 | |||
62 | /* extract fw versions */ | ||
63 | memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr)); | ||
64 | LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n" | ||
65 | "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n", | ||
66 | fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision, | ||
67 | fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision, | ||
68 | fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision, | ||
69 | fw_hdr->tic_name); | ||
70 | |||
71 | parser->cur_pos += sizeof(struct iwmct_fw_hdr); | ||
72 | |||
73 | LOG_TRACE(priv, FW_DOWNLOAD, "<--\n"); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static bool iwmct_checksum(struct iwmct_priv *priv) | ||
78 | { | ||
79 | struct iwmct_parser *parser = &priv->parser; | ||
80 | __le32 *file = (__le32 *)parser->file; | ||
81 | int i, pad, steps; | ||
82 | u32 accum = 0; | ||
83 | u32 checksum; | ||
84 | u32 mask = 0xffffffff; | ||
85 | |||
86 | pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4; | ||
87 | steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4; | ||
88 | |||
89 | LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps); | ||
90 | |||
91 | for (i = 0; i < steps; i++) | ||
92 | accum += le32_to_cpu(file[i]); | ||
93 | |||
94 | if (pad) { | ||
95 | mask <<= 8 * (4 - pad); | ||
96 | accum += le32_to_cpu(file[steps]) & mask; | ||
97 | } | ||
98 | |||
99 | checksum = get_unaligned_le32((__le32 *)(parser->file + | ||
100 | parser->file_size - CHECKSUM_BYTES_NUM)); | ||
101 | |||
102 | LOG_INFO(priv, FW_DOWNLOAD, | ||
103 | "compare checksum accum=0x%x to checksum=0x%x\n", | ||
104 | accum, checksum); | ||
105 | |||
106 | return checksum == accum; | ||
107 | } | ||
108 | |||
109 | static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec, | ||
110 | size_t *sec_size, __le32 *sec_addr) | ||
111 | { | ||
112 | struct iwmct_parser *parser = &priv->parser; | ||
113 | struct iwmct_dbg *dbg = &priv->dbg; | ||
114 | struct iwmct_fw_sec_hdr *sec_hdr; | ||
115 | |||
116 | LOG_TRACE(priv, FW_DOWNLOAD, "-->\n"); | ||
117 | |||
118 | while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr) | ||
119 | <= parser->file_size) { | ||
120 | |||
121 | sec_hdr = (struct iwmct_fw_sec_hdr *) | ||
122 | (parser->file + parser->cur_pos); | ||
123 | parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr); | ||
124 | |||
125 | LOG_INFO(priv, FW_DOWNLOAD, | ||
126 | "sec hdr: type=%s addr=0x%x size=%d\n", | ||
127 | sec_hdr->type, sec_hdr->target_addr, | ||
128 | sec_hdr->data_size); | ||
129 | |||
130 | if (strcmp(sec_hdr->type, "ENT") == 0) | ||
131 | parser->entry_point = le32_to_cpu(sec_hdr->target_addr); | ||
132 | else if (strcmp(sec_hdr->type, "LBL") == 0) | ||
133 | strcpy(dbg->label_fw, parser->file + parser->cur_pos); | ||
134 | else if (((strcmp(sec_hdr->type, "TOP") == 0) && | ||
135 | (priv->barker & BARKER_DNLOAD_TOP_MSK)) || | ||
136 | ((strcmp(sec_hdr->type, "GPS") == 0) && | ||
137 | (priv->barker & BARKER_DNLOAD_GPS_MSK)) || | ||
138 | ((strcmp(sec_hdr->type, "BTH") == 0) && | ||
139 | (priv->barker & BARKER_DNLOAD_BT_MSK))) { | ||
140 | *sec_addr = sec_hdr->target_addr; | ||
141 | *sec_size = le32_to_cpu(sec_hdr->data_size); | ||
142 | *p_sec = parser->file + parser->cur_pos; | ||
143 | parser->cur_pos += le32_to_cpu(sec_hdr->data_size); | ||
144 | return 1; | ||
145 | } else if (strcmp(sec_hdr->type, "LOG") != 0) | ||
146 | LOG_WARNING(priv, FW_DOWNLOAD, | ||
147 | "skipping section type %s\n", | ||
148 | sec_hdr->type); | ||
149 | |||
150 | parser->cur_pos += le32_to_cpu(sec_hdr->data_size); | ||
151 | LOG_INFO(priv, FW_DOWNLOAD, | ||
152 | "finished with section cur_pos=%zd\n", parser->cur_pos); | ||
153 | } | ||
154 | |||
155 | LOG_TRACE(priv, INIT, "<--\n"); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec, | ||
160 | size_t sec_size, __le32 addr) | ||
161 | { | ||
162 | struct iwmct_parser *parser = &priv->parser; | ||
163 | struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf; | ||
164 | const u8 *cur_block = p_sec; | ||
165 | size_t sent = 0; | ||
166 | int cnt = 0; | ||
167 | int ret = 0; | ||
168 | u32 cmd = 0; | ||
169 | |||
170 | LOG_TRACE(priv, FW_DOWNLOAD, "-->\n"); | ||
171 | LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n", | ||
172 | addr, sec_size); | ||
173 | |||
174 | while (sent < sec_size) { | ||
175 | int i; | ||
176 | u32 chksm = 0; | ||
177 | u32 reset = atomic_read(&priv->reset); | ||
178 | /* actual FW data */ | ||
179 | u32 data_size = min(parser->buf_size - sizeof(*hdr), | ||
180 | sec_size - sent); | ||
181 | /* Pad to block size */ | ||
182 | u32 trans_size = (data_size + sizeof(*hdr) + | ||
183 | IWMC_SDIO_BLK_SIZE - 1) & | ||
184 | ~(IWMC_SDIO_BLK_SIZE - 1); | ||
185 | ++cnt; | ||
186 | |||
187 | /* in case of reset, interrupt FW DOWNLAOD */ | ||
188 | if (reset) { | ||
189 | LOG_INFO(priv, FW_DOWNLOAD, | ||
190 | "Reset detected. Abort FW download!!!"); | ||
191 | ret = -ECANCELED; | ||
192 | goto exit; | ||
193 | } | ||
194 | |||
195 | memset(parser->buf, 0, parser->buf_size); | ||
196 | cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS; | ||
197 | cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; | ||
198 | cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS; | ||
199 | cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS; | ||
200 | hdr->data_size = cpu_to_le32(data_size); | ||
201 | hdr->target_addr = addr; | ||
202 | |||
203 | /* checksum is allowed for sizes divisible by 4 */ | ||
204 | if (data_size & 0x3) | ||
205 | cmd &= ~CMD_HDR_USE_CHECKSUM_MSK; | ||
206 | |||
207 | memcpy(hdr->data, cur_block, data_size); | ||
208 | |||
209 | |||
210 | if (cmd & CMD_HDR_USE_CHECKSUM_MSK) { | ||
211 | |||
212 | chksm = data_size + le32_to_cpu(addr) + cmd; | ||
213 | for (i = 0; i < data_size >> 2; i++) | ||
214 | chksm += ((u32 *)cur_block)[i]; | ||
215 | |||
216 | hdr->block_chksm = cpu_to_le32(chksm); | ||
217 | LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n", | ||
218 | hdr->block_chksm); | ||
219 | } | ||
220 | |||
221 | LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, " | ||
222 | "sec_size=%zd, startAddress 0x%X\n", | ||
223 | cnt, trans_size, sent, sec_size, addr); | ||
224 | |||
225 | if (priv->dbg.dump) | ||
226 | LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size); | ||
227 | |||
228 | |||
229 | hdr->cmd = cpu_to_le32(cmd); | ||
230 | /* send it down */ | ||
231 | /* TODO: add more proper sending and error checking */ | ||
232 | ret = iwmct_tx(priv, parser->buf, trans_size); | ||
233 | if (ret != 0) { | ||
234 | LOG_INFO(priv, FW_DOWNLOAD, | ||
235 | "iwmct_tx returned %d\n", ret); | ||
236 | goto exit; | ||
237 | } | ||
238 | |||
239 | addr = cpu_to_le32(le32_to_cpu(addr) + data_size); | ||
240 | sent += data_size; | ||
241 | cur_block = p_sec + sent; | ||
242 | |||
243 | if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) { | ||
244 | LOG_INFO(priv, FW_DOWNLOAD, | ||
245 | "Block number limit is reached [%d]\n", | ||
246 | priv->dbg.blocks); | ||
247 | break; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | if (sent < sec_size) | ||
252 | ret = -EINVAL; | ||
253 | exit: | ||
254 | LOG_TRACE(priv, FW_DOWNLOAD, "<--\n"); | ||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump) | ||
259 | { | ||
260 | struct iwmct_parser *parser = &priv->parser; | ||
261 | struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf; | ||
262 | int ret; | ||
263 | u32 cmd; | ||
264 | |||
265 | LOG_TRACE(priv, FW_DOWNLOAD, "-->\n"); | ||
266 | |||
267 | memset(parser->buf, 0, parser->buf_size); | ||
268 | cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; | ||
269 | if (jump) { | ||
270 | cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS; | ||
271 | hdr->target_addr = cpu_to_le32(parser->entry_point); | ||
272 | LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n", | ||
273 | parser->entry_point); | ||
274 | } else { | ||
275 | cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS; | ||
276 | LOG_INFO(priv, FW_DOWNLOAD, "last command\n"); | ||
277 | } | ||
278 | |||
279 | hdr->cmd = cpu_to_le32(cmd); | ||
280 | |||
281 | LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr)); | ||
282 | /* send it down */ | ||
283 | /* TODO: add more proper sending and error checking */ | ||
284 | ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE); | ||
285 | if (ret) | ||
286 | LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret); | ||
287 | |||
288 | LOG_TRACE(priv, FW_DOWNLOAD, "<--\n"); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | int iwmct_fw_load(struct iwmct_priv *priv) | ||
293 | { | ||
294 | const u8 *fw_name = FW_NAME(FW_API_VER); | ||
295 | const struct firmware *raw; | ||
296 | const u8 *pdata; | ||
297 | size_t len; | ||
298 | __le32 addr; | ||
299 | int ret; | ||
300 | |||
301 | |||
302 | LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n", | ||
303 | priv->barker); | ||
304 | LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n", | ||
305 | (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not"); | ||
306 | LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n", | ||
307 | (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not"); | ||
308 | LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n", | ||
309 | (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not"); | ||
310 | |||
311 | |||
312 | /* get the firmware */ | ||
313 | ret = request_firmware(&raw, fw_name, &priv->func->dev); | ||
314 | if (ret < 0) { | ||
315 | LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n", | ||
316 | fw_name, ret); | ||
317 | goto exit; | ||
318 | } | ||
319 | |||
320 | if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) { | ||
321 | LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n", | ||
322 | fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size); | ||
323 | goto exit; | ||
324 | } | ||
325 | |||
326 | LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name); | ||
327 | |||
328 | /* clear parser struct */ | ||
329 | ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len); | ||
330 | if (ret < 0) { | ||
331 | LOG_ERROR(priv, FW_DOWNLOAD, | ||
332 | "iwmct_parser_init failed: Reason %d\n", ret); | ||
333 | goto exit; | ||
334 | } | ||
335 | |||
336 | if (!iwmct_checksum(priv)) { | ||
337 | LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n"); | ||
338 | ret = -EINVAL; | ||
339 | goto exit; | ||
340 | } | ||
341 | |||
342 | /* download firmware to device */ | ||
343 | while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) { | ||
344 | ret = iwmct_download_section(priv, pdata, len, addr); | ||
345 | if (ret) { | ||
346 | LOG_ERROR(priv, FW_DOWNLOAD, | ||
347 | "%s download section failed\n", fw_name); | ||
348 | goto exit; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK)); | ||
353 | |||
354 | exit: | ||
355 | kfree(priv->parser.buf); | ||
356 | release_firmware(raw); | ||
357 | return ret; | ||
358 | } | ||
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h new file mode 100644 index 000000000000..9e26b75bd482 --- /dev/null +++ b/drivers/misc/iwmc3200top/fw-msg.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/fw-msg.h | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __FWMSG_H__ | ||
28 | #define __FWMSG_H__ | ||
29 | |||
30 | #define COMM_TYPE_D2H 0xFF | ||
31 | #define COMM_TYPE_H2D 0xEE | ||
32 | |||
33 | #define COMM_CATEGORY_OPERATIONAL 0x00 | ||
34 | #define COMM_CATEGORY_DEBUG 0x01 | ||
35 | #define COMM_CATEGORY_TESTABILITY 0x02 | ||
36 | #define COMM_CATEGORY_DIAGNOSTICS 0x03 | ||
37 | |||
38 | #define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A) | ||
39 | |||
40 | #define FW_LOG_SRC_MAX 32 | ||
41 | #define FW_LOG_SRC_ALL 255 | ||
42 | |||
43 | #define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000) | ||
44 | |||
45 | #define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001) | ||
46 | #define CMD_TST_DEV_RESET cpu_to_le16(0x0060) | ||
47 | #define CMD_TST_FUNC_RESET cpu_to_le16(0x0062) | ||
48 | #define CMD_TST_IFACE_RESET cpu_to_le16(0x0064) | ||
49 | #define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065) | ||
50 | #define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080) | ||
51 | #define CMD_TST_WAKEUP cpu_to_le16(0x0081) | ||
52 | #define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082) | ||
53 | #define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083) | ||
54 | #define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096) | ||
55 | |||
56 | #define OP_OPR_ALIVE cpu_to_le16(0x0010) | ||
57 | #define OP_OPR_CMD_ACK cpu_to_le16(0x001F) | ||
58 | #define OP_OPR_CMD_NACK cpu_to_le16(0x0020) | ||
59 | #define OP_TST_MEM_DUMP cpu_to_le16(0x0043) | ||
60 | |||
61 | #define CMD_FLAG_PADDING_256 0x80 | ||
62 | |||
63 | #define FW_HCMD_BLOCK_SIZE 256 | ||
64 | |||
/* Wire-format message layouts exchanged with the TOP firmware.  All
 * multi-byte fields are little endian (__le16/__le32) and structures
 * are packed to match the on-the-wire layout exactly. */
struct msg_hdr {
	u8 type;		/* COMM_TYPE_D2H or COMM_TYPE_H2D */
	u8 category;		/* one of COMM_CATEGORY_* */
	__le16 opcode;
	u8 seqnum;
	u8 flags;
	__le16 length;		/* payload length in bytes */
} __attribute__((__packed__));

/* Header preceding firmware log payloads (D2H) */
struct log_hdr {
	__le32 timestamp;
	u8 severity;		/* LOG_SEV_* value from the firmware */
	u8 logsource;
	__le16 reserved;
} __attribute__((__packed__));

/* Memory-dump fragment descriptor */
struct mdump_hdr {
	u8 dmpid;
	u8 frag;
	__le16 size;
	__le32 addr;
} __attribute__((__packed__));

/* Envelope for all TOP messages; which union member is valid is
 * determined by hdr.type / hdr.category / hdr.opcode. */
struct top_msg {
	struct msg_hdr hdr;
	union {
		/* D2H messages */
		struct {
			struct log_hdr log_hdr;
			u8 data[1];	/* variable-length log payload */
		} __attribute__((__packed__)) log;

		struct {
			struct log_hdr log_hdr;
			struct mdump_hdr md_hdr;
			u8 data[1];	/* variable-length dump fragment */
		} __attribute__((__packed__)) mdump;

		/* H2D messages */
		struct {
			u8 logsource;
			u8 sevmask;	/* severity bitmask for that source */
		} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
		struct mdump_hdr mdump_req;
	} u;
} __attribute__((__packed__));
111 | |||
112 | |||
113 | #endif /* __FWMSG_H__ */ | ||
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h new file mode 100644 index 000000000000..740ff0738ea8 --- /dev/null +++ b/drivers/misc/iwmc3200top/iwmc3200top.h | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/iwmc3200top.h | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __IWMC3200TOP_H__ | ||
28 | #define __IWMC3200TOP_H__ | ||
29 | |||
30 | #include <linux/workqueue.h> | ||
31 | |||
32 | #define DRV_NAME "iwmc3200top" | ||
33 | #define FW_API_VER 1 | ||
34 | #define _FW_NAME(api) DRV_NAME "." #api ".fw" | ||
35 | #define FW_NAME(api) _FW_NAME(api) | ||
36 | |||
37 | #define IWMC_SDIO_BLK_SIZE 256 | ||
38 | #define IWMC_DEFAULT_TR_BLK 64 | ||
39 | #define IWMC_SDIO_DATA_ADDR 0x0 | ||
40 | #define IWMC_SDIO_INTR_ENABLE_ADDR 0x14 | ||
41 | #define IWMC_SDIO_INTR_STATUS_ADDR 0x13 | ||
42 | #define IWMC_SDIO_INTR_CLEAR_ADDR 0x13 | ||
43 | #define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C | ||
44 | |||
45 | #define COMM_HUB_HEADER_LENGTH 16 | ||
46 | #define LOGGER_HEADER_LENGTH 10 | ||
47 | |||
48 | |||
49 | #define BARKER_DNLOAD_BT_POS 0 | ||
50 | #define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS) | ||
51 | #define BARKER_DNLOAD_GPS_POS 1 | ||
52 | #define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS) | ||
53 | #define BARKER_DNLOAD_TOP_POS 2 | ||
54 | #define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS) | ||
55 | #define BARKER_DNLOAD_RESERVED1_POS 3 | ||
56 | #define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS) | ||
57 | #define BARKER_DNLOAD_JUMP_POS 4 | ||
58 | #define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS) | ||
59 | #define BARKER_DNLOAD_SYNC_POS 5 | ||
60 | #define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS) | ||
61 | #define BARKER_DNLOAD_RESERVED2_POS 6 | ||
62 | #define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS) | ||
63 | #define BARKER_DNLOAD_BARKER_POS 8 | ||
64 | #define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS) | ||
65 | |||
66 | #define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS) | ||
67 | /* whole field barker */ | ||
68 | #define IWMC_BARKER_ACK 0xfeedbabe | ||
69 | |||
70 | #define IWMC_CMD_SIGNATURE 0xcbbc | ||
71 | |||
/* Bit-field layout of the iwmct_fw_load_hdr cmd word */
#define CMD_HDR_OPCODE_POS		0
/* fixed: the mask previously referenced the undefined symbol
 * CMD_HDR_OPCODE_MSK_POS, breaking any use of this macro */
#define CMD_HDR_OPCODE_MSK_MSK		(0xf << CMD_HDR_OPCODE_POS)
#define CMD_HDR_RESPONSE_CODE_POS	4
#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS	8
#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS	9
#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS	10
#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS		11
/* fixed: multi-bit masks must not be wrapped in BIT() — BIT(0x1f << 11)
 * would shift 1 by an enormous amount instead of building a 5-bit mask */
#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS		16
#define CMD_HDR_SIGNATURE_MSK		(0xffff << CMD_HDR_SIGNATURE_POS)
86 | |||
/* Opcodes for the cmd word of struct iwmct_fw_load_hdr (placed in the
 * CMD_HDR_OPCODE field).  WRITE carries section data; JUMP and
 * LAST_COMMAND terminate the download (see iwmct_kick_fw). */
enum {
	IWMC_OPCODE_PING = 0,
	IWMC_OPCODE_READ = 1,
	IWMC_OPCODE_WRITE = 2,
	IWMC_OPCODE_JUMP = 3,
	IWMC_OPCODE_REBOOT = 4,
	IWMC_OPCODE_PERSISTENT_WRITE = 5,
	IWMC_OPCODE_PERSISTENT_READ = 6,
	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
	IWMC_OPCODE_LAST_COMMAND = 15
};

/**
 * struct iwmct_fw_load_hdr - header of one firmware download transfer
 * @cmd: command word assembled from the CMD_HDR_* fields
 * @target_addr: device address the payload is written to
 * @data_size: number of valid payload bytes in @data
 * @block_chksm: additive checksum over cmd, address and payload words,
 *               used only when CMD_HDR_USE_CHECKSUM is set in @cmd
 * @data: payload; the sender pads the whole transfer to a multiple of
 *        IWMC_SDIO_BLK_SIZE
 */
struct iwmct_fw_load_hdr {
	__le32 cmd;
	__le32 target_addr;
	__le32 data_size;
	__le32 block_chksm;
	u8 data[0];
};
106 | |||
/**
 * struct iwmct_fw_hdr
 * holds the versions of all sw components (TOP, GPS and BT) plus the
 * image name
 */
struct iwmct_fw_hdr {
	u8 top_major;
	u8 top_minor;
	u8 top_revision;
	u8 gps_major;
	u8 gps_minor;
	u8 gps_revision;
	u8 bt_major;
	u8 bt_minor;
	u8 bt_revision;
	u8 tic_name[31];
};

/**
 * struct iwmct_fw_sec_hdr - per-section header inside the fw image
 * @type: function type
 * @data_size: section's data size
 * @target_addr: download address
 */
struct iwmct_fw_sec_hdr {
	u8 type[4];
	__le32 data_size;
	__le32 target_addr;
};

/**
 * struct iwmct_parser - firmware image parse/download state
 * @file: fw image
 * @file_size: fw size
 * @cur_pos: position in file
 * @buf: temp buf for download (freed by iwmct_fw_load on exit)
 * @buf_size: size of buf
 * @entry_point: address to jump in fw kick-off
 * @versions: sw component versions parsed from the image header
 */
struct iwmct_parser {
	const u8 *file;
	size_t file_size;
	size_t cur_pos;
	u8 *buf;
	size_t buf_size;
	u32 entry_point;
	struct iwmct_fw_hdr versions;
};
154 | |||
155 | |||
/* Work item queued on iwmct_priv.read_req_list; @iosize is the size of
 * the pending read request */
struct iwmct_work_struct {
	struct list_head list;
	ssize_t iosize;
};

/* Debug/tuning knobs (initialized via iwmct_dbg_init_params) */
struct iwmct_dbg {
	int blocks;		/* >0: stop fw download after this many blocks */
	bool dump;		/* hexdump each download transfer */
	bool jump;		/* presumably selects jump at kick-off - TODO confirm */
	bool direct;		/* set CMD_HDR_DIRECT_ACCESS in download cmds */
	bool checksum;		/* request per-block checksum on download */
	bool fw_download;	/* NOTE(review): use not visible in this file */
	int block_size;		/* NOTE(review): use not visible in this file */
	int download_trans_blks; /* NOTE(review): use not visible in this file */

	char label_fw[256];
};

struct iwmct_debugfs;

/* Per-device driver state */
struct iwmct_priv {
	struct sdio_func *func;		/* underlying SDIO function */
	struct iwmct_debugfs *dbgfs;
	struct iwmct_parser parser;	/* fw parse/download state */
	atomic_t reset;			/* non-zero aborts an in-flight fw download */
	atomic_t dev_sync;
	u32 trans_len;			/* transfer length handed to the fw parser */
	u32 barker;			/* BARKER_DNLOAD_* request bits from device */
	struct iwmct_dbg dbg;

	/* drivers work queue */
	struct workqueue_struct *wq;
	struct workqueue_struct *bus_rescan_wq;
	struct work_struct bus_rescan_worker;
	struct work_struct isr_worker;

	/* drivers wait queue */
	wait_queue_head_t wait_q;

	/* rx request list */
	struct list_head read_req_list;
};
198 | |||
199 | extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count); | ||
200 | extern int iwmct_fw_load(struct iwmct_priv *priv); | ||
201 | |||
202 | extern void iwmct_dbg_init_params(struct iwmct_priv *drv); | ||
203 | extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv); | ||
204 | extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv); | ||
205 | extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len); | ||
206 | |||
207 | #endif /* __IWMC3200TOP_H__ */ | ||
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c new file mode 100644 index 000000000000..a36a55a49cac --- /dev/null +++ b/drivers/misc/iwmc3200top/log.c | |||
@@ -0,0 +1,348 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/log.c | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mmc/sdio_func.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/ctype.h> | ||
31 | #include "fw-msg.h" | ||
32 | #include "iwmc3200top.h" | ||
33 | #include "log.h" | ||
34 | |||
35 | /* Maximal hexadecimal string size of the FW memdump message */ | ||
36 | #define LOG_MSG_SIZE_MAX 12400 | ||
37 | |||
38 | /* iwmct_logdefs is a global used by log macros */ | ||
39 | u8 iwmct_logdefs[LOG_SRC_MAX]; | ||
40 | static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX]; | ||
41 | |||
42 | |||
43 | static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask) | ||
44 | { | ||
45 | int i; | ||
46 | |||
47 | if (src < size) | ||
48 | logdefs[src] = logmask; | ||
49 | else if (src == LOG_SRC_ALL) | ||
50 | for (i = 0; i < size; i++) | ||
51 | logdefs[i] = logmask; | ||
52 | else | ||
53 | return -1; | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | |||
/* Set the driver-side log filter: severity bitmask @logmask for source
 * @src (or all sources when @src == LOG_SRC_ALL).  Returns 0 on
 * success, -1 if @src is out of range. */
int iwmct_log_set_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
}
63 | |||
64 | |||
/* Same as iwmct_log_set_filter() but for the firmware-side log filter
 * table (iwmct_fw_logdefs). */
int iwmct_log_set_fw_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_fw_logdefs,
				   FW_LOG_SRC_MAX, src, logmask);
}
70 | |||
71 | |||
72 | static int log_msg_format_hex(char *str, int slen, u8 *ibuf, | ||
73 | int ilen, char *pref) | ||
74 | { | ||
75 | int pos = 0; | ||
76 | int i; | ||
77 | int len; | ||
78 | |||
79 | for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++) | ||
80 | str[pos] = pref[i]; | ||
81 | |||
82 | for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++) | ||
83 | len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]); | ||
84 | |||
85 | if (i < ilen) | ||
86 | return -1; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | /* NOTE: This function is not thread safe. | ||
92 | Currently it's called only from sdio rx worker - no race there | ||
93 | */ | ||
/* NOTE: This function is not thread safe.
   Currently it's called only from sdio rx worker - no race there
*/
/*
 * iwmct_log_top_message - print a log/debug message received from TOP fw
 * @priv: driver private data
 * @buf: raw message bytes, beginning with struct top_msg
 * @len: number of valid bytes in @buf
 *
 * Drops messages filtered out by the firmware-source filter
 * (iwmct_fw_logdefs) or the driver FW_MSG filter, then prints
 * TESTABILITY messages as hex and DEBUG messages either as a string
 * (OP_DBG_ZSTR_MSG) or as hex.  Uses a static scratch buffer, hence the
 * thread-safety note above.
 */
void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
{
	struct top_msg *msg;
	static char logbuf[LOG_MSG_SIZE_MAX];	/* shared scratch, see NOTE */

	msg = (struct top_msg *)buf;

	/* must at least contain the message header and the log header */
	if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
		LOG_ERROR(priv, FW_MSG, "Log message from TOP "
			  "is too short %d (expected %zd)\n",
			  len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
		return;
	}

	/* both the per-fw-source filter and the driver FW_MSG filter
	 * must enable this severity */
	if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
	      BIT(msg->u.log.log_hdr.severity)) ||
	    !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
		return;

	switch (msg->hdr.category) {
	case COMM_CATEGORY_TESTABILITY:
		if (!(iwmct_logdefs[LOG_SRC_TST] &
		      BIT(msg->u.log.log_hdr.severity)))
			return;
		if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
				       le16_to_cpu(msg->hdr.length) +
				       sizeof(msg->hdr), "<TST>"))
			LOG_WARNING(priv, TST,
				  "TOP TST message is too long, truncating...");
		LOG_WARNING(priv, TST, "%s\n", logbuf);
		break;
	case COMM_CATEGORY_DEBUG:
		/* zero-terminated string message: print payload directly */
		if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
			LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
				((u8 *)msg) + sizeof(msg->hdr)
				+ sizeof(msg->u.log.log_hdr));
		else {
			if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
					       le16_to_cpu(msg->hdr.length)
					       + sizeof(msg->hdr),
					       "<DBG>"))
				LOG_WARNING(priv, FW_MSG,
					   "TOP DBG message is too long,"
					   "truncating...");
			LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
		}
		break;
	default:
		break;
	}
}
145 | |||
146 | static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size) | ||
147 | { | ||
148 | int i, pos, len; | ||
149 | for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) { | ||
150 | len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,", | ||
151 | i, logdefs[i]); | ||
152 | pos += len; | ||
153 | } | ||
154 | buf[pos-1] = '\n'; | ||
155 | buf[pos] = '\0'; | ||
156 | |||
157 | if (i < logdefsz) | ||
158 | return -1; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
/* Format the driver-side log filters into @buf; see _log_get_filter_str. */
int log_get_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
}
166 | |||
/* Format the firmware-side log filters into @buf; see _log_get_filter_str. */
int log_get_fw_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
}
171 | |||
172 | #define HEXADECIMAL_RADIX 16 | ||
173 | #define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */ | ||
174 | |||
175 | ssize_t show_iwmct_log_level(struct device *d, | ||
176 | struct device_attribute *attr, char *buf) | ||
177 | { | ||
178 | struct iwmct_priv *priv = dev_get_drvdata(d); | ||
179 | char *str_buf; | ||
180 | int buf_size; | ||
181 | ssize_t ret; | ||
182 | |||
183 | buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1; | ||
184 | str_buf = kzalloc(buf_size, GFP_KERNEL); | ||
185 | if (!str_buf) { | ||
186 | LOG_ERROR(priv, DEBUGFS, | ||
187 | "failed to allocate %d bytes\n", buf_size); | ||
188 | ret = -ENOMEM; | ||
189 | goto exit; | ||
190 | } | ||
191 | |||
192 | if (log_get_filter_str(str_buf, buf_size) < 0) { | ||
193 | ret = -EINVAL; | ||
194 | goto exit; | ||
195 | } | ||
196 | |||
197 | ret = sprintf(buf, "%s", str_buf); | ||
198 | |||
199 | exit: | ||
200 | kfree(str_buf); | ||
201 | return ret; | ||
202 | } | ||
203 | |||
204 | ssize_t store_iwmct_log_level(struct device *d, | ||
205 | struct device_attribute *attr, | ||
206 | const char *buf, size_t count) | ||
207 | { | ||
208 | struct iwmct_priv *priv = dev_get_drvdata(d); | ||
209 | char *token, *str_buf = NULL; | ||
210 | long val; | ||
211 | ssize_t ret = count; | ||
212 | u8 src, mask; | ||
213 | |||
214 | if (!count) | ||
215 | goto exit; | ||
216 | |||
217 | str_buf = kzalloc(count, GFP_KERNEL); | ||
218 | if (!str_buf) { | ||
219 | LOG_ERROR(priv, DEBUGFS, | ||
220 | "failed to allocate %zd bytes\n", count); | ||
221 | ret = -ENOMEM; | ||
222 | goto exit; | ||
223 | } | ||
224 | |||
225 | memcpy(str_buf, buf, count); | ||
226 | |||
227 | while ((token = strsep(&str_buf, ",")) != NULL) { | ||
228 | while (isspace(*token)) | ||
229 | ++token; | ||
230 | if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { | ||
231 | LOG_ERROR(priv, DEBUGFS, | ||
232 | "failed to convert string to long %s\n", | ||
233 | token); | ||
234 | ret = -EINVAL; | ||
235 | goto exit; | ||
236 | } | ||
237 | |||
238 | mask = val & 0xFF; | ||
239 | src = (val & 0XFF00) >> 8; | ||
240 | iwmct_log_set_filter(src, mask); | ||
241 | } | ||
242 | |||
243 | exit: | ||
244 | kfree(str_buf); | ||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | ssize_t show_iwmct_log_level_fw(struct device *d, | ||
249 | struct device_attribute *attr, char *buf) | ||
250 | { | ||
251 | struct iwmct_priv *priv = dev_get_drvdata(d); | ||
252 | char *str_buf; | ||
253 | int buf_size; | ||
254 | ssize_t ret; | ||
255 | |||
256 | buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2; | ||
257 | |||
258 | str_buf = kzalloc(buf_size, GFP_KERNEL); | ||
259 | if (!str_buf) { | ||
260 | LOG_ERROR(priv, DEBUGFS, | ||
261 | "failed to allocate %d bytes\n", buf_size); | ||
262 | ret = -ENOMEM; | ||
263 | goto exit; | ||
264 | } | ||
265 | |||
266 | if (log_get_fw_filter_str(str_buf, buf_size) < 0) { | ||
267 | ret = -EINVAL; | ||
268 | goto exit; | ||
269 | } | ||
270 | |||
271 | ret = sprintf(buf, "%s", str_buf); | ||
272 | |||
273 | exit: | ||
274 | kfree(str_buf); | ||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | ssize_t store_iwmct_log_level_fw(struct device *d, | ||
279 | struct device_attribute *attr, | ||
280 | const char *buf, size_t count) | ||
281 | { | ||
282 | struct iwmct_priv *priv = dev_get_drvdata(d); | ||
283 | struct top_msg cmd; | ||
284 | char *token, *str_buf = NULL; | ||
285 | ssize_t ret = count; | ||
286 | u16 cmdlen = 0; | ||
287 | int i; | ||
288 | long val; | ||
289 | u8 src, mask; | ||
290 | |||
291 | if (!count) | ||
292 | goto exit; | ||
293 | |||
294 | str_buf = kzalloc(count, GFP_KERNEL); | ||
295 | if (!str_buf) { | ||
296 | LOG_ERROR(priv, DEBUGFS, | ||
297 | "failed to allocate %zd bytes\n", count); | ||
298 | ret = -ENOMEM; | ||
299 | goto exit; | ||
300 | } | ||
301 | |||
302 | memcpy(str_buf, buf, count); | ||
303 | |||
304 | cmd.hdr.type = COMM_TYPE_H2D; | ||
305 | cmd.hdr.category = COMM_CATEGORY_DEBUG; | ||
306 | cmd.hdr.opcode = CMD_DBG_LOG_LEVEL; | ||
307 | |||
308 | for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) && | ||
309 | (i < FW_LOG_SRC_MAX); i++) { | ||
310 | |||
311 | while (isspace(*token)) | ||
312 | ++token; | ||
313 | |||
314 | if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { | ||
315 | LOG_ERROR(priv, DEBUGFS, | ||
316 | "failed to convert string to long %s\n", | ||
317 | token); | ||
318 | ret = -EINVAL; | ||
319 | goto exit; | ||
320 | } | ||
321 | |||
322 | mask = val & 0xFF; /* LSB */ | ||
323 | src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */ | ||
324 | iwmct_log_set_fw_filter(src, mask); | ||
325 | |||
326 | cmd.u.logdefs[i].logsource = src; | ||
327 | cmd.u.logdefs[i].sevmask = mask; | ||
328 | } | ||
329 | |||
330 | cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0])); | ||
331 | cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr)); | ||
332 | |||
333 | ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen); | ||
334 | if (ret) { | ||
335 | LOG_ERROR(priv, DEBUGFS, | ||
336 | "Failed to send %d bytes of fwcmd, ret=%zd\n", | ||
337 | cmdlen, ret); | ||
338 | goto exit; | ||
339 | } else | ||
340 | LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen); | ||
341 | |||
342 | ret = count; | ||
343 | |||
344 | exit: | ||
345 | kfree(str_buf); | ||
346 | return ret; | ||
347 | } | ||
348 | |||
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h new file mode 100644 index 000000000000..4434bb16cea7 --- /dev/null +++ b/drivers/misc/iwmc3200top/log.h | |||
@@ -0,0 +1,171 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/log.h | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __LOG_H__ | ||
28 | #define __LOG_H__ | ||
29 | |||
30 | |||
31 | /* log severity: | ||
32 | * The log levels here match FW log levels | ||
33 | * so values need to stay as is */ | ||
34 | #define LOG_SEV_CRITICAL 0 | ||
35 | #define LOG_SEV_ERROR 1 | ||
36 | #define LOG_SEV_WARNING 2 | ||
37 | #define LOG_SEV_INFO 3 | ||
38 | #define LOG_SEV_INFOEX 4 | ||
39 | |||
40 | /* Log levels not defined for FW */ | ||
41 | #define LOG_SEV_TRACE 5 | ||
42 | #define LOG_SEV_DUMP 6 | ||
43 | |||
44 | #define LOG_SEV_FW_FILTER_ALL \ | ||
45 | (BIT(LOG_SEV_CRITICAL) | \ | ||
46 | BIT(LOG_SEV_ERROR) | \ | ||
47 | BIT(LOG_SEV_WARNING) | \ | ||
48 | BIT(LOG_SEV_INFO) | \ | ||
49 | BIT(LOG_SEV_INFOEX)) | ||
50 | |||
51 | #define LOG_SEV_FILTER_ALL \ | ||
52 | (BIT(LOG_SEV_CRITICAL) | \ | ||
53 | BIT(LOG_SEV_ERROR) | \ | ||
54 | BIT(LOG_SEV_WARNING) | \ | ||
55 | BIT(LOG_SEV_INFO) | \ | ||
56 | BIT(LOG_SEV_INFOEX) | \ | ||
57 | BIT(LOG_SEV_TRACE) | \ | ||
58 | BIT(LOG_SEV_DUMP)) | ||
59 | |||
60 | /* log source */ | ||
61 | #define LOG_SRC_INIT 0 | ||
62 | #define LOG_SRC_DEBUGFS 1 | ||
63 | #define LOG_SRC_FW_DOWNLOAD 2 | ||
64 | #define LOG_SRC_FW_MSG 3 | ||
65 | #define LOG_SRC_TST 4 | ||
66 | #define LOG_SRC_IRQ 5 | ||
67 | |||
68 | #define LOG_SRC_MAX 6 | ||
69 | #define LOG_SRC_ALL 0xFF | ||
70 | |||
71 | /** | ||
 * Default initial runtime log level
73 | */ | ||
74 | #ifndef LOG_SEV_FILTER_RUNTIME | ||
75 | #define LOG_SEV_FILTER_RUNTIME \ | ||
76 | (BIT(LOG_SEV_CRITICAL) | \ | ||
77 | BIT(LOG_SEV_ERROR) | \ | ||
78 | BIT(LOG_SEV_WARNING)) | ||
79 | #endif | ||
80 | |||
81 | #ifndef FW_LOG_SEV_FILTER_RUNTIME | ||
82 | #define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL | ||
83 | #endif | ||
84 | |||
85 | #ifdef CONFIG_IWMC3200TOP_DEBUG | ||
86 | /** | ||
87 | * Log macros | ||
88 | */ | ||
89 | |||
90 | #define priv2dev(priv) (&(priv->func)->dev) | ||
91 | |||
92 | #define LOG_CRITICAL(priv, src, fmt, args...) \ | ||
93 | do { \ | ||
94 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \ | ||
95 | dev_crit(priv2dev(priv), "%s %d: " fmt, \ | ||
96 | __func__, __LINE__, ##args); \ | ||
97 | } while (0) | ||
98 | |||
99 | #define LOG_ERROR(priv, src, fmt, args...) \ | ||
100 | do { \ | ||
101 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \ | ||
102 | dev_err(priv2dev(priv), "%s %d: " fmt, \ | ||
103 | __func__, __LINE__, ##args); \ | ||
104 | } while (0) | ||
105 | |||
106 | #define LOG_WARNING(priv, src, fmt, args...) \ | ||
107 | do { \ | ||
108 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \ | ||
109 | dev_warn(priv2dev(priv), "%s %d: " fmt, \ | ||
110 | __func__, __LINE__, ##args); \ | ||
111 | } while (0) | ||
112 | |||
113 | #define LOG_INFO(priv, src, fmt, args...) \ | ||
114 | do { \ | ||
115 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \ | ||
116 | dev_info(priv2dev(priv), "%s %d: " fmt, \ | ||
117 | __func__, __LINE__, ##args); \ | ||
118 | } while (0) | ||
119 | |||
120 | #define LOG_TRACE(priv, src, fmt, args...) \ | ||
121 | do { \ | ||
122 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \ | ||
123 | dev_dbg(priv2dev(priv), "%s %d: " fmt, \ | ||
124 | __func__, __LINE__, ##args); \ | ||
125 | } while (0) | ||
126 | |||
127 | #define LOG_HEXDUMP(src, ptr, len) \ | ||
128 | do { \ | ||
129 | if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \ | ||
130 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \ | ||
131 | 16, 1, ptr, len, false); \ | ||
132 | } while (0) | ||
133 | |||
134 | void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len); | ||
135 | |||
136 | extern u8 iwmct_logdefs[]; | ||
137 | |||
138 | int iwmct_log_set_filter(u8 src, u8 logmask); | ||
139 | int iwmct_log_set_fw_filter(u8 src, u8 logmask); | ||
140 | |||
141 | ssize_t show_iwmct_log_level(struct device *d, | ||
142 | struct device_attribute *attr, char *buf); | ||
143 | ssize_t store_iwmct_log_level(struct device *d, | ||
144 | struct device_attribute *attr, | ||
145 | const char *buf, size_t count); | ||
146 | ssize_t show_iwmct_log_level_fw(struct device *d, | ||
147 | struct device_attribute *attr, char *buf); | ||
148 | ssize_t store_iwmct_log_level_fw(struct device *d, | ||
149 | struct device_attribute *attr, | ||
150 | const char *buf, size_t count); | ||
151 | |||
152 | #else | ||
153 | |||
154 | #define LOG_CRITICAL(priv, src, fmt, args...) | ||
155 | #define LOG_ERROR(priv, src, fmt, args...) | ||
156 | #define LOG_WARNING(priv, src, fmt, args...) | ||
157 | #define LOG_INFO(priv, src, fmt, args...) | ||
158 | #define LOG_TRACE(priv, src, fmt, args...) | ||
159 | #define LOG_HEXDUMP(src, ptr, len) | ||
160 | |||
161 | static inline void iwmct_log_top_message(struct iwmct_priv *priv, | ||
162 | u8 *buf, int len) {} | ||
163 | static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; } | ||
164 | static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; } | ||
165 | |||
166 | #endif /* CONFIG_IWMC3200TOP_DEBUG */ | ||
167 | |||
168 | int log_get_filter_str(char *buf, int size); | ||
169 | int log_get_fw_filter_str(char *buf, int size); | ||
170 | |||
171 | #endif /* __LOG_H__ */ | ||
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c new file mode 100644 index 000000000000..c73cef2c3c5e --- /dev/null +++ b/drivers/misc/iwmc3200top/main.c | |||
@@ -0,0 +1,666 @@ | |||
1 | /* | ||
2 | * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver | ||
3 | * drivers/misc/iwmc3200top/main.c | ||
4 | * | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> | ||
23 | * - | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/debugfs.h> | ||
32 | #include <linux/mmc/sdio_ids.h> | ||
33 | #include <linux/mmc/sdio_func.h> | ||
34 | #include <linux/mmc/sdio.h> | ||
35 | |||
36 | #include "iwmc3200top.h" | ||
37 | #include "log.h" | ||
38 | #include "fw-msg.h" | ||
39 | #include "debugfs.h" | ||
40 | |||
41 | |||
42 | #define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver" | ||
43 | #define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation." | ||
44 | |||
45 | #define DRIVER_VERSION "0.1.62" | ||
46 | |||
47 | MODULE_DESCRIPTION(DRIVER_DESCRIPTION); | ||
48 | MODULE_VERSION(DRIVER_VERSION); | ||
49 | MODULE_LICENSE("GPL"); | ||
50 | MODULE_AUTHOR(DRIVER_COPYRIGHT); | ||
51 | MODULE_FIRMWARE(FW_NAME(FW_API_VER)); | ||
52 | |||
53 | |||
54 | static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count) | ||
55 | { | ||
56 | return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count); | ||
57 | |||
58 | } | ||
59 | int iwmct_tx(struct iwmct_priv *priv, void *src, int count) | ||
60 | { | ||
61 | int ret; | ||
62 | sdio_claim_host(priv->func); | ||
63 | ret = __iwmct_tx(priv, src, count); | ||
64 | sdio_release_host(priv->func); | ||
65 | return ret; | ||
66 | } | ||
67 | /* | ||
68 | * This workers main task is to wait for OP_OPR_ALIVE | ||
69 | * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. | ||
70 | * When OP_OPR_ALIVE received it will issue | ||
71 | * a call to "bus_rescan_devices". | ||
72 | */ | ||
73 | static void iwmct_rescan_worker(struct work_struct *ws) | ||
74 | { | ||
75 | struct iwmct_priv *priv; | ||
76 | int ret; | ||
77 | |||
78 | priv = container_of(ws, struct iwmct_priv, bus_rescan_worker); | ||
79 | |||
80 | LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n"); | ||
81 | |||
82 | ret = bus_rescan_devices(priv->func->dev.bus); | ||
83 | if (ret < 0) | ||
84 | LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n"); | ||
85 | } | ||
86 | |||
87 | static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) | ||
88 | { | ||
89 | switch (msg->hdr.opcode) { | ||
90 | case OP_OPR_ALIVE: | ||
91 | LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n"); | ||
92 | queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker); | ||
93 | break; | ||
94 | default: | ||
95 | LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n", | ||
96 | msg->hdr.opcode); | ||
97 | break; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | |||
102 | static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len) | ||
103 | { | ||
104 | struct top_msg *msg; | ||
105 | |||
106 | msg = (struct top_msg *)buf; | ||
107 | |||
108 | if (msg->hdr.type != COMM_TYPE_D2H) { | ||
109 | LOG_ERROR(priv, FW_MSG, | ||
110 | "Message from TOP with invalid message type 0x%X\n", | ||
111 | msg->hdr.type); | ||
112 | return; | ||
113 | } | ||
114 | |||
115 | if (len < sizeof(msg->hdr)) { | ||
116 | LOG_ERROR(priv, FW_MSG, | ||
117 | "Message from TOP is too short for message header " | ||
118 | "received %d bytes, expected at least %zd bytes\n", | ||
119 | len, sizeof(msg->hdr)); | ||
120 | return; | ||
121 | } | ||
122 | |||
123 | if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) { | ||
124 | LOG_ERROR(priv, FW_MSG, | ||
125 | "Message length (%d bytes) is shorter than " | ||
126 | "in header (%d bytes)\n", | ||
127 | len, le16_to_cpu(msg->hdr.length)); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | switch (msg->hdr.category) { | ||
132 | case COMM_CATEGORY_OPERATIONAL: | ||
133 | op_top_message(priv, (struct top_msg *)buf); | ||
134 | break; | ||
135 | |||
136 | case COMM_CATEGORY_DEBUG: | ||
137 | case COMM_CATEGORY_TESTABILITY: | ||
138 | case COMM_CATEGORY_DIAGNOSTICS: | ||
139 | iwmct_log_top_message(priv, buf, len); | ||
140 | break; | ||
141 | |||
142 | default: | ||
143 | LOG_ERROR(priv, FW_MSG, | ||
144 | "Message from TOP with unknown category 0x%X\n", | ||
145 | msg->hdr.category); | ||
146 | break; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len) | ||
151 | { | ||
152 | int ret; | ||
153 | u8 *buf; | ||
154 | |||
155 | LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n"); | ||
156 | |||
157 | /* add padding to 256 for IWMC */ | ||
158 | ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256; | ||
159 | |||
160 | LOG_HEXDUMP(FW_MSG, cmd, len); | ||
161 | |||
162 | if (len > FW_HCMD_BLOCK_SIZE) { | ||
163 | LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n", | ||
164 | len, FW_HCMD_BLOCK_SIZE); | ||
165 | return -1; | ||
166 | } | ||
167 | |||
168 | buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL); | ||
169 | if (!buf) { | ||
170 | LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n", | ||
171 | FW_HCMD_BLOCK_SIZE); | ||
172 | return -1; | ||
173 | } | ||
174 | |||
175 | memcpy(buf, cmd, len); | ||
176 | ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE); | ||
177 | |||
178 | kfree(buf); | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | |||
/*
 * Deferred read handler, queued from iwmct_irq().
 *
 * Pops one pending read request (carrying the transaction size captured
 * in the ISR), reads that many bytes from the device, then classifies
 * the data:
 *   - ACK barker    -> sync handshake completed, start the FW download
 *   - REBOOT barker -> echo it back if sync was requested, else download
 *   - anything else -> a regular TOP CommHub message
 *
 * Note the asymmetric locking: the SDIO host is claimed for all list and
 * device I/O, but released *before* dispatching a CommHub message or
 * starting the firmware download.
 */
static void iwmct_irq_read_worker(struct work_struct *ws)
{
	struct iwmct_priv *priv;
	struct iwmct_work_struct *read_req;
	__le32 *buf = NULL;
	int ret;
	int iosize;
	u32 barker;
	bool is_barker;

	priv = container_of(ws, struct iwmct_priv, isr_worker);

	LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);

	/* --------------------- Handshake with device -------------------- */
	sdio_claim_host(priv->func);

	/* all list manipulations have to be protected by
	 * sdio_claim_host/sdio_release_host */
	if (list_empty(&priv->read_req_list)) {
		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
		goto exit_release;
	}

	/* take the oldest pending request and free its carrier struct */
	read_req = list_entry(priv->read_req_list.next,
			      struct iwmct_work_struct, list);

	list_del(&read_req->list);
	iosize = read_req->iosize;
	kfree(read_req);

	buf = kzalloc(iosize, GFP_KERNEL);
	if (!buf) {
		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
		goto exit_release;
	}

	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
				iosize, buf, priv->func->num);

	/* read from device */
	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
	if (ret) {
		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
		goto exit_release;
	}

	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);

	barker = le32_to_cpu(buf[0]);

	/* Verify whether it's a barker and if not - treat as regular Rx */
	if (barker == IWMC_BARKER_ACK ||
	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {

		/* Valid Barker is equal on first 4 dwords */
		is_barker = (buf[1] == buf[0]) &&
			    (buf[2] == buf[0]) &&
			    (buf[3] == buf[0]);

		if (!is_barker) {
			/* logged but still treated as a valid barker below */
			LOG_WARNING(priv, IRQ,
				"Potentially inconsistent barker "
				"%08X_%08X_%08X_%08X\n",
				le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
				le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
		}
	} else {
		is_barker = false;
	}

	/* Handle Top CommHub message */
	if (!is_barker) {
		/* drop the host before the (potentially slow) dispatch */
		sdio_release_host(priv->func);
		handle_top_message(priv, (u8 *)buf, iosize);
		goto exit;
	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
		if (atomic_read(&priv->dev_sync) == 0) {
			LOG_ERROR(priv, IRQ,
				  "ACK barker arrived out-of-sync\n");
			goto exit_release;
		}

		/* Continuing to FW download (after Sync is completed)*/
		atomic_set(&priv->dev_sync, 0);
		LOG_INFO(priv, IRQ, "ACK barker arrived "
				"- starting FW download\n");
	} else { /* REBOOT barker */
		LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
		priv->barker = barker;

		if (barker & BARKER_DNLOAD_SYNC_MSK) {
			/* Send the same barker back; host is still claimed,
			 * so the raw __iwmct_tx() variant is used */
			ret = __iwmct_tx(priv, buf, iosize);
			if (ret) {
				LOG_ERROR(priv, IRQ,
					 "error %d echoing barker\n", ret);
				goto exit_release;
			}
			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
			atomic_set(&priv->dev_sync, 1);
			/* download is deferred until the ACK barker arrives */
			goto exit_release;
		}

		/* Continuing to FW download (without Sync) */
		LOG_INFO(priv, IRQ, "No sync requested "
				"- starting FW download\n");
	}

	sdio_release_host(priv->func);

	if (priv->dbg.fw_download)
		iwmct_fw_load(priv);
	else
		LOG_ERROR(priv, IRQ, "FW download not allowed\n");

	goto exit;

exit_release:
	sdio_release_host(priv->func);
exit:
	kfree(buf);
	LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
307 | |||
308 | static void iwmct_irq(struct sdio_func *func) | ||
309 | { | ||
310 | struct iwmct_priv *priv; | ||
311 | int val, ret; | ||
312 | int iosize; | ||
313 | int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR; | ||
314 | struct iwmct_work_struct *read_req; | ||
315 | |||
316 | priv = sdio_get_drvdata(func); | ||
317 | |||
318 | LOG_TRACE(priv, IRQ, "enter iwmct_irq\n"); | ||
319 | |||
320 | /* read the function's status register */ | ||
321 | val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret); | ||
322 | |||
323 | LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret); | ||
324 | |||
325 | if (!val) { | ||
326 | LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n"); | ||
327 | goto exit_clear_intr; | ||
328 | } | ||
329 | |||
330 | |||
331 | /* | ||
332 | * read 2 bytes of the transaction size | ||
333 | * IMPORTANT: sdio transaction size has to be read before clearing | ||
334 | * sdio interrupt!!! | ||
335 | */ | ||
336 | val = sdio_readb(priv->func, addr++, &ret); | ||
337 | iosize = val; | ||
338 | val = sdio_readb(priv->func, addr++, &ret); | ||
339 | iosize += val << 8; | ||
340 | |||
341 | LOG_INFO(priv, IRQ, "READ size %d\n", iosize); | ||
342 | |||
343 | if (iosize == 0) { | ||
344 | LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize); | ||
345 | goto exit_clear_intr; | ||
346 | } | ||
347 | |||
348 | /* allocate a work structure to pass iosize to the worker */ | ||
349 | read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL); | ||
350 | if (!read_req) { | ||
351 | LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n"); | ||
352 | goto exit_clear_intr; | ||
353 | } | ||
354 | |||
355 | INIT_LIST_HEAD(&read_req->list); | ||
356 | read_req->iosize = iosize; | ||
357 | |||
358 | list_add_tail(&priv->read_req_list, &read_req->list); | ||
359 | |||
360 | /* clear the function's interrupt request bit (write 1 to clear) */ | ||
361 | sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); | ||
362 | |||
363 | queue_work(priv->wq, &priv->isr_worker); | ||
364 | |||
365 | LOG_TRACE(priv, IRQ, "exit iwmct_irq\n"); | ||
366 | |||
367 | return; | ||
368 | |||
369 | exit_clear_intr: | ||
370 | /* clear the function's interrupt request bit (write 1 to clear) */ | ||
371 | sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); | ||
372 | } | ||
373 | |||
374 | |||
375 | static int blocks; | ||
376 | module_param(blocks, int, 0604); | ||
377 | MODULE_PARM_DESC(blocks, "max_blocks_to_send"); | ||
378 | |||
379 | static int dump; | ||
380 | module_param(dump, bool, 0604); | ||
381 | MODULE_PARM_DESC(dump, "dump_hex_content"); | ||
382 | |||
383 | static int jump = 1; | ||
384 | module_param(jump, bool, 0604); | ||
385 | |||
386 | static int direct = 1; | ||
387 | module_param(direct, bool, 0604); | ||
388 | |||
389 | static int checksum = 1; | ||
390 | module_param(checksum, bool, 0604); | ||
391 | |||
392 | static int fw_download = 1; | ||
393 | module_param(fw_download, bool, 0604); | ||
394 | |||
395 | static int block_size = IWMC_SDIO_BLK_SIZE; | ||
396 | module_param(block_size, int, 0404); | ||
397 | |||
398 | static int download_trans_blks = IWMC_DEFAULT_TR_BLK; | ||
399 | module_param(download_trans_blks, int, 0604); | ||
400 | |||
401 | static int rubbish_barker; | ||
402 | module_param(rubbish_barker, bool, 0604); | ||
403 | |||
404 | #ifdef CONFIG_IWMC3200TOP_DEBUG | ||
405 | static int log_level[LOG_SRC_MAX]; | ||
406 | static unsigned int log_level_argc; | ||
407 | module_param_array(log_level, int, &log_level_argc, 0604); | ||
408 | MODULE_PARM_DESC(log_level, "log_level"); | ||
409 | |||
410 | static int log_level_fw[FW_LOG_SRC_MAX]; | ||
411 | static unsigned int log_level_fw_argc; | ||
412 | module_param_array(log_level_fw, int, &log_level_fw_argc, 0604); | ||
413 | MODULE_PARM_DESC(log_level_fw, "log_level_fw"); | ||
414 | #endif | ||
415 | |||
/*
 * Copy the module parameters into the per-device debug configuration
 * (priv->dbg) and, under CONFIG_IWMC3200TOP_DEBUG, program the driver and
 * firmware log filters from the log_level/log_level_fw arrays.
 * Called once from iwmct_probe().
 */
void iwmct_dbg_init_params(struct iwmct_priv *priv)
{
#ifdef CONFIG_IWMC3200TOP_DEBUG
	int i;

	for (i = 0; i < log_level_argc; i++) {
		dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
				i, log_level[i]);
		/* high byte selects the log source, low byte the mask */
		iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
			       log_level[i] & 0xFF);
	}
	for (i = 0; i < log_level_fw_argc; i++) {
		dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
				i, log_level_fw[i]);
		iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
				  log_level_fw[i] & 0xFF);
	}
#endif

	/* mirror every knob into priv->dbg and log the effective value */
	priv->dbg.blocks = blocks;
	LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
	priv->dbg.dump = (bool)dump;
	LOG_INFO(priv, INIT, "dump=%d\n", dump);
	priv->dbg.jump = (bool)jump;
	LOG_INFO(priv, INIT, "jump=%d\n", jump);
	priv->dbg.direct = (bool)direct;
	LOG_INFO(priv, INIT, "direct=%d\n", direct);
	priv->dbg.checksum = (bool)checksum;
	LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
	priv->dbg.fw_download = (bool)fw_download;
	LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
	priv->dbg.block_size = block_size;
	LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
	priv->dbg.download_trans_blks = download_trans_blks;
	LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
}
452 | |||
453 | /***************************************************************************** | ||
454 | * | ||
455 | * sysfs attributes | ||
456 | * | ||
457 | *****************************************************************************/ | ||
458 | static ssize_t show_iwmct_fw_version(struct device *d, | ||
459 | struct device_attribute *attr, char *buf) | ||
460 | { | ||
461 | struct iwmct_priv *priv = dev_get_drvdata(d); | ||
462 | return sprintf(buf, "%s\n", priv->dbg.label_fw); | ||
463 | } | ||
464 | static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL); | ||
465 | |||
#ifdef CONFIG_IWMC3200TOP_DEBUG
/* runtime-tunable log filters: writable by root, readable by everyone */
static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level, store_iwmct_log_level);
static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level_fw, store_iwmct_log_level_fw);
#endif

/* attributes exposed in the SDIO function's sysfs directory */
static struct attribute *iwmct_sysfs_entries[] = {
	&dev_attr_cc_label_fw.attr,
#ifdef CONFIG_IWMC3200TOP_DEBUG
	&dev_attr_log_level.attr,
	&dev_attr_log_level_fw.attr,
#endif
	NULL
};

static struct attribute_group iwmct_attribute_group = {
	.name = NULL,	/* put in device directory */
	.attrs = iwmct_sysfs_entries,
};
486 | |||
487 | |||
488 | static int iwmct_probe(struct sdio_func *func, | ||
489 | const struct sdio_device_id *id) | ||
490 | { | ||
491 | struct iwmct_priv *priv; | ||
492 | int ret; | ||
493 | int val = 1; | ||
494 | int addr = IWMC_SDIO_INTR_ENABLE_ADDR; | ||
495 | |||
496 | dev_dbg(&func->dev, "enter iwmct_probe\n"); | ||
497 | |||
498 | dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n", | ||
499 | jiffies_to_msecs(2147483647), HZ); | ||
500 | |||
501 | priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL); | ||
502 | if (!priv) { | ||
503 | dev_err(&func->dev, "kzalloc error\n"); | ||
504 | return -ENOMEM; | ||
505 | } | ||
506 | priv->func = func; | ||
507 | sdio_set_drvdata(func, priv); | ||
508 | |||
509 | |||
510 | /* create drivers work queue */ | ||
511 | priv->wq = create_workqueue(DRV_NAME "_wq"); | ||
512 | priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq"); | ||
513 | INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker); | ||
514 | INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker); | ||
515 | |||
516 | init_waitqueue_head(&priv->wait_q); | ||
517 | |||
518 | sdio_claim_host(func); | ||
519 | /* FIXME: Remove after it is fixed in the Boot ROM upgrade */ | ||
520 | func->enable_timeout = 10; | ||
521 | |||
522 | /* In our HW, setting the block size also wakes up the boot rom. */ | ||
523 | ret = sdio_set_block_size(func, priv->dbg.block_size); | ||
524 | if (ret) { | ||
525 | LOG_ERROR(priv, INIT, | ||
526 | "sdio_set_block_size() failure: %d\n", ret); | ||
527 | goto error_sdio_enable; | ||
528 | } | ||
529 | |||
530 | ret = sdio_enable_func(func); | ||
531 | if (ret) { | ||
532 | LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret); | ||
533 | goto error_sdio_enable; | ||
534 | } | ||
535 | |||
536 | /* init reset and dev_sync states */ | ||
537 | atomic_set(&priv->reset, 0); | ||
538 | atomic_set(&priv->dev_sync, 0); | ||
539 | |||
540 | /* init read req queue */ | ||
541 | INIT_LIST_HEAD(&priv->read_req_list); | ||
542 | |||
543 | /* process configurable parameters */ | ||
544 | iwmct_dbg_init_params(priv); | ||
545 | ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group); | ||
546 | if (ret) { | ||
547 | LOG_ERROR(priv, INIT, "Failed to register attributes and " | ||
548 | "initialize module_params\n"); | ||
549 | goto error_dev_attrs; | ||
550 | } | ||
551 | |||
552 | iwmct_dbgfs_register(priv, DRV_NAME); | ||
553 | |||
554 | if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) { | ||
555 | LOG_INFO(priv, INIT, | ||
556 | "Reducing transaction to 8 blocks = 2K (from %d)\n", | ||
557 | priv->dbg.download_trans_blks); | ||
558 | priv->dbg.download_trans_blks = 8; | ||
559 | } | ||
560 | priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size; | ||
561 | LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len); | ||
562 | |||
563 | ret = sdio_claim_irq(func, iwmct_irq); | ||
564 | if (ret) { | ||
565 | LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret); | ||
566 | goto error_claim_irq; | ||
567 | } | ||
568 | |||
569 | |||
570 | /* Enable function's interrupt */ | ||
571 | sdio_writeb(priv->func, val, addr, &ret); | ||
572 | if (ret) { | ||
573 | LOG_ERROR(priv, INIT, "Failure writing to " | ||
574 | "Interrupt Enable Register (%d): %d\n", addr, ret); | ||
575 | goto error_enable_int; | ||
576 | } | ||
577 | |||
578 | sdio_release_host(func); | ||
579 | |||
580 | LOG_INFO(priv, INIT, "exit iwmct_probe\n"); | ||
581 | |||
582 | return ret; | ||
583 | |||
584 | error_enable_int: | ||
585 | sdio_release_irq(func); | ||
586 | error_claim_irq: | ||
587 | sdio_disable_func(func); | ||
588 | error_dev_attrs: | ||
589 | iwmct_dbgfs_unregister(priv->dbgfs); | ||
590 | sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); | ||
591 | error_sdio_enable: | ||
592 | sdio_release_host(func); | ||
593 | return ret; | ||
594 | } | ||
595 | |||
596 | static void iwmct_remove(struct sdio_func *func) | ||
597 | { | ||
598 | struct iwmct_work_struct *read_req; | ||
599 | struct iwmct_priv *priv = sdio_get_drvdata(func); | ||
600 | |||
601 | LOG_INFO(priv, INIT, "enter\n"); | ||
602 | |||
603 | sdio_claim_host(func); | ||
604 | sdio_release_irq(func); | ||
605 | sdio_release_host(func); | ||
606 | |||
607 | /* Safely destroy osc workqueue */ | ||
608 | destroy_workqueue(priv->bus_rescan_wq); | ||
609 | destroy_workqueue(priv->wq); | ||
610 | |||
611 | sdio_claim_host(func); | ||
612 | sdio_disable_func(func); | ||
613 | sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); | ||
614 | iwmct_dbgfs_unregister(priv->dbgfs); | ||
615 | sdio_release_host(func); | ||
616 | |||
617 | /* free read requests */ | ||
618 | while (!list_empty(&priv->read_req_list)) { | ||
619 | read_req = list_entry(priv->read_req_list.next, | ||
620 | struct iwmct_work_struct, list); | ||
621 | |||
622 | list_del(&read_req->list); | ||
623 | kfree(read_req); | ||
624 | } | ||
625 | |||
626 | kfree(priv); | ||
627 | } | ||
628 | |||
629 | |||
/* SDIO devices this driver binds to */
static const struct sdio_device_id iwmct_ids[] = {
	/* Intel Wireless MultiCom 3200 Top Driver */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
	{ },	/* Terminating entry */
};

MODULE_DEVICE_TABLE(sdio, iwmct_ids);

static struct sdio_driver iwmct_driver = {
	.probe = iwmct_probe,
	.remove = iwmct_remove,
	.name = DRV_NAME,
	.id_table = iwmct_ids,
};
644 | |||
645 | static int __init iwmct_init(void) | ||
646 | { | ||
647 | int rc; | ||
648 | |||
649 | /* Default log filter settings */ | ||
650 | iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME); | ||
651 | iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL); | ||
652 | iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME); | ||
653 | |||
654 | rc = sdio_register_driver(&iwmct_driver); | ||
655 | |||
656 | return rc; | ||
657 | } | ||
658 | |||
/* Module exit: unregister the SDIO driver (unbinds all devices). */
static void __exit iwmct_exit(void)
{
	sdio_unregister_driver(&iwmct_driver);
}
663 | |||
664 | module_init(iwmct_init); | ||
665 | module_exit(iwmct_exit); | ||
666 | |||
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index e4ff50b95a5e..72450237a0f4 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c | |||
@@ -295,6 +295,10 @@ static int check_and_rewind_pc(char *put_str, char *arg) | |||
295 | /* On x86 a breakpoint stop requires it to be decremented */ | 295 | /* On x86 a breakpoint stop requires it to be decremented */ |
296 | if (addr + 1 == kgdbts_regs.ip) | 296 | if (addr + 1 == kgdbts_regs.ip) |
297 | offset = -1; | 297 | offset = -1; |
298 | #elif defined(CONFIG_SUPERH) | ||
299 | /* On SUPERH a breakpoint stop requires it to be decremented */ | ||
300 | if (addr + 2 == kgdbts_regs.pc) | ||
301 | offset = -2; | ||
298 | #endif | 302 | #endif |
299 | if (strcmp(arg, "silent") && | 303 | if (strcmp(arg, "silent") && |
300 | instruction_pointer(&kgdbts_regs) + offset != addr) { | 304 | instruction_pointer(&kgdbts_regs) + offset != addr) { |
@@ -305,6 +309,8 @@ static int check_and_rewind_pc(char *put_str, char *arg) | |||
305 | #ifdef CONFIG_X86 | 309 | #ifdef CONFIG_X86 |
306 | /* On x86 adjust the instruction pointer if needed */ | 310 | /* On x86 adjust the instruction pointer if needed */ |
307 | kgdbts_regs.ip += offset; | 311 | kgdbts_regs.ip += offset; |
312 | #elif defined(CONFIG_SUPERH) | ||
313 | kgdbts_regs.pc += offset; | ||
308 | #endif | 314 | #endif |
309 | return 0; | 315 | return 0; |
310 | } | 316 | } |
@@ -712,6 +718,12 @@ static int run_simple_test(int is_get_char, int chr) | |||
712 | 718 | ||
713 | /* End of packet == #XX so look for the '#' */ | 719 | /* End of packet == #XX so look for the '#' */ |
714 | if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { | 720 | if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { |
721 | if (put_buf_cnt >= BUFMAX) { | ||
722 | eprintk("kgdbts: ERROR: put buffer overflow on" | ||
723 | " '%s' line %i\n", ts.name, ts.idx); | ||
724 | put_buf_cnt = 0; | ||
725 | return 0; | ||
726 | } | ||
715 | put_buf[put_buf_cnt] = '\0'; | 727 | put_buf[put_buf_cnt] = '\0'; |
716 | v2printk("put%i: %s\n", ts.idx, put_buf); | 728 | v2printk("put%i: %s\n", ts.idx, put_buf); |
717 | /* Trigger check here */ | 729 | /* Trigger check here */ |
@@ -885,16 +897,16 @@ static void kgdbts_run_tests(void) | |||
885 | int nmi_sleep = 0; | 897 | int nmi_sleep = 0; |
886 | int i; | 898 | int i; |
887 | 899 | ||
888 | ptr = strstr(config, "F"); | 900 | ptr = strchr(config, 'F'); |
889 | if (ptr) | 901 | if (ptr) |
890 | fork_test = simple_strtol(ptr + 1, NULL, 10); | 902 | fork_test = simple_strtol(ptr + 1, NULL, 10); |
891 | ptr = strstr(config, "S"); | 903 | ptr = strchr(config, 'S'); |
892 | if (ptr) | 904 | if (ptr) |
893 | do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); | 905 | do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); |
894 | ptr = strstr(config, "N"); | 906 | ptr = strchr(config, 'N'); |
895 | if (ptr) | 907 | if (ptr) |
896 | nmi_sleep = simple_strtol(ptr+1, NULL, 10); | 908 | nmi_sleep = simple_strtol(ptr+1, NULL, 10); |
897 | ptr = strstr(config, "I"); | 909 | ptr = strchr(config, 'I'); |
898 | if (ptr) | 910 | if (ptr) |
899 | sstep_test = simple_strtol(ptr+1, NULL, 10); | 911 | sstep_test = simple_strtol(ptr+1, NULL, 10); |
900 | 912 | ||
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 3648b23d5c92..31a991161f0a 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c | |||
@@ -26,21 +26,9 @@ | |||
26 | * It is adapted from the Linux Kernel Dump Test Tool by | 26 | * It is adapted from the Linux Kernel Dump Test Tool by |
27 | * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> | 27 | * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> |
28 | * | 28 | * |
29 | * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<> | 29 | * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net> |
30 | * [cpoint_count={>0}] | ||
31 | * | 30 | * |
32 | * recur_count : Recursion level for the stack overflow test. Default is 10. | 31 | * See Documentation/fault-injection/provoke-crashes.txt for instructions |
33 | * | ||
34 | * cpoint_name : Crash point where the kernel is to be crashed. It can be | ||
35 | * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY, | ||
36 | * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD, | ||
37 | * IDE_CORE_CP | ||
38 | * | ||
39 | * cpoint_type : Indicates the action to be taken on hitting the crash point. | ||
40 | * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW | ||
41 | * | ||
42 | * cpoint_count : Indicates the number of times the crash point is to be hit | ||
43 | * to trigger an action. The default is 10. | ||
44 | */ | 32 | */ |
45 | 33 | ||
46 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
@@ -52,14 +40,14 @@ | |||
52 | #include <linux/init.h> | 40 | #include <linux/init.h> |
53 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
54 | #include <linux/hrtimer.h> | 42 | #include <linux/hrtimer.h> |
43 | #include <linux/slab.h> | ||
55 | #include <scsi/scsi_cmnd.h> | 44 | #include <scsi/scsi_cmnd.h> |
45 | #include <linux/debugfs.h> | ||
56 | 46 | ||
57 | #ifdef CONFIG_IDE | 47 | #ifdef CONFIG_IDE |
58 | #include <linux/ide.h> | 48 | #include <linux/ide.h> |
59 | #endif | 49 | #endif |
60 | 50 | ||
61 | #define NUM_CPOINTS 8 | ||
62 | #define NUM_CPOINT_TYPES 5 | ||
63 | #define DEFAULT_COUNT 10 | 51 | #define DEFAULT_COUNT 10 |
64 | #define REC_NUM_DEFAULT 10 | 52 | #define REC_NUM_DEFAULT 10 |
65 | 53 | ||
@@ -72,7 +60,8 @@ enum cname { | |||
72 | MEM_SWAPOUT, | 60 | MEM_SWAPOUT, |
73 | TIMERADD, | 61 | TIMERADD, |
74 | SCSI_DISPATCH_CMD, | 62 | SCSI_DISPATCH_CMD, |
75 | IDE_CORE_CP | 63 | IDE_CORE_CP, |
64 | DIRECT, | ||
76 | }; | 65 | }; |
77 | 66 | ||
78 | enum ctype { | 67 | enum ctype { |
@@ -81,7 +70,11 @@ enum ctype { | |||
81 | BUG, | 70 | BUG, |
82 | EXCEPTION, | 71 | EXCEPTION, |
83 | LOOP, | 72 | LOOP, |
84 | OVERFLOW | 73 | OVERFLOW, |
74 | CORRUPT_STACK, | ||
75 | UNALIGNED_LOAD_STORE_WRITE, | ||
76 | OVERWRITE_ALLOCATION, | ||
77 | WRITE_AFTER_FREE, | ||
85 | }; | 78 | }; |
86 | 79 | ||
87 | static char* cp_name[] = { | 80 | static char* cp_name[] = { |
@@ -92,7 +85,8 @@ static char* cp_name[] = { | |||
92 | "MEM_SWAPOUT", | 85 | "MEM_SWAPOUT", |
93 | "TIMERADD", | 86 | "TIMERADD", |
94 | "SCSI_DISPATCH_CMD", | 87 | "SCSI_DISPATCH_CMD", |
95 | "IDE_CORE_CP" | 88 | "IDE_CORE_CP", |
89 | "DIRECT", | ||
96 | }; | 90 | }; |
97 | 91 | ||
98 | static char* cp_type[] = { | 92 | static char* cp_type[] = { |
@@ -100,7 +94,11 @@ static char* cp_type[] = { | |||
100 | "BUG", | 94 | "BUG", |
101 | "EXCEPTION", | 95 | "EXCEPTION", |
102 | "LOOP", | 96 | "LOOP", |
103 | "OVERFLOW" | 97 | "OVERFLOW", |
98 | "CORRUPT_STACK", | ||
99 | "UNALIGNED_LOAD_STORE_WRITE", | ||
100 | "OVERWRITE_ALLOCATION", | ||
101 | "WRITE_AFTER_FREE", | ||
104 | }; | 102 | }; |
105 | 103 | ||
106 | static struct jprobe lkdtm; | 104 | static struct jprobe lkdtm; |
@@ -193,34 +191,66 @@ int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, | |||
193 | } | 191 | } |
194 | #endif | 192 | #endif |
195 | 193 | ||
/* Map a crash-type name to its enum ctype value, or NONE if unknown.
 * (The original comment said "crashpoint number", but this parses the
 * crash *type*.)
 * NOTE(review): @count is unused -- matching relies on @what being
 * NUL-terminated; confirm callers guarantee that. */
static enum ctype parse_cp_type(const char *what, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	}

	return NONE;
}

/* Human-readable name for a crash type; "None" for NONE/out-of-range. */
static const char *cp_type_to_str(enum ctype type)
{
	if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type))
		return "None";

	return cp_type[type - 1];
}

/* Human-readable name for a crash point; "INVALID" when out of range. */
static const char *cp_name_to_str(enum cname name)
{
	if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
		return "INVALID";

	return cp_name[name - 1];
}
222 | |||
223 | |||
196 | static int lkdtm_parse_commandline(void) | 224 | static int lkdtm_parse_commandline(void) |
197 | { | 225 | { |
198 | int i; | 226 | int i; |
199 | 227 | ||
200 | if (cpoint_name == NULL || cpoint_type == NULL || | 228 | if (cpoint_count < 1 || recur_count < 1) |
201 | cpoint_count < 1 || recur_count < 1) | ||
202 | return -EINVAL; | 229 | return -EINVAL; |
203 | 230 | ||
204 | for (i = 0; i < NUM_CPOINTS; ++i) { | 231 | count = cpoint_count; |
232 | |||
233 | /* No special parameters */ | ||
234 | if (!cpoint_type && !cpoint_name) | ||
235 | return 0; | ||
236 | |||
237 | /* Neither or both of these need to be set */ | ||
238 | if (!cpoint_type || !cpoint_name) | ||
239 | return -EINVAL; | ||
240 | |||
241 | cptype = parse_cp_type(cpoint_type, strlen(cpoint_type)); | ||
242 | if (cptype == NONE) | ||
243 | return -EINVAL; | ||
244 | |||
245 | for (i = 0; i < ARRAY_SIZE(cp_name); i++) { | ||
205 | if (!strcmp(cpoint_name, cp_name[i])) { | 246 | if (!strcmp(cpoint_name, cp_name[i])) { |
206 | cpoint = i + 1; | 247 | cpoint = i + 1; |
207 | break; | 248 | return 0; |
208 | } | ||
209 | } | ||
210 | |||
211 | for (i = 0; i < NUM_CPOINT_TYPES; ++i) { | ||
212 | if (!strcmp(cpoint_type, cp_type[i])) { | ||
213 | cptype = i + 1; | ||
214 | break; | ||
215 | } | 249 | } |
216 | } | 250 | } |
217 | 251 | ||
218 | if (cpoint == INVALID || cptype == NONE) | 252 | /* Could not find a valid crash point */ |
219 | return -EINVAL; | 253 | return -EINVAL; |
220 | |||
221 | count = cpoint_count; | ||
222 | |||
223 | return 0; | ||
224 | } | 254 | } |
225 | 255 | ||
226 | static int recursive_loop(int a) | 256 | static int recursive_loop(int a) |
@@ -235,53 +265,92 @@ static int recursive_loop(int a) | |||
235 | return recursive_loop(a); | 265 | return recursive_loop(a); |
236 | } | 266 | } |
237 | 267 | ||
238 | void lkdtm_handler(void) | 268 | static void lkdtm_do_action(enum ctype which) |
239 | { | 269 | { |
240 | printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n", | 270 | switch (which) { |
241 | cpoint_name, cpoint_type); | 271 | case PANIC: |
242 | --count; | 272 | panic("dumptest"); |
273 | break; | ||
274 | case BUG: | ||
275 | BUG(); | ||
276 | break; | ||
277 | case EXCEPTION: | ||
278 | *((int *) 0) = 0; | ||
279 | break; | ||
280 | case LOOP: | ||
281 | for (;;) | ||
282 | ; | ||
283 | break; | ||
284 | case OVERFLOW: | ||
285 | (void) recursive_loop(0); | ||
286 | break; | ||
287 | case CORRUPT_STACK: { | ||
288 | volatile u32 data[8]; | ||
289 | volatile u32 *p = data; | ||
290 | |||
291 | p[12] = 0x12345678; | ||
292 | break; | ||
293 | } | ||
294 | case UNALIGNED_LOAD_STORE_WRITE: { | ||
295 | static u8 data[5] __attribute__((aligned(4))) = {1, 2, | ||
296 | 3, 4, 5}; | ||
297 | u32 *p; | ||
298 | u32 val = 0x12345678; | ||
299 | |||
300 | p = (u32 *)(data + 1); | ||
301 | if (*p == 0) | ||
302 | val = 0x87654321; | ||
303 | *p = val; | ||
304 | break; | ||
305 | } | ||
306 | case OVERWRITE_ALLOCATION: { | ||
307 | size_t len = 1020; | ||
308 | u32 *data = kmalloc(len, GFP_KERNEL); | ||
309 | |||
310 | data[1024 / sizeof(u32)] = 0x12345678; | ||
311 | kfree(data); | ||
312 | break; | ||
313 | } | ||
314 | case WRITE_AFTER_FREE: { | ||
315 | size_t len = 1024; | ||
316 | u32 *data = kmalloc(len, GFP_KERNEL); | ||
317 | |||
318 | kfree(data); | ||
319 | schedule(); | ||
320 | memset(data, 0x78, len); | ||
321 | break; | ||
322 | } | ||
323 | case NONE: | ||
324 | default: | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | } | ||
329 | |||
330 | static void lkdtm_handler(void) | ||
331 | { | ||
332 | count--; | ||
333 | printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", | ||
334 | cp_name_to_str(cpoint), cp_type_to_str(cptype), count); | ||
243 | 335 | ||
244 | if (count == 0) { | 336 | if (count == 0) { |
245 | switch (cptype) { | 337 | lkdtm_do_action(cptype); |
246 | case NONE: | ||
247 | break; | ||
248 | case PANIC: | ||
249 | printk(KERN_INFO "lkdtm : PANIC\n"); | ||
250 | panic("dumptest"); | ||
251 | break; | ||
252 | case BUG: | ||
253 | printk(KERN_INFO "lkdtm : BUG\n"); | ||
254 | BUG(); | ||
255 | break; | ||
256 | case EXCEPTION: | ||
257 | printk(KERN_INFO "lkdtm : EXCEPTION\n"); | ||
258 | *((int *) 0) = 0; | ||
259 | break; | ||
260 | case LOOP: | ||
261 | printk(KERN_INFO "lkdtm : LOOP\n"); | ||
262 | for (;;); | ||
263 | break; | ||
264 | case OVERFLOW: | ||
265 | printk(KERN_INFO "lkdtm : OVERFLOW\n"); | ||
266 | (void) recursive_loop(0); | ||
267 | break; | ||
268 | default: | ||
269 | break; | ||
270 | } | ||
271 | count = cpoint_count; | 338 | count = cpoint_count; |
272 | } | 339 | } |
273 | } | 340 | } |
274 | 341 | ||
275 | static int __init lkdtm_module_init(void) | 342 | static int lkdtm_register_cpoint(enum cname which) |
276 | { | 343 | { |
277 | int ret; | 344 | int ret; |
278 | 345 | ||
279 | if (lkdtm_parse_commandline() == -EINVAL) { | 346 | cpoint = INVALID; |
280 | printk(KERN_INFO "lkdtm : Invalid command\n"); | 347 | if (lkdtm.entry != NULL) |
281 | return -EINVAL; | 348 | unregister_jprobe(&lkdtm); |
282 | } | ||
283 | 349 | ||
284 | switch (cpoint) { | 350 | switch (which) { |
351 | case DIRECT: | ||
352 | lkdtm_do_action(cptype); | ||
353 | return 0; | ||
285 | case INT_HARDWARE_ENTRY: | 354 | case INT_HARDWARE_ENTRY: |
286 | lkdtm.kp.symbol_name = "do_IRQ"; | 355 | lkdtm.kp.symbol_name = "do_IRQ"; |
287 | lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; | 356 | lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; |
@@ -315,28 +384,268 @@ static int __init lkdtm_module_init(void) | |||
315 | lkdtm.kp.symbol_name = "generic_ide_ioctl"; | 384 | lkdtm.kp.symbol_name = "generic_ide_ioctl"; |
316 | lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; | 385 | lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; |
317 | #else | 386 | #else |
318 | printk(KERN_INFO "lkdtm : Crash point not available\n"); | 387 | printk(KERN_INFO "lkdtm: Crash point not available\n"); |
388 | return -EINVAL; | ||
319 | #endif | 389 | #endif |
320 | break; | 390 | break; |
321 | default: | 391 | default: |
322 | printk(KERN_INFO "lkdtm : Invalid Crash Point\n"); | 392 | printk(KERN_INFO "lkdtm: Invalid Crash Point\n"); |
323 | break; | 393 | return -EINVAL; |
324 | } | 394 | } |
325 | 395 | ||
396 | cpoint = which; | ||
326 | if ((ret = register_jprobe(&lkdtm)) < 0) { | 397 | if ((ret = register_jprobe(&lkdtm)) < 0) { |
327 | printk(KERN_INFO "lkdtm : Couldn't register jprobe\n"); | 398 | printk(KERN_INFO "lkdtm: Couldn't register jprobe\n"); |
328 | return ret; | 399 | cpoint = INVALID; |
400 | } | ||
401 | |||
402 | return ret; | ||
403 | } | ||
404 | |||
405 | static ssize_t do_register_entry(enum cname which, struct file *f, | ||
406 | const char __user *user_buf, size_t count, loff_t *off) | ||
407 | { | ||
408 | char *buf; | ||
409 | int err; | ||
410 | |||
411 | if (count >= PAGE_SIZE) | ||
412 | return -EINVAL; | ||
413 | |||
414 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
415 | if (!buf) | ||
416 | return -ENOMEM; | ||
417 | if (copy_from_user(buf, user_buf, count)) { | ||
418 | free_page((unsigned long) buf); | ||
419 | return -EFAULT; | ||
420 | } | ||
421 | /* NULL-terminate and remove enter */ | ||
422 | buf[count] = '\0'; | ||
423 | strim(buf); | ||
424 | |||
425 | cptype = parse_cp_type(buf, count); | ||
426 | free_page((unsigned long) buf); | ||
427 | |||
428 | if (cptype == NONE) | ||
429 | return -EINVAL; | ||
430 | |||
431 | err = lkdtm_register_cpoint(which); | ||
432 | if (err < 0) | ||
433 | return err; | ||
434 | |||
435 | *off += count; | ||
436 | |||
437 | return count; | ||
438 | } | ||
439 | |||
440 | /* Generic read callback that just prints out the available crash types */ | ||
441 | static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, | ||
442 | size_t count, loff_t *off) | ||
443 | { | ||
444 | char *buf; | ||
445 | int i, n, out; | ||
446 | |||
447 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
448 | |||
449 | n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); | ||
450 | for (i = 0; i < ARRAY_SIZE(cp_type); i++) | ||
451 | n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]); | ||
452 | buf[n] = '\0'; | ||
453 | |||
454 | out = simple_read_from_buffer(user_buf, count, off, | ||
455 | buf, n); | ||
456 | free_page((unsigned long) buf); | ||
457 | |||
458 | return out; | ||
459 | } | ||
460 | |||
461 | static int lkdtm_debugfs_open(struct inode *inode, struct file *file) | ||
462 | { | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | |||
467 | static ssize_t int_hardware_entry(struct file *f, const char __user *buf, | ||
468 | size_t count, loff_t *off) | ||
469 | { | ||
470 | return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off); | ||
471 | } | ||
472 | |||
473 | static ssize_t int_hw_irq_en(struct file *f, const char __user *buf, | ||
474 | size_t count, loff_t *off) | ||
475 | { | ||
476 | return do_register_entry(INT_HW_IRQ_EN, f, buf, count, off); | ||
477 | } | ||
478 | |||
479 | static ssize_t int_tasklet_entry(struct file *f, const char __user *buf, | ||
480 | size_t count, loff_t *off) | ||
481 | { | ||
482 | return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off); | ||
483 | } | ||
484 | |||
485 | static ssize_t fs_devrw_entry(struct file *f, const char __user *buf, | ||
486 | size_t count, loff_t *off) | ||
487 | { | ||
488 | return do_register_entry(FS_DEVRW, f, buf, count, off); | ||
489 | } | ||
490 | |||
491 | static ssize_t mem_swapout_entry(struct file *f, const char __user *buf, | ||
492 | size_t count, loff_t *off) | ||
493 | { | ||
494 | return do_register_entry(MEM_SWAPOUT, f, buf, count, off); | ||
495 | } | ||
496 | |||
497 | static ssize_t timeradd_entry(struct file *f, const char __user *buf, | ||
498 | size_t count, loff_t *off) | ||
499 | { | ||
500 | return do_register_entry(TIMERADD, f, buf, count, off); | ||
501 | } | ||
502 | |||
503 | static ssize_t scsi_dispatch_cmd_entry(struct file *f, | ||
504 | const char __user *buf, size_t count, loff_t *off) | ||
505 | { | ||
506 | return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off); | ||
507 | } | ||
508 | |||
509 | static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf, | ||
510 | size_t count, loff_t *off) | ||
511 | { | ||
512 | return do_register_entry(IDE_CORE_CP, f, buf, count, off); | ||
513 | } | ||
514 | |||
515 | /* Special entry to just crash directly. Available without KPROBEs */ | ||
516 | static ssize_t direct_entry(struct file *f, const char __user *user_buf, | ||
517 | size_t count, loff_t *off) | ||
518 | { | ||
519 | enum ctype type; | ||
520 | char *buf; | ||
521 | |||
522 | if (count >= PAGE_SIZE) | ||
523 | return -EINVAL; | ||
524 | if (count < 1) | ||
525 | return -EINVAL; | ||
526 | |||
527 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
528 | if (!buf) | ||
529 | return -ENOMEM; | ||
530 | if (copy_from_user(buf, user_buf, count)) { | ||
531 | free_page((unsigned long) buf); | ||
532 | return -EFAULT; | ||
533 | } | ||
534 | /* NULL-terminate and remove enter */ | ||
535 | buf[count] = '\0'; | ||
536 | strim(buf); | ||
537 | |||
538 | type = parse_cp_type(buf, count); | ||
539 | free_page((unsigned long) buf); | ||
540 | if (type == NONE) | ||
541 | return -EINVAL; | ||
542 | |||
543 | printk(KERN_INFO "lkdtm: Performing direct entry %s\n", | ||
544 | cp_type_to_str(type)); | ||
545 | lkdtm_do_action(type); | ||
546 | *off += count; | ||
547 | |||
548 | return count; | ||
549 | } | ||
550 | |||
551 | struct crash_entry { | ||
552 | const char *name; | ||
553 | const struct file_operations fops; | ||
554 | }; | ||
555 | |||
556 | static const struct crash_entry crash_entries[] = { | ||
557 | {"DIRECT", {.read = lkdtm_debugfs_read, | ||
558 | .open = lkdtm_debugfs_open, | ||
559 | .write = direct_entry} }, | ||
560 | {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read, | ||
561 | .open = lkdtm_debugfs_open, | ||
562 | .write = int_hardware_entry} }, | ||
563 | {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read, | ||
564 | .open = lkdtm_debugfs_open, | ||
565 | .write = int_hw_irq_en} }, | ||
566 | {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read, | ||
567 | .open = lkdtm_debugfs_open, | ||
568 | .write = int_tasklet_entry} }, | ||
569 | {"FS_DEVRW", {.read = lkdtm_debugfs_read, | ||
570 | .open = lkdtm_debugfs_open, | ||
571 | .write = fs_devrw_entry} }, | ||
572 | {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read, | ||
573 | .open = lkdtm_debugfs_open, | ||
574 | .write = mem_swapout_entry} }, | ||
575 | {"TIMERADD", {.read = lkdtm_debugfs_read, | ||
576 | .open = lkdtm_debugfs_open, | ||
577 | .write = timeradd_entry} }, | ||
578 | {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read, | ||
579 | .open = lkdtm_debugfs_open, | ||
580 | .write = scsi_dispatch_cmd_entry} }, | ||
581 | {"IDE_CORE_CP", {.read = lkdtm_debugfs_read, | ||
582 | .open = lkdtm_debugfs_open, | ||
583 | .write = ide_core_cp_entry} }, | ||
584 | }; | ||
585 | |||
586 | static struct dentry *lkdtm_debugfs_root; | ||
587 | |||
588 | static int __init lkdtm_module_init(void) | ||
589 | { | ||
590 | int ret = -EINVAL; | ||
591 | int n_debugfs_entries = 1; /* Assume only the direct entry */ | ||
592 | int i; | ||
593 | |||
594 | /* Register debugfs interface */ | ||
595 | lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); | ||
596 | if (!lkdtm_debugfs_root) { | ||
597 | printk(KERN_ERR "lkdtm: creating root dir failed\n"); | ||
598 | return -ENODEV; | ||
599 | } | ||
600 | |||
601 | #ifdef CONFIG_KPROBES | ||
602 | n_debugfs_entries = ARRAY_SIZE(crash_entries); | ||
603 | #endif | ||
604 | |||
605 | for (i = 0; i < n_debugfs_entries; i++) { | ||
606 | const struct crash_entry *cur = &crash_entries[i]; | ||
607 | struct dentry *de; | ||
608 | |||
609 | de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, | ||
610 | NULL, &cur->fops); | ||
611 | if (de == NULL) { | ||
612 | printk(KERN_ERR "lkdtm: could not create %s\n", | ||
613 | cur->name); | ||
614 | goto out_err; | ||
615 | } | ||
616 | } | ||
617 | |||
618 | if (lkdtm_parse_commandline() == -EINVAL) { | ||
619 | printk(KERN_INFO "lkdtm: Invalid command\n"); | ||
620 | goto out_err; | ||
621 | } | ||
622 | |||
623 | if (cpoint != INVALID && cptype != NONE) { | ||
624 | ret = lkdtm_register_cpoint(cpoint); | ||
625 | if (ret < 0) { | ||
626 | printk(KERN_INFO "lkdtm: Invalid crash point %d\n", | ||
627 | cpoint); | ||
628 | goto out_err; | ||
629 | } | ||
630 | printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n", | ||
631 | cpoint_name, cpoint_type); | ||
632 | } else { | ||
633 | printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n"); | ||
329 | } | 634 | } |
330 | 635 | ||
331 | printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n", | ||
332 | cpoint_name, cpoint_type); | ||
333 | return 0; | 636 | return 0; |
637 | |||
638 | out_err: | ||
639 | debugfs_remove_recursive(lkdtm_debugfs_root); | ||
640 | return ret; | ||
334 | } | 641 | } |
335 | 642 | ||
336 | static void __exit lkdtm_module_exit(void) | 643 | static void __exit lkdtm_module_exit(void) |
337 | { | 644 | { |
338 | unregister_jprobe(&lkdtm); | 645 | debugfs_remove_recursive(lkdtm_debugfs_root); |
339 | printk(KERN_INFO "lkdtm : Crash point unregistered\n"); | 646 | |
647 | unregister_jprobe(&lkdtm); | ||
648 | printk(KERN_INFO "lkdtm: Crash point unregistered\n"); | ||
340 | } | 649 | } |
341 | 650 | ||
342 | module_init(lkdtm_module_init); | 651 | module_init(lkdtm_module_init); |
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 04c27266f567..75ee0d3f6f45 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/cdev.h> | 23 | #include <linux/cdev.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/phantom.h> | 25 | #include <linux/phantom.h> |
25 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
26 | #include <linux/smp_lock.h> | 27 | #include <linux/smp_lock.h> |
@@ -497,12 +498,7 @@ static struct pci_driver phantom_pci_driver = { | |||
497 | .resume = phantom_resume | 498 | .resume = phantom_resume |
498 | }; | 499 | }; |
499 | 500 | ||
500 | static ssize_t phantom_show_version(struct class *cls, char *buf) | 501 | static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION); |
501 | { | ||
502 | return sprintf(buf, PHANTOM_VERSION "\n"); | ||
503 | } | ||
504 | |||
505 | static CLASS_ATTR(version, 0444, phantom_show_version, NULL); | ||
506 | 502 | ||
507 | static int __init phantom_init(void) | 503 | static int __init phantom_init(void) |
508 | { | 504 | { |
@@ -515,7 +511,7 @@ static int __init phantom_init(void) | |||
515 | printk(KERN_ERR "phantom: can't register phantom class\n"); | 511 | printk(KERN_ERR "phantom: can't register phantom class\n"); |
516 | goto err; | 512 | goto err; |
517 | } | 513 | } |
518 | retval = class_create_file(phantom_class, &class_attr_version); | 514 | retval = class_create_file(phantom_class, &class_attr_version.attr); |
519 | if (retval) { | 515 | if (retval) { |
520 | printk(KERN_ERR "phantom: can't create sysfs version file\n"); | 516 | printk(KERN_ERR "phantom: can't create sysfs version file\n"); |
521 | goto err_class; | 517 | goto err_class; |
@@ -541,7 +537,7 @@ static int __init phantom_init(void) | |||
541 | err_unchr: | 537 | err_unchr: |
542 | unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); | 538 | unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); |
543 | err_attr: | 539 | err_attr: |
544 | class_remove_file(phantom_class, &class_attr_version); | 540 | class_remove_file(phantom_class, &class_attr_version.attr); |
545 | err_class: | 541 | err_class: |
546 | class_destroy(phantom_class); | 542 | class_destroy(phantom_class); |
547 | err: | 543 | err: |
@@ -554,7 +550,7 @@ static void __exit phantom_exit(void) | |||
554 | 550 | ||
555 | unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); | 551 | unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); |
556 | 552 | ||
557 | class_remove_file(phantom_class, &class_attr_version); | 553 | class_remove_file(phantom_class, &class_attr_version.attr); |
558 | class_destroy(phantom_class); | 554 | class_destroy(phantom_class); |
559 | 555 | ||
560 | pr_debug("phantom: module successfully removed\n"); | 556 | pr_debug("phantom: module successfully removed\n"); |
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h index f93f03a9e6e9..3ad76cd18b4b 100644 --- a/drivers/misc/sgi-gru/gru.h +++ b/drivers/misc/sgi-gru/gru.h | |||
@@ -53,6 +53,17 @@ struct gru_chiplet_info { | |||
53 | int free_user_cbr; | 53 | int free_user_cbr; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* | ||
57 | * Statictics kept for each context. | ||
58 | */ | ||
59 | struct gru_gseg_statistics { | ||
60 | unsigned long fmm_tlbmiss; | ||
61 | unsigned long upm_tlbmiss; | ||
62 | unsigned long tlbdropin; | ||
63 | unsigned long context_stolen; | ||
64 | unsigned long reserved[10]; | ||
65 | }; | ||
66 | |||
56 | /* Flags for GRU options on the gru_create_context() call */ | 67 | /* Flags for GRU options on the gru_create_context() call */ |
57 | /* Select one of the follow 4 options to specify how TLB misses are handled */ | 68 | /* Select one of the follow 4 options to specify how TLB misses are handled */ |
58 | #define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */ | 69 | #define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */ |
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index 3c9c06618e6a..d95587cc794c 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h | |||
@@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb); | |||
34 | #include <asm/intrinsics.h> | 34 | #include <asm/intrinsics.h> |
35 | #define __flush_cache(p) ia64_fc((unsigned long)p) | 35 | #define __flush_cache(p) ia64_fc((unsigned long)p) |
36 | /* Use volatile on IA64 to ensure ordering via st4.rel */ | 36 | /* Use volatile on IA64 to ensure ordering via st4.rel */ |
37 | #define gru_ordered_store_int(p, v) \ | 37 | #define gru_ordered_store_ulong(p, v) \ |
38 | do { \ | 38 | do { \ |
39 | barrier(); \ | 39 | barrier(); \ |
40 | *((volatile int *)(p)) = v; /* force st.rel */ \ | 40 | *((volatile unsigned long *)(p)) = v; /* force st.rel */ \ |
41 | } while (0) | 41 | } while (0) |
42 | #elif defined(CONFIG_X86_64) | 42 | #elif defined(CONFIG_X86_64) |
43 | #define __flush_cache(p) clflush(p) | 43 | #define __flush_cache(p) clflush(p) |
44 | #define gru_ordered_store_int(p, v) \ | 44 | #define gru_ordered_store_ulong(p, v) \ |
45 | do { \ | 45 | do { \ |
46 | barrier(); \ | 46 | barrier(); \ |
47 | *(int *)p = v; \ | 47 | *(unsigned long *)p = v; \ |
48 | } while (0) | 48 | } while (0) |
49 | #else | 49 | #else |
50 | #error "Unsupported architecture" | 50 | #error "Unsupported architecture" |
@@ -129,8 +129,13 @@ struct gru_instruction_bits { | |||
129 | */ | 129 | */ |
130 | struct gru_instruction { | 130 | struct gru_instruction { |
131 | /* DW 0 */ | 131 | /* DW 0 */ |
132 | unsigned int op32; /* icmd,xtype,iaa0,ima,opc */ | 132 | union { |
133 | unsigned int tri0; | 133 | unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */ |
134 | struct { | ||
135 | unsigned int op32; | ||
136 | unsigned int tri0; | ||
137 | }; | ||
138 | }; | ||
134 | unsigned long tri1_bufsize; /* DW 1 */ | 139 | unsigned long tri1_bufsize; /* DW 1 */ |
135 | unsigned long baddr0; /* DW 2 */ | 140 | unsigned long baddr0; /* DW 2 */ |
136 | unsigned long nelem; /* DW 3 */ | 141 | unsigned long nelem; /* DW 3 */ |
@@ -140,7 +145,7 @@ struct gru_instruction { | |||
140 | unsigned long avalue; /* DW 7 */ | 145 | unsigned long avalue; /* DW 7 */ |
141 | }; | 146 | }; |
142 | 147 | ||
143 | /* Some shifts and masks for the low 32 bits of a GRU command */ | 148 | /* Some shifts and masks for the low 64 bits of a GRU command */ |
144 | #define GRU_CB_ICMD_SHFT 0 | 149 | #define GRU_CB_ICMD_SHFT 0 |
145 | #define GRU_CB_ICMD_MASK 0x1 | 150 | #define GRU_CB_ICMD_MASK 0x1 |
146 | #define GRU_CB_XTYPE_SHFT 8 | 151 | #define GRU_CB_XTYPE_SHFT 8 |
@@ -155,6 +160,10 @@ struct gru_instruction { | |||
155 | #define GRU_CB_OPC_MASK 0xff | 160 | #define GRU_CB_OPC_MASK 0xff |
156 | #define GRU_CB_EXOPC_SHFT 24 | 161 | #define GRU_CB_EXOPC_SHFT 24 |
157 | #define GRU_CB_EXOPC_MASK 0xff | 162 | #define GRU_CB_EXOPC_MASK 0xff |
163 | #define GRU_IDEF2_SHFT 32 | ||
164 | #define GRU_IDEF2_MASK 0x3ffff | ||
165 | #define GRU_ISTATUS_SHFT 56 | ||
166 | #define GRU_ISTATUS_MASK 0x3 | ||
158 | 167 | ||
159 | /* GRU instruction opcodes (opc field) */ | 168 | /* GRU instruction opcodes (opc field) */ |
160 | #define OP_NOP 0x00 | 169 | #define OP_NOP 0x00 |
@@ -256,6 +265,7 @@ struct gru_instruction { | |||
256 | #define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) | 265 | #define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) |
257 | #define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) | 266 | #define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) |
258 | #define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) | 267 | #define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) |
268 | #define CBE_CAUSE_FORCED_ERROR (1 << 19) | ||
259 | 269 | ||
260 | /* CBE cbrexecstatus bits */ | 270 | /* CBE cbrexecstatus bits */ |
261 | #define CBR_EXS_ABORT_OCC_BIT 0 | 271 | #define CBR_EXS_ABORT_OCC_BIT 0 |
@@ -264,13 +274,15 @@ struct gru_instruction { | |||
264 | #define CBR_EXS_QUEUED_BIT 3 | 274 | #define CBR_EXS_QUEUED_BIT 3 |
265 | #define CBR_EXS_TLB_INVAL_BIT 4 | 275 | #define CBR_EXS_TLB_INVAL_BIT 4 |
266 | #define CBR_EXS_EXCEPTION_BIT 5 | 276 | #define CBR_EXS_EXCEPTION_BIT 5 |
277 | #define CBR_EXS_CB_INT_PENDING_BIT 6 | ||
267 | 278 | ||
268 | #define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) | 279 | #define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) |
269 | #define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) | 280 | #define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) |
270 | #define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) | 281 | #define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) |
271 | #define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) | 282 | #define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) |
272 | #define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) | 283 | #define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) |
273 | #define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) | 284 | #define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) |
285 | #define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT) | ||
274 | 286 | ||
275 | /* | 287 | /* |
276 | * Exceptions are retried for the following cases. If any OTHER bits are set | 288 | * Exceptions are retried for the following cases. If any OTHER bits are set |
@@ -296,12 +308,14 @@ union gru_mesqhead { | |||
296 | 308 | ||
297 | 309 | ||
298 | /* Generate the low word of a GRU instruction */ | 310 | /* Generate the low word of a GRU instruction */ |
299 | static inline unsigned int | 311 | static inline unsigned long |
300 | __opword(unsigned char opcode, unsigned char exopc, unsigned char xtype, | 312 | __opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype, |
301 | unsigned char iaa0, unsigned char iaa1, | 313 | unsigned char iaa0, unsigned char iaa1, |
302 | unsigned char ima) | 314 | unsigned long idef2, unsigned char ima) |
303 | { | 315 | { |
304 | return (1 << GRU_CB_ICMD_SHFT) | | 316 | return (1 << GRU_CB_ICMD_SHFT) | |
317 | ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) | | ||
318 | (idef2<< GRU_IDEF2_SHFT) | | ||
305 | (iaa0 << GRU_CB_IAA0_SHFT) | | 319 | (iaa0 << GRU_CB_IAA0_SHFT) | |
306 | (iaa1 << GRU_CB_IAA1_SHFT) | | 320 | (iaa1 << GRU_CB_IAA1_SHFT) | |
307 | (ima << GRU_CB_IMA_SHFT) | | 321 | (ima << GRU_CB_IMA_SHFT) | |
@@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p) | |||
319 | } | 333 | } |
320 | 334 | ||
321 | /* | 335 | /* |
322 | * Store the lower 32 bits of the command including the "start" bit. Then | 336 | * Store the lower 64 bits of the command including the "start" bit. Then |
323 | * start the instruction executing. | 337 | * start the instruction executing. |
324 | */ | 338 | */ |
325 | static inline void gru_start_instruction(struct gru_instruction *ins, int op32) | 339 | static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64) |
326 | { | 340 | { |
327 | gru_ordered_store_int(ins, op32); | 341 | gru_ordered_store_ulong(ins, op64); |
342 | mb(); | ||
328 | gru_flush_cache(ins); | 343 | gru_flush_cache(ins); |
329 | } | 344 | } |
330 | 345 | ||
@@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32) | |||
340 | * - nelem and stride are in elements | 355 | * - nelem and stride are in elements |
341 | * - tri0/tri1 is in bytes for the beginning of the data segment. | 356 | * - tri0/tri1 is in bytes for the beginning of the data segment. |
342 | */ | 357 | */ |
358 | static inline void gru_vload_phys(void *cb, unsigned long gpa, | ||
359 | unsigned int tri0, int iaa, unsigned long hints) | ||
360 | { | ||
361 | struct gru_instruction *ins = (struct gru_instruction *)cb; | ||
362 | |||
363 | ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62); | ||
364 | ins->nelem = 1; | ||
365 | ins->op1_stride = 1; | ||
366 | gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0, | ||
367 | (unsigned long)tri0, CB_IMA(hints))); | ||
368 | } | ||
369 | |||
370 | static inline void gru_vstore_phys(void *cb, unsigned long gpa, | ||
371 | unsigned int tri0, int iaa, unsigned long hints) | ||
372 | { | ||
373 | struct gru_instruction *ins = (struct gru_instruction *)cb; | ||
374 | |||
375 | ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62); | ||
376 | ins->nelem = 1; | ||
377 | ins->op1_stride = 1; | ||
378 | gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0, | ||
379 | (unsigned long)tri0, CB_IMA(hints))); | ||
380 | } | ||
381 | |||
343 | static inline void gru_vload(void *cb, unsigned long mem_addr, | 382 | static inline void gru_vload(void *cb, unsigned long mem_addr, |
344 | unsigned int tri0, unsigned char xtype, unsigned long nelem, | 383 | unsigned int tri0, unsigned char xtype, unsigned long nelem, |
345 | unsigned long stride, unsigned long hints) | 384 | unsigned long stride, unsigned long hints) |
@@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr, | |||
348 | 387 | ||
349 | ins->baddr0 = (long)mem_addr; | 388 | ins->baddr0 = (long)mem_addr; |
350 | ins->nelem = nelem; | 389 | ins->nelem = nelem; |
351 | ins->tri0 = tri0; | ||
352 | ins->op1_stride = stride; | 390 | ins->op1_stride = stride; |
353 | gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0, | 391 | gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0, |
354 | CB_IMA(hints))); | 392 | (unsigned long)tri0, CB_IMA(hints))); |
355 | } | 393 | } |
356 | 394 | ||
357 | static inline void gru_vstore(void *cb, unsigned long mem_addr, | 395 | static inline void gru_vstore(void *cb, unsigned long mem_addr, |
@@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr, | |||
362 | 400 | ||
363 | ins->baddr0 = (long)mem_addr; | 401 | ins->baddr0 = (long)mem_addr; |
364 | ins->nelem = nelem; | 402 | ins->nelem = nelem; |
365 | ins->tri0 = tri0; | ||
366 | ins->op1_stride = stride; | 403 | ins->op1_stride = stride; |
367 | gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0, | 404 | gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0, |
368 | CB_IMA(hints))); | 405 | tri0, CB_IMA(hints))); |
369 | } | 406 | } |
370 | 407 | ||
371 | static inline void gru_ivload(void *cb, unsigned long mem_addr, | 408 | static inline void gru_ivload(void *cb, unsigned long mem_addr, |
@@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr, | |||
376 | 413 | ||
377 | ins->baddr0 = (long)mem_addr; | 414 | ins->baddr0 = (long)mem_addr; |
378 | ins->nelem = nelem; | 415 | ins->nelem = nelem; |
379 | ins->tri0 = tri0; | ||
380 | ins->tri1_bufsize = tri1; | 416 | ins->tri1_bufsize = tri1; |
381 | gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, | 417 | gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, |
382 | CB_IMA(hints))); | 418 | tri0, CB_IMA(hints))); |
383 | } | 419 | } |
384 | 420 | ||
385 | static inline void gru_ivstore(void *cb, unsigned long mem_addr, | 421 | static inline void gru_ivstore(void *cb, unsigned long mem_addr, |
@@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr, | |||
390 | 426 | ||
391 | ins->baddr0 = (long)mem_addr; | 427 | ins->baddr0 = (long)mem_addr; |
392 | ins->nelem = nelem; | 428 | ins->nelem = nelem; |
393 | ins->tri0 = tri0; | ||
394 | ins->tri1_bufsize = tri1; | 429 | ins->tri1_bufsize = tri1; |
395 | gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, | 430 | gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, |
396 | CB_IMA(hints))); | 431 | tri0, CB_IMA(hints))); |
397 | } | 432 | } |
398 | 433 | ||
399 | static inline void gru_vset(void *cb, unsigned long mem_addr, | 434 | static inline void gru_vset(void *cb, unsigned long mem_addr, |
@@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr, | |||
406 | ins->op2_value_baddr1 = value; | 441 | ins->op2_value_baddr1 = value; |
407 | ins->nelem = nelem; | 442 | ins->nelem = nelem; |
408 | ins->op1_stride = stride; | 443 | ins->op1_stride = stride; |
409 | gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0, | 444 | gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0, |
410 | CB_IMA(hints))); | 445 | 0, CB_IMA(hints))); |
411 | } | 446 | } |
412 | 447 | ||
413 | static inline void gru_ivset(void *cb, unsigned long mem_addr, | 448 | static inline void gru_ivset(void *cb, unsigned long mem_addr, |
@@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr, | |||
420 | ins->op2_value_baddr1 = value; | 455 | ins->op2_value_baddr1 = value; |
421 | ins->nelem = nelem; | 456 | ins->nelem = nelem; |
422 | ins->tri1_bufsize = tri1; | 457 | ins->tri1_bufsize = tri1; |
423 | gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0, | 458 | gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0, |
424 | CB_IMA(hints))); | 459 | 0, CB_IMA(hints))); |
425 | } | 460 | } |
426 | 461 | ||
427 | static inline void gru_vflush(void *cb, unsigned long mem_addr, | 462 | static inline void gru_vflush(void *cb, unsigned long mem_addr, |
@@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr, | |||
433 | ins->baddr0 = (long)mem_addr; | 468 | ins->baddr0 = (long)mem_addr; |
434 | ins->op1_stride = stride; | 469 | ins->op1_stride = stride; |
435 | ins->nelem = nelem; | 470 | ins->nelem = nelem; |
436 | gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, | 471 | gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, |
437 | CB_IMA(hints))); | 472 | 0, CB_IMA(hints))); |
438 | } | 473 | } |
439 | 474 | ||
440 | static inline void gru_nop(void *cb, int hints) | 475 | static inline void gru_nop(void *cb, int hints) |
441 | { | 476 | { |
442 | struct gru_instruction *ins = (void *)cb; | 477 | struct gru_instruction *ins = (void *)cb; |
443 | 478 | ||
444 | gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints))); | 479 | gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints))); |
445 | } | 480 | } |
446 | 481 | ||
447 | 482 | ||
@@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src, | |||
455 | ins->baddr0 = (long)src; | 490 | ins->baddr0 = (long)src; |
456 | ins->op2_value_baddr1 = (long)dest; | 491 | ins->op2_value_baddr1 = (long)dest; |
457 | ins->nelem = nelem; | 492 | ins->nelem = nelem; |
458 | ins->tri0 = tri0; | ||
459 | ins->tri1_bufsize = bufsize; | 493 | ins->tri1_bufsize = bufsize; |
460 | gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM, | 494 | gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM, |
461 | IAA_RAM, CB_IMA(hints))); | 495 | IAA_RAM, tri0, CB_IMA(hints))); |
462 | } | 496 | } |
463 | 497 | ||
464 | static inline void gru_bstore(void *cb, const unsigned long src, | 498 | static inline void gru_bstore(void *cb, const unsigned long src, |
@@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src, | |||
470 | ins->baddr0 = (long)src; | 504 | ins->baddr0 = (long)src; |
471 | ins->op2_value_baddr1 = (long)dest; | 505 | ins->op2_value_baddr1 = (long)dest; |
472 | ins->nelem = nelem; | 506 | ins->nelem = nelem; |
473 | ins->tri0 = tri0; | 507 | gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM, |
474 | gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM, | 508 | tri0, CB_IMA(hints))); |
475 | CB_IMA(hints))); | ||
476 | } | 509 | } |
477 | 510 | ||
478 | static inline void gru_gamir(void *cb, int exopc, unsigned long src, | 511 | static inline void gru_gamir(void *cb, int exopc, unsigned long src, |
@@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src, | |||
481 | struct gru_instruction *ins = (void *)cb; | 514 | struct gru_instruction *ins = (void *)cb; |
482 | 515 | ||
483 | ins->baddr0 = (long)src; | 516 | ins->baddr0 = (long)src; |
484 | gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, | 517 | gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, |
485 | CB_IMA(hints))); | 518 | 0, CB_IMA(hints))); |
486 | } | 519 | } |
487 | 520 | ||
488 | static inline void gru_gamirr(void *cb, int exopc, unsigned long src, | 521 | static inline void gru_gamirr(void *cb, int exopc, unsigned long src, |
@@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src, | |||
491 | struct gru_instruction *ins = (void *)cb; | 524 | struct gru_instruction *ins = (void *)cb; |
492 | 525 | ||
493 | ins->baddr0 = (long)src; | 526 | ins->baddr0 = (long)src; |
494 | gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, | 527 | gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, |
495 | CB_IMA(hints))); | 528 | 0, CB_IMA(hints))); |
496 | } | 529 | } |
497 | 530 | ||
498 | static inline void gru_gamer(void *cb, int exopc, unsigned long src, | 531 | static inline void gru_gamer(void *cb, int exopc, unsigned long src, |
@@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src, | |||
505 | ins->baddr0 = (long)src; | 538 | ins->baddr0 = (long)src; |
506 | ins->op1_stride = operand1; | 539 | ins->op1_stride = operand1; |
507 | ins->op2_value_baddr1 = operand2; | 540 | ins->op2_value_baddr1 = operand2; |
508 | gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0, | 541 | gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0, |
509 | CB_IMA(hints))); | 542 | 0, CB_IMA(hints))); |
510 | } | 543 | } |
511 | 544 | ||
512 | static inline void gru_gamerr(void *cb, int exopc, unsigned long src, | 545 | static inline void gru_gamerr(void *cb, int exopc, unsigned long src, |
@@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src, | |||
518 | ins->baddr0 = (long)src; | 551 | ins->baddr0 = (long)src; |
519 | ins->op1_stride = operand1; | 552 | ins->op1_stride = operand1; |
520 | ins->op2_value_baddr1 = operand2; | 553 | ins->op2_value_baddr1 = operand2; |
521 | gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, | 554 | gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, |
522 | CB_IMA(hints))); | 555 | 0, CB_IMA(hints))); |
523 | } | 556 | } |
524 | 557 | ||
525 | static inline void gru_gamxr(void *cb, unsigned long src, | 558 | static inline void gru_gamxr(void *cb, unsigned long src, |
@@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src, | |||
529 | 562 | ||
530 | ins->baddr0 = (long)src; | 563 | ins->baddr0 = (long)src; |
531 | ins->nelem = 4; | 564 | ins->nelem = 4; |
532 | gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, | 565 | gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, |
533 | IAA_RAM, 0, CB_IMA(hints))); | 566 | IAA_RAM, 0, 0, CB_IMA(hints))); |
534 | } | 567 | } |
535 | 568 | ||
536 | static inline void gru_mesq(void *cb, unsigned long queue, | 569 | static inline void gru_mesq(void *cb, unsigned long queue, |
@@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue, | |||
541 | 574 | ||
542 | ins->baddr0 = (long)queue; | 575 | ins->baddr0 = (long)queue; |
543 | ins->nelem = nelem; | 576 | ins->nelem = nelem; |
544 | ins->tri0 = tri0; | 577 | gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, |
545 | gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, | 578 | tri0, CB_IMA(hints))); |
546 | CB_IMA(hints))); | ||
547 | } | 579 | } |
548 | 580 | ||
549 | static inline unsigned long gru_get_amo_value(void *cb) | 581 | static inline unsigned long gru_get_amo_value(void *cb) |
@@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb) | |||
662 | gru_wait_abort_proc(cb); | 694 | gru_wait_abort_proc(cb); |
663 | } | 695 | } |
664 | 696 | ||
697 | /* | ||
698 | * Get a pointer to the start of a gseg | ||
699 | * p - Any valid pointer within the gseg | ||
700 | */ | ||
701 | static inline void *gru_get_gseg_pointer (void *p) | ||
702 | { | ||
703 | return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1)); | ||
704 | } | ||
665 | 705 | ||
666 | /* | 706 | /* |
667 | * Get a pointer to a control block | 707 | * Get a pointer to a control block |
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 679e01778286..38657cdaf54d 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
@@ -40,6 +40,12 @@ | |||
40 | #include "gru_instructions.h" | 40 | #include "gru_instructions.h" |
41 | #include <asm/uv/uv_hub.h> | 41 | #include <asm/uv/uv_hub.h> |
42 | 42 | ||
43 | /* Return codes for vtop functions */ | ||
44 | #define VTOP_SUCCESS 0 | ||
45 | #define VTOP_INVALID -1 | ||
46 | #define VTOP_RETRY -2 | ||
47 | |||
48 | |||
43 | /* | 49 | /* |
44 | * Test if a physical address is a valid GRU GSEG address | 50 | * Test if a physical address is a valid GRU GSEG address |
45 | */ | 51 | */ |
@@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) | |||
90 | { | 96 | { |
91 | struct mm_struct *mm = current->mm; | 97 | struct mm_struct *mm = current->mm; |
92 | struct vm_area_struct *vma; | 98 | struct vm_area_struct *vma; |
93 | struct gru_thread_state *gts = NULL; | 99 | struct gru_thread_state *gts = ERR_PTR(-EINVAL); |
94 | 100 | ||
95 | down_write(&mm->mmap_sem); | 101 | down_write(&mm->mmap_sem); |
96 | vma = gru_find_vma(vaddr); | 102 | vma = gru_find_vma(vaddr); |
97 | if (vma) | 103 | if (!vma) |
98 | gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); | 104 | goto err; |
99 | if (gts) { | ||
100 | mutex_lock(>s->ts_ctxlock); | ||
101 | downgrade_write(&mm->mmap_sem); | ||
102 | } else { | ||
103 | up_write(&mm->mmap_sem); | ||
104 | } | ||
105 | 105 | ||
106 | gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); | ||
107 | if (IS_ERR(gts)) | ||
108 | goto err; | ||
109 | mutex_lock(>s->ts_ctxlock); | ||
110 | downgrade_write(&mm->mmap_sem); | ||
111 | return gts; | ||
112 | |||
113 | err: | ||
114 | up_write(&mm->mmap_sem); | ||
106 | return gts; | 115 | return gts; |
107 | } | 116 | } |
108 | 117 | ||
@@ -122,39 +131,15 @@ static void gru_unlock_gts(struct gru_thread_state *gts) | |||
122 | * is necessary to prevent the user from seeing a stale cb.istatus that will | 131 | * is necessary to prevent the user from seeing a stale cb.istatus that will |
123 | * change as soon as the TFH restart is complete. Races may cause an | 132 | * change as soon as the TFH restart is complete. Races may cause an |
124 | * occasional failure to clear the cb.istatus, but that is ok. | 133 | * occasional failure to clear the cb.istatus, but that is ok. |
125 | * | ||
126 | * If the cb address is not valid (should not happen, but...), nothing | ||
127 | * bad will happen.. The get_user()/put_user() will fail but there | ||
128 | * are no bad side-effects. | ||
129 | */ | 134 | */ |
130 | static void gru_cb_set_istatus_active(unsigned long __user *cb) | 135 | static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk) |
131 | { | 136 | { |
132 | union { | 137 | if (cbk) { |
133 | struct gru_instruction_bits bits; | 138 | cbk->istatus = CBS_ACTIVE; |
134 | unsigned long dw; | ||
135 | } u; | ||
136 | |||
137 | if (cb) { | ||
138 | get_user(u.dw, cb); | ||
139 | u.bits.istatus = CBS_ACTIVE; | ||
140 | put_user(u.dw, cb); | ||
141 | } | 139 | } |
142 | } | 140 | } |
143 | 141 | ||
144 | /* | 142 | /* |
145 | * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the | ||
146 | * interrupt. Interrupts are always sent to a cpu on the blade that contains the | ||
147 | * GRU (except for headless blades which are not currently supported). A blade | ||
148 | * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ | ||
149 | * number uniquely identifies the GRU chiplet on the local blade that caused the | ||
150 | * interrupt. Always called in interrupt context. | ||
151 | */ | ||
152 | static inline struct gru_state *irq_to_gru(int irq) | ||
153 | { | ||
154 | return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU]; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Read & clear a TFM | 143 | * Read & clear a TFM |
159 | * | 144 | * |
160 | * The GRU has an array of fault maps. A map is private to a cpu | 145 | * The GRU has an array of fault maps. A map is private to a cpu |
@@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, | |||
207 | { | 192 | { |
208 | struct page *page; | 193 | struct page *page; |
209 | 194 | ||
210 | /* ZZZ Need to handle HUGE pages */ | 195 | #ifdef CONFIG_HUGETLB_PAGE |
211 | if (is_vm_hugetlb_page(vma)) | 196 | *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT; |
212 | return -EFAULT; | 197 | #else |
213 | *pageshift = PAGE_SHIFT; | 198 | *pageshift = PAGE_SHIFT; |
199 | #endif | ||
214 | if (get_user_pages | 200 | if (get_user_pages |
215 | (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) | 201 | (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) |
216 | return -EFAULT; | 202 | return -EFAULT; |
@@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, | |||
268 | return 0; | 254 | return 0; |
269 | 255 | ||
270 | err: | 256 | err: |
271 | local_irq_enable(); | ||
272 | return 1; | 257 | return 1; |
273 | } | 258 | } |
274 | 259 | ||
@@ -301,14 +286,69 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr, | |||
301 | paddr = paddr & ~((1UL << ps) - 1); | 286 | paddr = paddr & ~((1UL << ps) - 1); |
302 | *gpa = uv_soc_phys_ram_to_gpa(paddr); | 287 | *gpa = uv_soc_phys_ram_to_gpa(paddr); |
303 | *pageshift = ps; | 288 | *pageshift = ps; |
304 | return 0; | 289 | return VTOP_SUCCESS; |
305 | 290 | ||
306 | inval: | 291 | inval: |
307 | return -1; | 292 | return VTOP_INVALID; |
308 | upm: | 293 | upm: |
309 | return -2; | 294 | return VTOP_RETRY; |
295 | } | ||
296 | |||
297 | |||
298 | /* | ||
299 | * Flush a CBE from cache. The CBE is clean in the cache. Dirty the | ||
300 | * CBE cacheline so that the line will be written back to home agent. | ||
301 | * Otherwise the line may be silently dropped. This has no impact | ||
302 | * except on performance. | ||
303 | */ | ||
304 | static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe) | ||
305 | { | ||
306 | if (unlikely(cbe)) { | ||
307 | cbe->cbrexecstatus = 0; /* make CL dirty */ | ||
308 | gru_flush_cache(cbe); | ||
309 | } | ||
310 | } | 310 | } |
311 | 311 | ||
312 | /* | ||
313 | * Preload the TLB with entries that may be required. Currently, preloading | ||
314 | * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to | ||
315 | * the end of the bcopy tranfer, whichever is smaller. | ||
316 | */ | ||
317 | static void gru_preload_tlb(struct gru_state *gru, | ||
318 | struct gru_thread_state *gts, int atomic, | ||
319 | unsigned long fault_vaddr, int asid, int write, | ||
320 | unsigned char tlb_preload_count, | ||
321 | struct gru_tlb_fault_handle *tfh, | ||
322 | struct gru_control_block_extended *cbe) | ||
323 | { | ||
324 | unsigned long vaddr = 0, gpa; | ||
325 | int ret, pageshift; | ||
326 | |||
327 | if (cbe->opccpy != OP_BCOPY) | ||
328 | return; | ||
329 | |||
330 | if (fault_vaddr == cbe->cbe_baddr0) | ||
331 | vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1; | ||
332 | else if (fault_vaddr == cbe->cbe_baddr1) | ||
333 | vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1; | ||
334 | |||
335 | fault_vaddr &= PAGE_MASK; | ||
336 | vaddr &= PAGE_MASK; | ||
337 | vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE); | ||
338 | |||
339 | while (vaddr > fault_vaddr) { | ||
340 | ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); | ||
341 | if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write, | ||
342 | GRU_PAGESIZE(pageshift))) | ||
343 | return; | ||
344 | gru_dbg(grudev, | ||
345 | "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n", | ||
346 | atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, | ||
347 | vaddr, asid, write, pageshift, gpa); | ||
348 | vaddr -= PAGE_SIZE; | ||
349 | STAT(tlb_preload_page); | ||
350 | } | ||
351 | } | ||
312 | 352 | ||
313 | /* | 353 | /* |
314 | * Drop a TLB entry into the GRU. The fault is described by info in an TFH. | 354 | * Drop a TLB entry into the GRU. The fault is described by info in an TFH. |
@@ -320,11 +360,14 @@ upm: | |||
320 | * < 0 = error code | 360 | * < 0 = error code |
321 | * | 361 | * |
322 | */ | 362 | */ |
323 | static int gru_try_dropin(struct gru_thread_state *gts, | 363 | static int gru_try_dropin(struct gru_state *gru, |
364 | struct gru_thread_state *gts, | ||
324 | struct gru_tlb_fault_handle *tfh, | 365 | struct gru_tlb_fault_handle *tfh, |
325 | unsigned long __user *cb) | 366 | struct gru_instruction_bits *cbk) |
326 | { | 367 | { |
327 | int pageshift = 0, asid, write, ret, atomic = !cb; | 368 | struct gru_control_block_extended *cbe = NULL; |
369 | unsigned char tlb_preload_count = gts->ts_tlb_preload_count; | ||
370 | int pageshift = 0, asid, write, ret, atomic = !cbk, indexway; | ||
328 | unsigned long gpa = 0, vaddr = 0; | 371 | unsigned long gpa = 0, vaddr = 0; |
329 | 372 | ||
330 | /* | 373 | /* |
@@ -335,24 +378,34 @@ static int gru_try_dropin(struct gru_thread_state *gts, | |||
335 | */ | 378 | */ |
336 | 379 | ||
337 | /* | 380 | /* |
381 | * Prefetch the CBE if doing TLB preloading | ||
382 | */ | ||
383 | if (unlikely(tlb_preload_count)) { | ||
384 | cbe = gru_tfh_to_cbe(tfh); | ||
385 | prefetchw(cbe); | ||
386 | } | ||
387 | |||
388 | /* | ||
338 | * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. | 389 | * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. |
339 | * Might be a hardware race OR a stupid user. Ignore FMM because FMM | 390 | * Might be a hardware race OR a stupid user. Ignore FMM because FMM |
340 | * is a transient state. | 391 | * is a transient state. |
341 | */ | 392 | */ |
342 | if (tfh->status != TFHSTATUS_EXCEPTION) { | 393 | if (tfh->status != TFHSTATUS_EXCEPTION) { |
343 | gru_flush_cache(tfh); | 394 | gru_flush_cache(tfh); |
395 | sync_core(); | ||
344 | if (tfh->status != TFHSTATUS_EXCEPTION) | 396 | if (tfh->status != TFHSTATUS_EXCEPTION) |
345 | goto failnoexception; | 397 | goto failnoexception; |
346 | STAT(tfh_stale_on_fault); | 398 | STAT(tfh_stale_on_fault); |
347 | } | 399 | } |
348 | if (tfh->state == TFHSTATE_IDLE) | 400 | if (tfh->state == TFHSTATE_IDLE) |
349 | goto failidle; | 401 | goto failidle; |
350 | if (tfh->state == TFHSTATE_MISS_FMM && cb) | 402 | if (tfh->state == TFHSTATE_MISS_FMM && cbk) |
351 | goto failfmm; | 403 | goto failfmm; |
352 | 404 | ||
353 | write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; | 405 | write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; |
354 | vaddr = tfh->missvaddr; | 406 | vaddr = tfh->missvaddr; |
355 | asid = tfh->missasid; | 407 | asid = tfh->missasid; |
408 | indexway = tfh->indexway; | ||
356 | if (asid == 0) | 409 | if (asid == 0) |
357 | goto failnoasid; | 410 | goto failnoasid; |
358 | 411 | ||
@@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts, | |||
366 | goto failactive; | 419 | goto failactive; |
367 | 420 | ||
368 | ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); | 421 | ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); |
369 | if (ret == -1) | 422 | if (ret == VTOP_INVALID) |
370 | goto failinval; | 423 | goto failinval; |
371 | if (ret == -2) | 424 | if (ret == VTOP_RETRY) |
372 | goto failupm; | 425 | goto failupm; |
373 | 426 | ||
374 | if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { | 427 | if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { |
375 | gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); | 428 | gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); |
376 | if (atomic || !gru_update_cch(gts, 0)) { | 429 | if (atomic || !gru_update_cch(gts)) { |
377 | gts->ts_force_cch_reload = 1; | 430 | gts->ts_force_cch_reload = 1; |
378 | goto failupm; | 431 | goto failupm; |
379 | } | 432 | } |
380 | } | 433 | } |
381 | gru_cb_set_istatus_active(cb); | 434 | |
435 | if (unlikely(cbe) && pageshift == PAGE_SHIFT) { | ||
436 | gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe); | ||
437 | gru_flush_cache_cbe(cbe); | ||
438 | } | ||
439 | |||
440 | gru_cb_set_istatus_active(cbk); | ||
441 | gts->ustats.tlbdropin++; | ||
382 | tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, | 442 | tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, |
383 | GRU_PAGESIZE(pageshift)); | 443 | GRU_PAGESIZE(pageshift)); |
384 | STAT(tlb_dropin); | ||
385 | gru_dbg(grudev, | 444 | gru_dbg(grudev, |
386 | "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n", | 445 | "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x," |
387 | ret ? "non-atomic" : "atomic", tfh, vaddr, asid, | 446 | " rw %d, ps %d, gpa 0x%lx\n", |
388 | pageshift, gpa); | 447 | atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid, |
448 | indexway, write, pageshift, gpa); | ||
449 | STAT(tlb_dropin); | ||
389 | return 0; | 450 | return 0; |
390 | 451 | ||
391 | failnoasid: | 452 | failnoasid: |
392 | /* No asid (delayed unload). */ | 453 | /* No asid (delayed unload). */ |
393 | STAT(tlb_dropin_fail_no_asid); | 454 | STAT(tlb_dropin_fail_no_asid); |
394 | gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); | 455 | gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); |
395 | if (!cb) | 456 | if (!cbk) |
396 | tfh_user_polling_mode(tfh); | 457 | tfh_user_polling_mode(tfh); |
397 | else | 458 | else |
398 | gru_flush_cache(tfh); | 459 | gru_flush_cache(tfh); |
460 | gru_flush_cache_cbe(cbe); | ||
399 | return -EAGAIN; | 461 | return -EAGAIN; |
400 | 462 | ||
401 | failupm: | 463 | failupm: |
402 | /* Atomic failure switch CBR to UPM */ | 464 | /* Atomic failure switch CBR to UPM */ |
403 | tfh_user_polling_mode(tfh); | 465 | tfh_user_polling_mode(tfh); |
466 | gru_flush_cache_cbe(cbe); | ||
404 | STAT(tlb_dropin_fail_upm); | 467 | STAT(tlb_dropin_fail_upm); |
405 | gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); | 468 | gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); |
406 | return 1; | 469 | return 1; |
@@ -408,6 +471,7 @@ failupm: | |||
408 | failfmm: | 471 | failfmm: |
409 | /* FMM state on UPM call */ | 472 | /* FMM state on UPM call */ |
410 | gru_flush_cache(tfh); | 473 | gru_flush_cache(tfh); |
474 | gru_flush_cache_cbe(cbe); | ||
411 | STAT(tlb_dropin_fail_fmm); | 475 | STAT(tlb_dropin_fail_fmm); |
412 | gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); | 476 | gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); |
413 | return 0; | 477 | return 0; |
@@ -415,17 +479,20 @@ failfmm: | |||
415 | failnoexception: | 479 | failnoexception: |
416 | /* TFH status did not show exception pending */ | 480 | /* TFH status did not show exception pending */ |
417 | gru_flush_cache(tfh); | 481 | gru_flush_cache(tfh); |
418 | if (cb) | 482 | gru_flush_cache_cbe(cbe); |
419 | gru_flush_cache(cb); | 483 | if (cbk) |
484 | gru_flush_cache(cbk); | ||
420 | STAT(tlb_dropin_fail_no_exception); | 485 | STAT(tlb_dropin_fail_no_exception); |
421 | gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state); | 486 | gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", |
487 | tfh, tfh->status, tfh->state); | ||
422 | return 0; | 488 | return 0; |
423 | 489 | ||
424 | failidle: | 490 | failidle: |
425 | /* TFH state was idle - no miss pending */ | 491 | /* TFH state was idle - no miss pending */ |
426 | gru_flush_cache(tfh); | 492 | gru_flush_cache(tfh); |
427 | if (cb) | 493 | gru_flush_cache_cbe(cbe); |
428 | gru_flush_cache(cb); | 494 | if (cbk) |
495 | gru_flush_cache(cbk); | ||
429 | STAT(tlb_dropin_fail_idle); | 496 | STAT(tlb_dropin_fail_idle); |
430 | gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); | 497 | gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); |
431 | return 0; | 498 | return 0; |
@@ -433,16 +500,18 @@ failidle: | |||
433 | failinval: | 500 | failinval: |
434 | /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ | 501 | /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ |
435 | tfh_exception(tfh); | 502 | tfh_exception(tfh); |
503 | gru_flush_cache_cbe(cbe); | ||
436 | STAT(tlb_dropin_fail_invalid); | 504 | STAT(tlb_dropin_fail_invalid); |
437 | gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); | 505 | gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); |
438 | return -EFAULT; | 506 | return -EFAULT; |
439 | 507 | ||
440 | failactive: | 508 | failactive: |
441 | /* Range invalidate active. Switch to UPM iff atomic */ | 509 | /* Range invalidate active. Switch to UPM iff atomic */ |
442 | if (!cb) | 510 | if (!cbk) |
443 | tfh_user_polling_mode(tfh); | 511 | tfh_user_polling_mode(tfh); |
444 | else | 512 | else |
445 | gru_flush_cache(tfh); | 513 | gru_flush_cache(tfh); |
514 | gru_flush_cache_cbe(cbe); | ||
446 | STAT(tlb_dropin_fail_range_active); | 515 | STAT(tlb_dropin_fail_range_active); |
447 | gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", | 516 | gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", |
448 | tfh, vaddr); | 517 | tfh, vaddr); |
@@ -455,31 +524,41 @@ failactive: | |||
455 | * Note that this is the interrupt handler that is registered with linux | 524 | * Note that this is the interrupt handler that is registered with linux |
456 | * interrupt handlers. | 525 | * interrupt handlers. |
457 | */ | 526 | */ |
458 | irqreturn_t gru_intr(int irq, void *dev_id) | 527 | static irqreturn_t gru_intr(int chiplet, int blade) |
459 | { | 528 | { |
460 | struct gru_state *gru; | 529 | struct gru_state *gru; |
461 | struct gru_tlb_fault_map imap, dmap; | 530 | struct gru_tlb_fault_map imap, dmap; |
462 | struct gru_thread_state *gts; | 531 | struct gru_thread_state *gts; |
463 | struct gru_tlb_fault_handle *tfh = NULL; | 532 | struct gru_tlb_fault_handle *tfh = NULL; |
533 | struct completion *cmp; | ||
464 | int cbrnum, ctxnum; | 534 | int cbrnum, ctxnum; |
465 | 535 | ||
466 | STAT(intr); | 536 | STAT(intr); |
467 | 537 | ||
468 | gru = irq_to_gru(irq); | 538 | gru = &gru_base[blade]->bs_grus[chiplet]; |
469 | if (!gru) { | 539 | if (!gru) { |
470 | dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n", | 540 | dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n", |
471 | raw_smp_processor_id(), irq); | 541 | raw_smp_processor_id(), chiplet); |
472 | return IRQ_NONE; | 542 | return IRQ_NONE; |
473 | } | 543 | } |
474 | get_clear_fault_map(gru, &imap, &dmap); | 544 | get_clear_fault_map(gru, &imap, &dmap); |
545 | gru_dbg(grudev, | ||
546 | "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n", | ||
547 | smp_processor_id(), chiplet, gru->gs_gid, | ||
548 | imap.fault_bits[0], imap.fault_bits[1], | ||
549 | dmap.fault_bits[0], dmap.fault_bits[1]); | ||
475 | 550 | ||
476 | for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { | 551 | for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { |
477 | complete(gru->gs_blade->bs_async_wq); | 552 | STAT(intr_cbr); |
553 | cmp = gru->gs_blade->bs_async_wq; | ||
554 | if (cmp) | ||
555 | complete(cmp); | ||
478 | gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", | 556 | gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", |
479 | gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done); | 557 | gru->gs_gid, cbrnum, cmp ? cmp->done : -1); |
480 | } | 558 | } |
481 | 559 | ||
482 | for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { | 560 | for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { |
561 | STAT(intr_tfh); | ||
483 | tfh = get_tfh_by_index(gru, cbrnum); | 562 | tfh = get_tfh_by_index(gru, cbrnum); |
484 | prefetchw(tfh); /* Helps on hdw, required for emulator */ | 563 | prefetchw(tfh); /* Helps on hdw, required for emulator */ |
485 | 564 | ||
@@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id) | |||
492 | ctxnum = tfh->ctxnum; | 571 | ctxnum = tfh->ctxnum; |
493 | gts = gru->gs_gts[ctxnum]; | 572 | gts = gru->gs_gts[ctxnum]; |
494 | 573 | ||
574 | /* Spurious interrupts can cause this. Ignore. */ | ||
575 | if (!gts) { | ||
576 | STAT(intr_spurious); | ||
577 | continue; | ||
578 | } | ||
579 | |||
495 | /* | 580 | /* |
496 | * This is running in interrupt context. Trylock the mmap_sem. | 581 | * This is running in interrupt context. Trylock the mmap_sem. |
497 | * If it fails, retry the fault in user context. | 582 | * If it fails, retry the fault in user context. |
498 | */ | 583 | */ |
584 | gts->ustats.fmm_tlbmiss++; | ||
499 | if (!gts->ts_force_cch_reload && | 585 | if (!gts->ts_force_cch_reload && |
500 | down_read_trylock(>s->ts_mm->mmap_sem)) { | 586 | down_read_trylock(>s->ts_mm->mmap_sem)) { |
501 | gts->ustats.fmm_tlbdropin++; | 587 | gru_try_dropin(gru, gts, tfh, NULL); |
502 | gru_try_dropin(gts, tfh, NULL); | ||
503 | up_read(>s->ts_mm->mmap_sem); | 588 | up_read(>s->ts_mm->mmap_sem); |
504 | } else { | 589 | } else { |
505 | tfh_user_polling_mode(tfh); | 590 | tfh_user_polling_mode(tfh); |
@@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id) | |||
509 | return IRQ_HANDLED; | 594 | return IRQ_HANDLED; |
510 | } | 595 | } |
511 | 596 | ||
597 | irqreturn_t gru0_intr(int irq, void *dev_id) | ||
598 | { | ||
599 | return gru_intr(0, uv_numa_blade_id()); | ||
600 | } | ||
601 | |||
602 | irqreturn_t gru1_intr(int irq, void *dev_id) | ||
603 | { | ||
604 | return gru_intr(1, uv_numa_blade_id()); | ||
605 | } | ||
606 | |||
607 | irqreturn_t gru_intr_mblade(int irq, void *dev_id) | ||
608 | { | ||
609 | int blade; | ||
610 | |||
611 | for_each_possible_blade(blade) { | ||
612 | if (uv_blade_nr_possible_cpus(blade)) | ||
613 | continue; | ||
614 | gru_intr(0, blade); | ||
615 | gru_intr(1, blade); | ||
616 | } | ||
617 | return IRQ_HANDLED; | ||
618 | } | ||
619 | |||
512 | 620 | ||
513 | static int gru_user_dropin(struct gru_thread_state *gts, | 621 | static int gru_user_dropin(struct gru_thread_state *gts, |
514 | struct gru_tlb_fault_handle *tfh, | 622 | struct gru_tlb_fault_handle *tfh, |
515 | unsigned long __user *cb) | 623 | void *cb) |
516 | { | 624 | { |
517 | struct gru_mm_struct *gms = gts->ts_gms; | 625 | struct gru_mm_struct *gms = gts->ts_gms; |
518 | int ret; | 626 | int ret; |
519 | 627 | ||
520 | gts->ustats.upm_tlbdropin++; | 628 | gts->ustats.upm_tlbmiss++; |
521 | while (1) { | 629 | while (1) { |
522 | wait_event(gms->ms_wait_queue, | 630 | wait_event(gms->ms_wait_queue, |
523 | atomic_read(&gms->ms_range_active) == 0); | 631 | atomic_read(&gms->ms_range_active) == 0); |
524 | prefetchw(tfh); /* Helps on hdw, required for emulator */ | 632 | prefetchw(tfh); /* Helps on hdw, required for emulator */ |
525 | ret = gru_try_dropin(gts, tfh, cb); | 633 | ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb); |
526 | if (ret <= 0) | 634 | if (ret <= 0) |
527 | return ret; | 635 | return ret; |
528 | STAT(call_os_wait_queue); | 636 | STAT(call_os_wait_queue); |
@@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb) | |||
538 | { | 646 | { |
539 | struct gru_tlb_fault_handle *tfh; | 647 | struct gru_tlb_fault_handle *tfh; |
540 | struct gru_thread_state *gts; | 648 | struct gru_thread_state *gts; |
541 | unsigned long __user *cbp; | 649 | void *cbk; |
542 | int ucbnum, cbrnum, ret = -EINVAL; | 650 | int ucbnum, cbrnum, ret = -EINVAL; |
543 | 651 | ||
544 | STAT(call_os); | 652 | STAT(call_os); |
545 | gru_dbg(grudev, "address 0x%lx\n", cb); | ||
546 | 653 | ||
547 | /* sanity check the cb pointer */ | 654 | /* sanity check the cb pointer */ |
548 | ucbnum = get_cb_number((void *)cb); | 655 | ucbnum = get_cb_number((void *)cb); |
549 | if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) | 656 | if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) |
550 | return -EINVAL; | 657 | return -EINVAL; |
551 | cbp = (unsigned long *)cb; | ||
552 | 658 | ||
553 | gts = gru_find_lock_gts(cb); | 659 | gts = gru_find_lock_gts(cb); |
554 | if (!gts) | 660 | if (!gts) |
555 | return -EINVAL; | 661 | return -EINVAL; |
662 | gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts); | ||
556 | 663 | ||
557 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) | 664 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) |
558 | goto exit; | 665 | goto exit; |
559 | 666 | ||
560 | /* | 667 | gru_check_context_placement(gts); |
561 | * If force_unload is set, the UPM TLB fault is phony. The task | ||
562 | * has migrated to another node and the GSEG must be moved. Just | ||
563 | * unload the context. The task will page fault and assign a new | ||
564 | * context. | ||
565 | */ | ||
566 | if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 && | ||
567 | gts->ts_blade != uv_numa_blade_id()) { | ||
568 | STAT(call_os_offnode_reference); | ||
569 | gts->ts_force_unload = 1; | ||
570 | } | ||
571 | 668 | ||
572 | /* | 669 | /* |
573 | * CCH may contain stale data if ts_force_cch_reload is set. | 670 | * CCH may contain stale data if ts_force_cch_reload is set. |
574 | */ | 671 | */ |
575 | if (gts->ts_gru && gts->ts_force_cch_reload) { | 672 | if (gts->ts_gru && gts->ts_force_cch_reload) { |
576 | gts->ts_force_cch_reload = 0; | 673 | gts->ts_force_cch_reload = 0; |
577 | gru_update_cch(gts, 0); | 674 | gru_update_cch(gts); |
578 | } | 675 | } |
579 | 676 | ||
580 | ret = -EAGAIN; | 677 | ret = -EAGAIN; |
581 | cbrnum = thread_cbr_number(gts, ucbnum); | 678 | cbrnum = thread_cbr_number(gts, ucbnum); |
582 | if (gts->ts_force_unload) { | 679 | if (gts->ts_gru) { |
583 | gru_unload_context(gts, 1); | ||
584 | } else if (gts->ts_gru) { | ||
585 | tfh = get_tfh_by_index(gts->ts_gru, cbrnum); | 680 | tfh = get_tfh_by_index(gts->ts_gru, cbrnum); |
586 | ret = gru_user_dropin(gts, tfh, cbp); | 681 | cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr, |
682 | gts->ts_ctxnum, ucbnum); | ||
683 | ret = gru_user_dropin(gts, tfh, cbk); | ||
587 | } | 684 | } |
588 | exit: | 685 | exit: |
589 | gru_unlock_gts(gts); | 686 | gru_unlock_gts(gts); |
@@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg) | |||
605 | if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) | 702 | if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) |
606 | return -EFAULT; | 703 | return -EFAULT; |
607 | 704 | ||
608 | gru_dbg(grudev, "address 0x%lx\n", excdet.cb); | ||
609 | gts = gru_find_lock_gts(excdet.cb); | 705 | gts = gru_find_lock_gts(excdet.cb); |
610 | if (!gts) | 706 | if (!gts) |
611 | return -EINVAL; | 707 | return -EINVAL; |
612 | 708 | ||
709 | gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts); | ||
613 | ucbnum = get_cb_number((void *)excdet.cb); | 710 | ucbnum = get_cb_number((void *)excdet.cb); |
614 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { | 711 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { |
615 | ret = -EINVAL; | 712 | ret = -EINVAL; |
@@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg) | |||
617 | cbrnum = thread_cbr_number(gts, ucbnum); | 714 | cbrnum = thread_cbr_number(gts, ucbnum); |
618 | cbe = get_cbe_by_index(gts->ts_gru, cbrnum); | 715 | cbe = get_cbe_by_index(gts->ts_gru, cbrnum); |
619 | gru_flush_cache(cbe); /* CBE not coherent */ | 716 | gru_flush_cache(cbe); /* CBE not coherent */ |
717 | sync_core(); /* make sure we are have current data */ | ||
620 | excdet.opc = cbe->opccpy; | 718 | excdet.opc = cbe->opccpy; |
621 | excdet.exopc = cbe->exopccpy; | 719 | excdet.exopc = cbe->exopccpy; |
622 | excdet.ecause = cbe->ecause; | 720 | excdet.ecause = cbe->ecause; |
@@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg) | |||
624 | excdet.exceptdet1 = cbe->idef3upd; | 722 | excdet.exceptdet1 = cbe->idef3upd; |
625 | excdet.cbrstate = cbe->cbrstate; | 723 | excdet.cbrstate = cbe->cbrstate; |
626 | excdet.cbrexecstatus = cbe->cbrexecstatus; | 724 | excdet.cbrexecstatus = cbe->cbrexecstatus; |
627 | gru_flush_cache(cbe); | 725 | gru_flush_cache_cbe(cbe); |
628 | ret = 0; | 726 | ret = 0; |
629 | } else { | 727 | } else { |
630 | ret = -EAGAIN; | 728 | ret = -EAGAIN; |
@@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg) | |||
733 | if (copy_from_user(&req, (void __user *)arg, sizeof(req))) | 831 | if (copy_from_user(&req, (void __user *)arg, sizeof(req))) |
734 | return -EFAULT; | 832 | return -EFAULT; |
735 | 833 | ||
834 | /* | ||
835 | * The library creates arrays of contexts for threaded programs. | ||
836 | * If no gts exists in the array, the context has never been used & all | ||
837 | * statistics are implicitly 0. | ||
838 | */ | ||
736 | gts = gru_find_lock_gts(req.gseg); | 839 | gts = gru_find_lock_gts(req.gseg); |
737 | if (gts) { | 840 | if (gts) { |
738 | memcpy(&req.stats, >s->ustats, sizeof(gts->ustats)); | 841 | memcpy(&req.stats, >s->ustats, sizeof(gts->ustats)); |
@@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg) | |||
762 | return -EFAULT; | 865 | return -EFAULT; |
763 | gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); | 866 | gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); |
764 | 867 | ||
765 | gts = gru_alloc_locked_gts(req.gseg); | 868 | gts = gru_find_lock_gts(req.gseg); |
766 | if (!gts) | 869 | if (!gts) { |
767 | return -EINVAL; | 870 | gts = gru_alloc_locked_gts(req.gseg); |
871 | if (IS_ERR(gts)) | ||
872 | return PTR_ERR(gts); | ||
873 | } | ||
768 | 874 | ||
769 | switch (req.op) { | 875 | switch (req.op) { |
876 | case sco_blade_chiplet: | ||
877 | /* Select blade/chiplet for GRU context */ | ||
878 | if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] || | ||
879 | req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) { | ||
880 | ret = -EINVAL; | ||
881 | } else { | ||
882 | gts->ts_user_blade_id = req.val1; | ||
883 | gts->ts_user_chiplet_id = req.val0; | ||
884 | gru_check_context_placement(gts); | ||
885 | } | ||
886 | break; | ||
770 | case sco_gseg_owner: | 887 | case sco_gseg_owner: |
771 | /* Register the current task as the GSEG owner */ | 888 | /* Register the current task as the GSEG owner */ |
772 | gts->ts_tgid_owner = current->tgid; | 889 | gts->ts_tgid_owner = current->tgid; |
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 41c8fe2a928c..cb3b4d228475 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
@@ -35,6 +35,9 @@ | |||
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
38 | #ifdef CONFIG_X86_64 | ||
39 | #include <asm/uv/uv_irq.h> | ||
40 | #endif | ||
38 | #include <asm/uv/uv.h> | 41 | #include <asm/uv/uv.h> |
39 | #include "gru.h" | 42 | #include "gru.h" |
40 | #include "grulib.h" | 43 | #include "grulib.h" |
@@ -92,7 +95,7 @@ static void gru_vma_close(struct vm_area_struct *vma) | |||
92 | /* | 95 | /* |
93 | * gru_file_mmap | 96 | * gru_file_mmap |
94 | * | 97 | * |
95 | * Called when mmaping the device. Initializes the vma with a fault handler | 98 | * Called when mmapping the device. Initializes the vma with a fault handler |
96 | * and private data structure necessary to allocate, track, and free the | 99 | * and private data structure necessary to allocate, track, and free the |
97 | * underlying pages. | 100 | * underlying pages. |
98 | */ | 101 | */ |
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg) | |||
130 | struct gru_vma_data *vdata; | 133 | struct gru_vma_data *vdata; |
131 | int ret = -EINVAL; | 134 | int ret = -EINVAL; |
132 | 135 | ||
133 | |||
134 | if (copy_from_user(&req, (void __user *)arg, sizeof(req))) | 136 | if (copy_from_user(&req, (void __user *)arg, sizeof(req))) |
135 | return -EFAULT; | 137 | return -EFAULT; |
136 | 138 | ||
@@ -150,6 +152,7 @@ static int gru_create_new_context(unsigned long arg) | |||
150 | vdata->vd_dsr_au_count = | 152 | vdata->vd_dsr_au_count = |
151 | GRU_DS_BYTES_TO_AU(req.data_segment_bytes); | 153 | GRU_DS_BYTES_TO_AU(req.data_segment_bytes); |
152 | vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); | 154 | vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); |
155 | vdata->vd_tlb_preload_count = req.tlb_preload_count; | ||
153 | ret = 0; | 156 | ret = 0; |
154 | } | 157 | } |
155 | up_write(¤t->mm->mmap_sem); | 158 | up_write(¤t->mm->mmap_sem); |
@@ -190,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, | |||
190 | { | 193 | { |
191 | int err = -EBADRQC; | 194 | int err = -EBADRQC; |
192 | 195 | ||
193 | gru_dbg(grudev, "file %p\n", file); | 196 | gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg); |
194 | 197 | ||
195 | switch (req) { | 198 | switch (req) { |
196 | case GRU_CREATE_CONTEXT: | 199 | case GRU_CREATE_CONTEXT: |
@@ -232,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, | |||
232 | * system. | 235 | * system. |
233 | */ | 236 | */ |
234 | static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, | 237 | static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, |
235 | void *vaddr, int nid, int bid, int grunum) | 238 | void *vaddr, int blade_id, int chiplet_id) |
236 | { | 239 | { |
237 | spin_lock_init(&gru->gs_lock); | 240 | spin_lock_init(&gru->gs_lock); |
238 | spin_lock_init(&gru->gs_asid_lock); | 241 | spin_lock_init(&gru->gs_asid_lock); |
239 | gru->gs_gru_base_paddr = paddr; | 242 | gru->gs_gru_base_paddr = paddr; |
240 | gru->gs_gru_base_vaddr = vaddr; | 243 | gru->gs_gru_base_vaddr = vaddr; |
241 | gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum; | 244 | gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id; |
242 | gru->gs_blade = gru_base[bid]; | 245 | gru->gs_blade = gru_base[blade_id]; |
243 | gru->gs_blade_id = bid; | 246 | gru->gs_blade_id = blade_id; |
247 | gru->gs_chiplet_id = chiplet_id; | ||
244 | gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; | 248 | gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; |
245 | gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; | 249 | gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; |
246 | gru->gs_asid_limit = MAX_ASID; | 250 | gru->gs_asid_limit = MAX_ASID; |
247 | gru_tgh_flush_init(gru); | 251 | gru_tgh_flush_init(gru); |
248 | if (gru->gs_gid >= gru_max_gids) | 252 | if (gru->gs_gid >= gru_max_gids) |
249 | gru_max_gids = gru->gs_gid + 1; | 253 | gru_max_gids = gru->gs_gid + 1; |
250 | gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", | 254 | gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n", |
251 | bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, | 255 | blade_id, gru->gs_gid, gru->gs_gru_base_vaddr, |
252 | gru->gs_gru_base_paddr); | 256 | gru->gs_gru_base_paddr); |
253 | } | 257 | } |
254 | 258 | ||
@@ -264,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) | |||
264 | 268 | ||
265 | max_user_cbrs = GRU_NUM_CB; | 269 | max_user_cbrs = GRU_NUM_CB; |
266 | max_user_dsr_bytes = GRU_NUM_DSR_BYTES; | 270 | max_user_dsr_bytes = GRU_NUM_DSR_BYTES; |
267 | for_each_online_node(nid) { | 271 | for_each_possible_blade(bid) { |
268 | bid = uv_node_to_blade_id(nid); | 272 | pnode = uv_blade_to_pnode(bid); |
269 | pnode = uv_node_to_pnode(nid); | 273 | nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */ |
270 | if (bid < 0 || gru_base[bid]) | 274 | page = alloc_pages_node(nid, GFP_KERNEL, order); |
271 | continue; | ||
272 | page = alloc_pages_exact_node(nid, GFP_KERNEL, order); | ||
273 | if (!page) | 275 | if (!page) |
274 | goto fail; | 276 | goto fail; |
275 | gru_base[bid] = page_address(page); | 277 | gru_base[bid] = page_address(page); |
@@ -285,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) | |||
285 | chip++, gru++) { | 287 | chip++, gru++) { |
286 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); | 288 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); |
287 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); | 289 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); |
288 | gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); | 290 | gru_init_chiplet(gru, paddr, vaddr, bid, chip); |
289 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; | 291 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; |
290 | cbrs = max(cbrs, n); | 292 | cbrs = max(cbrs, n); |
291 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; | 293 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; |
@@ -298,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) | |||
298 | return 0; | 300 | return 0; |
299 | 301 | ||
300 | fail: | 302 | fail: |
301 | for (nid--; nid >= 0; nid--) | 303 | for (bid--; bid >= 0; bid--) |
302 | free_pages((unsigned long)gru_base[nid], order); | 304 | free_pages((unsigned long)gru_base[bid], order); |
303 | return -ENOMEM; | 305 | return -ENOMEM; |
304 | } | 306 | } |
305 | 307 | ||
306 | #ifdef CONFIG_IA64 | 308 | static void gru_free_tables(void) |
309 | { | ||
310 | int bid; | ||
311 | int order = get_order(sizeof(struct gru_state) * | ||
312 | GRU_CHIPLETS_PER_BLADE); | ||
307 | 313 | ||
308 | static int get_base_irq(void) | 314 | for (bid = 0; bid < GRU_MAX_BLADES; bid++) |
315 | free_pages((unsigned long)gru_base[bid], order); | ||
316 | } | ||
317 | |||
318 | static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) | ||
309 | { | 319 | { |
310 | return IRQ_GRU; | 320 | unsigned long mmr = 0; |
321 | int core; | ||
322 | |||
323 | /* | ||
324 | * We target the cores of a blade and not the hyperthreads themselves. | ||
325 | * There is a max of 8 cores per socket and 2 sockets per blade, | ||
326 | * making for a max total of 16 cores (i.e., 16 CPUs without | ||
327 | * hyperthreading and 32 CPUs with hyperthreading). | ||
328 | */ | ||
329 | core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); | ||
330 | if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu)) | ||
331 | return 0; | ||
332 | |||
333 | if (chiplet == 0) { | ||
334 | mmr = UVH_GR0_TLB_INT0_CONFIG + | ||
335 | core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG); | ||
336 | } else if (chiplet == 1) { | ||
337 | mmr = UVH_GR1_TLB_INT0_CONFIG + | ||
338 | core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG); | ||
339 | } else { | ||
340 | BUG(); | ||
341 | } | ||
342 | |||
343 | *corep = core; | ||
344 | return mmr; | ||
311 | } | 345 | } |
312 | 346 | ||
313 | #elif defined CONFIG_X86_64 | 347 | #ifdef CONFIG_IA64 |
314 | 348 | ||
315 | static void noop(unsigned int irq) | 349 | static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; |
350 | |||
351 | static void gru_noop(unsigned int irq) | ||
316 | { | 352 | { |
317 | } | 353 | } |
318 | 354 | ||
319 | static struct irq_chip gru_chip = { | 355 | static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { |
320 | .name = "gru", | 356 | [0 ... GRU_CHIPLETS_PER_BLADE - 1] { |
321 | .mask = noop, | 357 | .mask = gru_noop, |
322 | .unmask = noop, | 358 | .unmask = gru_noop, |
323 | .ack = noop, | 359 | .ack = gru_noop |
360 | } | ||
324 | }; | 361 | }; |
325 | 362 | ||
326 | static int get_base_irq(void) | 363 | static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, |
364 | irq_handler_t irq_handler, int cpu, int blade) | ||
365 | { | ||
366 | unsigned long mmr; | ||
367 | int irq = IRQ_GRU + chiplet; | ||
368 | int ret, core; | ||
369 | |||
370 | mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); | ||
371 | if (mmr == 0) | ||
372 | return 0; | ||
373 | |||
374 | if (gru_irq_count[chiplet] == 0) { | ||
375 | gru_chip[chiplet].name = irq_name; | ||
376 | ret = set_irq_chip(irq, &gru_chip[chiplet]); | ||
377 | if (ret) { | ||
378 | printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", | ||
379 | GRU_DRIVER_ID_STR, -ret); | ||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | ret = request_irq(irq, irq_handler, 0, irq_name, NULL); | ||
384 | if (ret) { | ||
385 | printk(KERN_ERR "%s: request_irq failed, errno=%d\n", | ||
386 | GRU_DRIVER_ID_STR, -ret); | ||
387 | return ret; | ||
388 | } | ||
389 | } | ||
390 | gru_irq_count[chiplet]++; | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) | ||
396 | { | ||
397 | unsigned long mmr; | ||
398 | int core, irq = IRQ_GRU + chiplet; | ||
399 | |||
400 | if (gru_irq_count[chiplet] == 0) | ||
401 | return; | ||
402 | |||
403 | mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); | ||
404 | if (mmr == 0) | ||
405 | return; | ||
406 | |||
407 | if (--gru_irq_count[chiplet] == 0) | ||
408 | free_irq(irq, NULL); | ||
409 | } | ||
410 | |||
411 | #elif defined CONFIG_X86_64 | ||
412 | |||
413 | static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, | ||
414 | irq_handler_t irq_handler, int cpu, int blade) | ||
415 | { | ||
416 | unsigned long mmr; | ||
417 | int irq, core; | ||
418 | int ret; | ||
419 | |||
420 | mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); | ||
421 | if (mmr == 0) | ||
422 | return 0; | ||
423 | |||
424 | irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU); | ||
425 | if (irq < 0) { | ||
426 | printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n", | ||
427 | GRU_DRIVER_ID_STR, -irq); | ||
428 | return irq; | ||
429 | } | ||
430 | |||
431 | ret = request_irq(irq, irq_handler, 0, irq_name, NULL); | ||
432 | if (ret) { | ||
433 | uv_teardown_irq(irq); | ||
434 | printk(KERN_ERR "%s: request_irq failed, errno=%d\n", | ||
435 | GRU_DRIVER_ID_STR, -ret); | ||
436 | return ret; | ||
437 | } | ||
438 | gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq; | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) | ||
327 | { | 443 | { |
328 | set_irq_chip(IRQ_GRU, &gru_chip); | 444 | int irq, core; |
329 | set_irq_chip(IRQ_GRU + 1, &gru_chip); | 445 | unsigned long mmr; |
330 | return IRQ_GRU; | 446 | |
447 | mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); | ||
448 | if (mmr) { | ||
449 | irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core]; | ||
450 | if (irq) { | ||
451 | free_irq(irq, NULL); | ||
452 | uv_teardown_irq(irq); | ||
453 | } | ||
454 | } | ||
331 | } | 455 | } |
456 | |||
332 | #endif | 457 | #endif |
333 | 458 | ||
459 | static void gru_teardown_tlb_irqs(void) | ||
460 | { | ||
461 | int blade; | ||
462 | int cpu; | ||
463 | |||
464 | for_each_online_cpu(cpu) { | ||
465 | blade = uv_cpu_to_blade_id(cpu); | ||
466 | gru_chiplet_teardown_tlb_irq(0, cpu, blade); | ||
467 | gru_chiplet_teardown_tlb_irq(1, cpu, blade); | ||
468 | } | ||
469 | for_each_possible_blade(blade) { | ||
470 | if (uv_blade_nr_possible_cpus(blade)) | ||
471 | continue; | ||
472 | gru_chiplet_teardown_tlb_irq(0, 0, blade); | ||
473 | gru_chiplet_teardown_tlb_irq(1, 0, blade); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | static int gru_setup_tlb_irqs(void) | ||
478 | { | ||
479 | int blade; | ||
480 | int cpu; | ||
481 | int ret; | ||
482 | |||
483 | for_each_online_cpu(cpu) { | ||
484 | blade = uv_cpu_to_blade_id(cpu); | ||
485 | ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade); | ||
486 | if (ret != 0) | ||
487 | goto exit1; | ||
488 | |||
489 | ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade); | ||
490 | if (ret != 0) | ||
491 | goto exit1; | ||
492 | } | ||
493 | for_each_possible_blade(blade) { | ||
494 | if (uv_blade_nr_possible_cpus(blade)) | ||
495 | continue; | ||
496 | ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade); | ||
497 | if (ret != 0) | ||
498 | goto exit1; | ||
499 | |||
500 | ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade); | ||
501 | if (ret != 0) | ||
502 | goto exit1; | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | |||
507 | exit1: | ||
508 | gru_teardown_tlb_irqs(); | ||
509 | return ret; | ||
510 | } | ||
511 | |||
334 | /* | 512 | /* |
335 | * gru_init | 513 | * gru_init |
336 | * | 514 | * |
@@ -338,8 +516,7 @@ static int get_base_irq(void) | |||
338 | */ | 516 | */ |
339 | static int __init gru_init(void) | 517 | static int __init gru_init(void) |
340 | { | 518 | { |
341 | int ret, irq, chip; | 519 | int ret; |
342 | char id[10]; | ||
343 | 520 | ||
344 | if (!is_uv_system()) | 521 | if (!is_uv_system()) |
345 | return 0; | 522 | return 0; |
@@ -354,41 +531,29 @@ static int __init gru_init(void) | |||
354 | gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; | 531 | gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; |
355 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", | 532 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", |
356 | gru_start_paddr, gru_end_paddr); | 533 | gru_start_paddr, gru_end_paddr); |
357 | irq = get_base_irq(); | ||
358 | for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { | ||
359 | ret = request_irq(irq + chip, gru_intr, 0, id, NULL); | ||
360 | /* TODO: fix irq handling on x86. For now ignore failure because | ||
361 | * interrupts are not required & not yet fully supported */ | ||
362 | if (ret) { | ||
363 | printk(KERN_WARNING | ||
364 | "!!!WARNING: GRU ignoring request failure!!!\n"); | ||
365 | ret = 0; | ||
366 | } | ||
367 | if (ret) { | ||
368 | printk(KERN_ERR "%s: request_irq failed\n", | ||
369 | GRU_DRIVER_ID_STR); | ||
370 | goto exit1; | ||
371 | } | ||
372 | } | ||
373 | |||
374 | ret = misc_register(&gru_miscdev); | 534 | ret = misc_register(&gru_miscdev); |
375 | if (ret) { | 535 | if (ret) { |
376 | printk(KERN_ERR "%s: misc_register failed\n", | 536 | printk(KERN_ERR "%s: misc_register failed\n", |
377 | GRU_DRIVER_ID_STR); | 537 | GRU_DRIVER_ID_STR); |
378 | goto exit1; | 538 | goto exit0; |
379 | } | 539 | } |
380 | 540 | ||
381 | ret = gru_proc_init(); | 541 | ret = gru_proc_init(); |
382 | if (ret) { | 542 | if (ret) { |
383 | printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); | 543 | printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); |
384 | goto exit2; | 544 | goto exit1; |
385 | } | 545 | } |
386 | 546 | ||
387 | ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); | 547 | ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); |
388 | if (ret) { | 548 | if (ret) { |
389 | printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); | 549 | printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); |
390 | goto exit3; | 550 | goto exit2; |
391 | } | 551 | } |
552 | |||
553 | ret = gru_setup_tlb_irqs(); | ||
554 | if (ret != 0) | ||
555 | goto exit3; | ||
556 | |||
392 | gru_kservices_init(); | 557 | gru_kservices_init(); |
393 | 558 | ||
394 | printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, | 559 | printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, |
@@ -396,31 +561,24 @@ static int __init gru_init(void) | |||
396 | return 0; | 561 | return 0; |
397 | 562 | ||
398 | exit3: | 563 | exit3: |
399 | gru_proc_exit(); | 564 | gru_free_tables(); |
400 | exit2: | 565 | exit2: |
401 | misc_deregister(&gru_miscdev); | 566 | gru_proc_exit(); |
402 | exit1: | 567 | exit1: |
403 | for (--chip; chip >= 0; chip--) | 568 | misc_deregister(&gru_miscdev); |
404 | free_irq(irq + chip, NULL); | 569 | exit0: |
405 | return ret; | 570 | return ret; |
406 | 571 | ||
407 | } | 572 | } |
408 | 573 | ||
409 | static void __exit gru_exit(void) | 574 | static void __exit gru_exit(void) |
410 | { | 575 | { |
411 | int i, bid; | ||
412 | int order = get_order(sizeof(struct gru_state) * | ||
413 | GRU_CHIPLETS_PER_BLADE); | ||
414 | |||
415 | if (!is_uv_system()) | 576 | if (!is_uv_system()) |
416 | return; | 577 | return; |
417 | 578 | ||
418 | for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) | 579 | gru_teardown_tlb_irqs(); |
419 | free_irq(IRQ_GRU + i, NULL); | ||
420 | gru_kservices_exit(); | 580 | gru_kservices_exit(); |
421 | for (bid = 0; bid < GRU_MAX_BLADES; bid++) | 581 | gru_free_tables(); |
422 | free_pages((unsigned long)gru_base[bid], order); | ||
423 | |||
424 | misc_deregister(&gru_miscdev); | 582 | misc_deregister(&gru_miscdev); |
425 | gru_proc_exit(); | 583 | gru_proc_exit(); |
426 | } | 584 | } |
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c index 37e7cfc53b9c..2f30badc6ffd 100644 --- a/drivers/misc/sgi-gru/gruhandles.c +++ b/drivers/misc/sgi-gru/gruhandles.c | |||
@@ -27,9 +27,11 @@ | |||
27 | #ifdef CONFIG_IA64 | 27 | #ifdef CONFIG_IA64 |
28 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
29 | #define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) | 29 | #define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) |
30 | #define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq) | ||
30 | #else | 31 | #else |
31 | #include <asm/tsc.h> | 32 | #include <asm/tsc.h> |
32 | #define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) | 33 | #define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) |
34 | #define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz) | ||
33 | #endif | 35 | #endif |
34 | 36 | ||
35 | /* Extract the status field from a kernel handle */ | 37 | /* Extract the status field from a kernel handle */ |
@@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last]; | |||
39 | 41 | ||
40 | static void update_mcs_stats(enum mcs_op op, unsigned long clks) | 42 | static void update_mcs_stats(enum mcs_op op, unsigned long clks) |
41 | { | 43 | { |
44 | unsigned long nsec; | ||
45 | |||
46 | nsec = CLKS2NSEC(clks); | ||
42 | atomic_long_inc(&mcs_op_statistics[op].count); | 47 | atomic_long_inc(&mcs_op_statistics[op].count); |
43 | atomic_long_add(clks, &mcs_op_statistics[op].total); | 48 | atomic_long_add(nsec, &mcs_op_statistics[op].total); |
44 | if (mcs_op_statistics[op].max < clks) | 49 | if (mcs_op_statistics[op].max < nsec) |
45 | mcs_op_statistics[op].max = clks; | 50 | mcs_op_statistics[op].max = nsec; |
46 | } | 51 | } |
47 | 52 | ||
48 | static void start_instruction(void *h) | 53 | static void start_instruction(void *h) |
49 | { | 54 | { |
50 | unsigned long *w0 = h; | 55 | unsigned long *w0 = h; |
51 | 56 | ||
52 | wmb(); /* setting CMD bit must be last */ | 57 | wmb(); /* setting CMD/STATUS bits must be last */ |
53 | *w0 = *w0 | 1; | 58 | *w0 = *w0 | 0x20001; |
54 | gru_flush_cache(h); | 59 | gru_flush_cache(h); |
55 | } | 60 | } |
56 | 61 | ||
62 | static void report_instruction_timeout(void *h) | ||
63 | { | ||
64 | unsigned long goff = GSEGPOFF((unsigned long)h); | ||
65 | char *id = "???"; | ||
66 | |||
67 | if (TYPE_IS(CCH, goff)) | ||
68 | id = "CCH"; | ||
69 | else if (TYPE_IS(TGH, goff)) | ||
70 | id = "TGH"; | ||
71 | else if (TYPE_IS(TFH, goff)) | ||
72 | id = "TFH"; | ||
73 | |||
74 | panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id); | ||
75 | } | ||
76 | |||
57 | static int wait_instruction_complete(void *h, enum mcs_op opc) | 77 | static int wait_instruction_complete(void *h, enum mcs_op opc) |
58 | { | 78 | { |
59 | int status; | 79 | int status; |
@@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc) | |||
64 | status = GET_MSEG_HANDLE_STATUS(h); | 84 | status = GET_MSEG_HANDLE_STATUS(h); |
65 | if (status != CCHSTATUS_ACTIVE) | 85 | if (status != CCHSTATUS_ACTIVE) |
66 | break; | 86 | break; |
67 | if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) | 87 | if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) { |
68 | panic("GRU %p is malfunctioning: start %ld, end %ld\n", | 88 | report_instruction_timeout(h); |
69 | h, start_time, (unsigned long)get_cycles()); | 89 | start_time = get_cycles(); |
90 | } | ||
70 | } | 91 | } |
71 | if (gru_options & OPT_STATS) | 92 | if (gru_options & OPT_STATS) |
72 | update_mcs_stats(opc, get_cycles() - start_time); | 93 | update_mcs_stats(opc, get_cycles() - start_time); |
@@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc) | |||
75 | 96 | ||
76 | int cch_allocate(struct gru_context_configuration_handle *cch) | 97 | int cch_allocate(struct gru_context_configuration_handle *cch) |
77 | { | 98 | { |
99 | int ret; | ||
100 | |||
78 | cch->opc = CCHOP_ALLOCATE; | 101 | cch->opc = CCHOP_ALLOCATE; |
79 | start_instruction(cch); | 102 | start_instruction(cch); |
80 | return wait_instruction_complete(cch, cchop_allocate); | 103 | ret = wait_instruction_complete(cch, cchop_allocate); |
104 | |||
105 | /* | ||
106 | * Stop speculation into the GSEG being mapped by the previous ALLOCATE. | ||
107 | * The GSEG memory does not exist until the ALLOCATE completes. | ||
108 | */ | ||
109 | sync_core(); | ||
110 | return ret; | ||
81 | } | 111 | } |
82 | 112 | ||
83 | int cch_start(struct gru_context_configuration_handle *cch) | 113 | int cch_start(struct gru_context_configuration_handle *cch) |
@@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch) | |||
96 | 126 | ||
97 | int cch_deallocate(struct gru_context_configuration_handle *cch) | 127 | int cch_deallocate(struct gru_context_configuration_handle *cch) |
98 | { | 128 | { |
129 | int ret; | ||
130 | |||
99 | cch->opc = CCHOP_DEALLOCATE; | 131 | cch->opc = CCHOP_DEALLOCATE; |
100 | start_instruction(cch); | 132 | start_instruction(cch); |
101 | return wait_instruction_complete(cch, cchop_deallocate); | 133 | ret = wait_instruction_complete(cch, cchop_deallocate); |
134 | |||
135 | /* | ||
136 | * Stop speculation into the GSEG being unmapped by the previous | ||
137 | * DEALLOCATE. | ||
138 | */ | ||
139 | sync_core(); | ||
140 | return ret; | ||
102 | } | 141 | } |
103 | 142 | ||
104 | int cch_interrupt_sync(struct gru_context_configuration_handle | 143 | int cch_interrupt_sync(struct gru_context_configuration_handle |
@@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh, | |||
126 | return wait_instruction_complete(tgh, tghop_invalidate); | 165 | return wait_instruction_complete(tgh, tghop_invalidate); |
127 | } | 166 | } |
128 | 167 | ||
129 | void tfh_write_only(struct gru_tlb_fault_handle *tfh, | 168 | int tfh_write_only(struct gru_tlb_fault_handle *tfh, |
130 | unsigned long pfn, unsigned long vaddr, | 169 | unsigned long paddr, int gaa, |
131 | int asid, int dirty, int pagesize) | 170 | unsigned long vaddr, int asid, int dirty, |
171 | int pagesize) | ||
132 | { | 172 | { |
133 | tfh->fillasid = asid; | 173 | tfh->fillasid = asid; |
134 | tfh->fillvaddr = vaddr; | 174 | tfh->fillvaddr = vaddr; |
135 | tfh->pfn = pfn; | 175 | tfh->pfn = paddr >> GRU_PADDR_SHIFT; |
176 | tfh->gaa = gaa; | ||
136 | tfh->dirty = dirty; | 177 | tfh->dirty = dirty; |
137 | tfh->pagesize = pagesize; | 178 | tfh->pagesize = pagesize; |
138 | tfh->opc = TFHOP_WRITE_ONLY; | 179 | tfh->opc = TFHOP_WRITE_ONLY; |
139 | start_instruction(tfh); | 180 | start_instruction(tfh); |
181 | return wait_instruction_complete(tfh, tfhop_write_only); | ||
140 | } | 182 | } |
141 | 183 | ||
142 | void tfh_write_restart(struct gru_tlb_fault_handle *tfh, | 184 | void tfh_write_restart(struct gru_tlb_fault_handle *tfh, |
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h index f44112242d00..3f998b924d8f 100644 --- a/drivers/misc/sgi-gru/gruhandles.h +++ b/drivers/misc/sgi-gru/gruhandles.h | |||
@@ -91,6 +91,12 @@ | |||
91 | /* Convert an arbitrary handle address to the beginning of the GRU segment */ | 91 | /* Convert an arbitrary handle address to the beginning of the GRU segment */ |
92 | #define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) | 92 | #define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) |
93 | 93 | ||
94 | /* Test a valid handle address to determine the type */ | ||
95 | #define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \ | ||
96 | GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \ | ||
97 | (((h) & (GRU_HANDLE_STRIDE - 1)) == 0)) | ||
98 | |||
99 | |||
94 | /* General addressing macros. */ | 100 | /* General addressing macros. */ |
95 | static inline void *get_gseg_base_address(void *base, int ctxnum) | 101 | static inline void *get_gseg_base_address(void *base, int ctxnum) |
96 | { | 102 | { |
@@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet) | |||
158 | return vaddr + GRU_SIZE * (2 * pnode + chiplet); | 164 | return vaddr + GRU_SIZE * (2 * pnode + chiplet); |
159 | } | 165 | } |
160 | 166 | ||
167 | static inline struct gru_control_block_extended *gru_tfh_to_cbe( | ||
168 | struct gru_tlb_fault_handle *tfh) | ||
169 | { | ||
170 | unsigned long cbe; | ||
171 | |||
172 | cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE; | ||
173 | return (struct gru_control_block_extended*)cbe; | ||
174 | } | ||
175 | |||
176 | |||
161 | 177 | ||
162 | 178 | ||
163 | /* | 179 | /* |
@@ -236,6 +252,17 @@ enum gru_tgh_state { | |||
236 | TGHSTATE_RESTART_CTX, | 252 | TGHSTATE_RESTART_CTX, |
237 | }; | 253 | }; |
238 | 254 | ||
255 | enum gru_tgh_cause { | ||
256 | TGHCAUSE_RR_ECC, | ||
257 | TGHCAUSE_TLB_ECC, | ||
258 | TGHCAUSE_LRU_ECC, | ||
259 | TGHCAUSE_PS_ECC, | ||
260 | TGHCAUSE_MUL_ERR, | ||
261 | TGHCAUSE_DATA_ERR, | ||
262 | TGHCAUSE_SW_FORCE | ||
263 | }; | ||
264 | |||
265 | |||
239 | /* | 266 | /* |
240 | * TFH - TLB Global Handle | 267 | * TFH - TLB Global Handle |
241 | * Used for TLB dropins into the GRU TLB. | 268 | * Used for TLB dropins into the GRU TLB. |
@@ -440,6 +467,12 @@ struct gru_control_block_extended { | |||
440 | unsigned int cbrexecstatus:8; | 467 | unsigned int cbrexecstatus:8; |
441 | }; | 468 | }; |
442 | 469 | ||
470 | /* CBE fields for active BCOPY instructions */ | ||
471 | #define cbe_baddr0 idef1upd | ||
472 | #define cbe_baddr1 idef3upd | ||
473 | #define cbe_src_cl idef6cpy | ||
474 | #define cbe_nelemcur idef5upd | ||
475 | |||
443 | enum gru_cbr_state { | 476 | enum gru_cbr_state { |
444 | CBRSTATE_INACTIVE, | 477 | CBRSTATE_INACTIVE, |
445 | CBRSTATE_IDLE, | 478 | CBRSTATE_IDLE, |
@@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch); | |||
487 | int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, | 520 | int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, |
488 | unsigned long vaddrmask, int asid, int pagesize, int global, int n, | 521 | unsigned long vaddrmask, int asid, int pagesize, int global, int n, |
489 | unsigned short ctxbitmap); | 522 | unsigned short ctxbitmap); |
490 | void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn, | 523 | int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr, |
491 | unsigned long vaddr, int asid, int dirty, int pagesize); | 524 | int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); |
492 | void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, | 525 | void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, |
493 | int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); | 526 | int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); |
494 | void tfh_restart(struct gru_tlb_fault_handle *tfh); | 527 | void tfh_restart(struct gru_tlb_fault_handle *tfh); |
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 55eabfa85585..9b2062d17327 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c | |||
@@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s) | |||
44 | 44 | ||
45 | static int gru_dump_context_data(void *grubase, | 45 | static int gru_dump_context_data(void *grubase, |
46 | struct gru_context_configuration_handle *cch, | 46 | struct gru_context_configuration_handle *cch, |
47 | void __user *ubuf, int ctxnum, int dsrcnt) | 47 | void __user *ubuf, int ctxnum, int dsrcnt, |
48 | int flush_cbrs) | ||
48 | { | 49 | { |
49 | void *cb, *cbe, *tfh, *gseg; | 50 | void *cb, *cbe, *tfh, *gseg; |
50 | int i, scr; | 51 | int i, scr; |
@@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase, | |||
55 | tfh = grubase + GRU_TFH_BASE; | 56 | tfh = grubase + GRU_TFH_BASE; |
56 | 57 | ||
57 | for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { | 58 | for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { |
59 | if (flush_cbrs) | ||
60 | gru_flush_cache(cb); | ||
58 | if (gru_user_copy_handle(&ubuf, cb)) | 61 | if (gru_user_copy_handle(&ubuf, cb)) |
59 | goto fail; | 62 | goto fail; |
60 | if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) | 63 | if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) |
@@ -115,7 +118,7 @@ fail: | |||
115 | 118 | ||
116 | static int gru_dump_context(struct gru_state *gru, int ctxnum, | 119 | static int gru_dump_context(struct gru_state *gru, int ctxnum, |
117 | void __user *ubuf, void __user *ubufend, char data_opt, | 120 | void __user *ubuf, void __user *ubufend, char data_opt, |
118 | char lock_cch) | 121 | char lock_cch, char flush_cbrs) |
119 | { | 122 | { |
120 | struct gru_dump_context_header hdr; | 123 | struct gru_dump_context_header hdr; |
121 | struct gru_dump_context_header __user *uhdr = ubuf; | 124 | struct gru_dump_context_header __user *uhdr = ubuf; |
@@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum, | |||
159 | ret = -EFBIG; | 162 | ret = -EFBIG; |
160 | else | 163 | else |
161 | ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, | 164 | ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, |
162 | dsrcnt); | 165 | dsrcnt, flush_cbrs); |
163 | |||
164 | } | 166 | } |
165 | if (cch_locked) | 167 | if (cch_locked) |
166 | unlock_cch_handle(cch); | 168 | unlock_cch_handle(cch); |
@@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg) | |||
215 | for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { | 217 | for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { |
216 | if (req.ctxnum == ctxnum || req.ctxnum < 0) { | 218 | if (req.ctxnum == ctxnum || req.ctxnum < 0) { |
217 | ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, | 219 | ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, |
218 | req.data_opt, req.lock_cch); | 220 | req.data_opt, req.lock_cch, |
221 | req.flush_cbrs); | ||
219 | if (ret < 0) | 222 | if (ret < 0) |
220 | goto fail; | 223 | goto fail; |
221 | ubuf += ret; | 224 | ubuf += ret; |
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 766e21e15574..34749ee88dfa 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/uaccess.h> | 32 | #include <linux/uaccess.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <asm/io_apic.h> | ||
34 | #include "gru.h" | 35 | #include "gru.h" |
35 | #include "grulib.h" | 36 | #include "grulib.h" |
36 | #include "grutables.h" | 37 | #include "grutables.h" |
@@ -97,9 +98,6 @@ | |||
97 | #define ASYNC_HAN_TO_BID(h) ((h) - 1) | 98 | #define ASYNC_HAN_TO_BID(h) ((h) - 1) |
98 | #define ASYNC_BID_TO_HAN(b) ((b) + 1) | 99 | #define ASYNC_BID_TO_HAN(b) ((b) + 1) |
99 | #define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] | 100 | #define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] |
100 | #define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \ | ||
101 | (GRU_SIZE * GRU_CHIPLETS_PER_BLADE)) | ||
102 | #define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)] | ||
103 | 101 | ||
104 | #define GRU_NUM_KERNEL_CBR 1 | 102 | #define GRU_NUM_KERNEL_CBR 1 |
105 | #define GRU_NUM_KERNEL_DSR_BYTES 256 | 103 | #define GRU_NUM_KERNEL_DSR_BYTES 256 |
@@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id) | |||
160 | up_read(&bs->bs_kgts_sema); | 158 | up_read(&bs->bs_kgts_sema); |
161 | down_write(&bs->bs_kgts_sema); | 159 | down_write(&bs->bs_kgts_sema); |
162 | 160 | ||
163 | if (!bs->bs_kgts) | 161 | if (!bs->bs_kgts) { |
164 | bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0); | 162 | bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0); |
163 | bs->bs_kgts->ts_user_blade_id = blade_id; | ||
164 | } | ||
165 | kgts = bs->bs_kgts; | 165 | kgts = bs->bs_kgts; |
166 | 166 | ||
167 | if (!kgts->ts_gru) { | 167 | if (!kgts->ts_gru) { |
@@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id) | |||
172 | kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( | 172 | kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( |
173 | GRU_NUM_KERNEL_DSR_BYTES * ncpus + | 173 | GRU_NUM_KERNEL_DSR_BYTES * ncpus + |
174 | bs->bs_async_dsr_bytes); | 174 | bs->bs_async_dsr_bytes); |
175 | while (!gru_assign_gru_context(kgts, blade_id)) { | 175 | while (!gru_assign_gru_context(kgts)) { |
176 | msleep(1); | 176 | msleep(1); |
177 | gru_steal_context(kgts, blade_id); | 177 | gru_steal_context(kgts); |
178 | } | 178 | } |
179 | gru_load_context(kgts); | 179 | gru_load_context(kgts); |
180 | gru = bs->bs_kgts->ts_gru; | 180 | gru = bs->bs_kgts->ts_gru; |
@@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void) | |||
200 | bs = gru_base[bid]; | 200 | bs = gru_base[bid]; |
201 | if (!bs) | 201 | if (!bs) |
202 | continue; | 202 | continue; |
203 | |||
204 | /* Ignore busy contexts. Don't want to block here. */ | ||
203 | if (down_write_trylock(&bs->bs_kgts_sema)) { | 205 | if (down_write_trylock(&bs->bs_kgts_sema)) { |
204 | kgts = bs->bs_kgts; | 206 | kgts = bs->bs_kgts; |
205 | if (kgts && kgts->ts_gru) | 207 | if (kgts && kgts->ts_gru) |
206 | gru_unload_context(kgts, 0); | 208 | gru_unload_context(kgts, 0); |
207 | kfree(kgts); | ||
208 | bs->bs_kgts = NULL; | 209 | bs->bs_kgts = NULL; |
209 | up_write(&bs->bs_kgts_sema); | 210 | up_write(&bs->bs_kgts_sema); |
211 | kfree(kgts); | ||
210 | } else { | 212 | } else { |
211 | ret++; | 213 | ret++; |
212 | } | 214 | } |
@@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void) | |||
220 | static struct gru_blade_state *gru_lock_kernel_context(int blade_id) | 222 | static struct gru_blade_state *gru_lock_kernel_context(int blade_id) |
221 | { | 223 | { |
222 | struct gru_blade_state *bs; | 224 | struct gru_blade_state *bs; |
225 | int bid; | ||
223 | 226 | ||
224 | STAT(lock_kernel_context); | 227 | STAT(lock_kernel_context); |
225 | bs = gru_base[blade_id]; | 228 | again: |
229 | bid = blade_id < 0 ? uv_numa_blade_id() : blade_id; | ||
230 | bs = gru_base[bid]; | ||
226 | 231 | ||
232 | /* Handle the case where migration occured while waiting for the sema */ | ||
227 | down_read(&bs->bs_kgts_sema); | 233 | down_read(&bs->bs_kgts_sema); |
234 | if (blade_id < 0 && bid != uv_numa_blade_id()) { | ||
235 | up_read(&bs->bs_kgts_sema); | ||
236 | goto again; | ||
237 | } | ||
228 | if (!bs->bs_kgts || !bs->bs_kgts->ts_gru) | 238 | if (!bs->bs_kgts || !bs->bs_kgts->ts_gru) |
229 | gru_load_kernel_context(bs, blade_id); | 239 | gru_load_kernel_context(bs, bid); |
230 | return bs; | 240 | return bs; |
231 | 241 | ||
232 | } | 242 | } |
@@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) | |||
255 | 265 | ||
256 | BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); | 266 | BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); |
257 | preempt_disable(); | 267 | preempt_disable(); |
258 | bs = gru_lock_kernel_context(uv_numa_blade_id()); | 268 | bs = gru_lock_kernel_context(-1); |
259 | lcpu = uv_blade_processor_id(); | 269 | lcpu = uv_blade_processor_id(); |
260 | *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; | 270 | *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; |
261 | *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; | 271 | *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; |
@@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb, | |||
384 | struct control_block_extended_exc_detail *excdet) | 394 | struct control_block_extended_exc_detail *excdet) |
385 | { | 395 | { |
386 | struct gru_control_block_extended *cbe; | 396 | struct gru_control_block_extended *cbe; |
387 | struct gru_blade_state *bs; | 397 | struct gru_thread_state *kgts = NULL; |
388 | int cbrnum; | 398 | unsigned long off; |
389 | 399 | int cbrnum, bid; | |
390 | bs = KCB_TO_BS(cb); | 400 | |
391 | cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb)); | 401 | /* |
402 | * Locate kgts for cb. This algorithm is SLOW but | ||
403 | * this function is rarely called (ie., almost never). | ||
404 | * Performance does not matter. | ||
405 | */ | ||
406 | for_each_possible_blade(bid) { | ||
407 | if (!gru_base[bid]) | ||
408 | break; | ||
409 | kgts = gru_base[bid]->bs_kgts; | ||
410 | if (!kgts || !kgts->ts_gru) | ||
411 | continue; | ||
412 | off = cb - kgts->ts_gru->gs_gru_base_vaddr; | ||
413 | if (off < GRU_SIZE) | ||
414 | break; | ||
415 | kgts = NULL; | ||
416 | } | ||
417 | BUG_ON(!kgts); | ||
418 | cbrnum = thread_cbr_number(kgts, get_cb_number(cb)); | ||
392 | cbe = get_cbe(GRUBASE(cb), cbrnum); | 419 | cbe = get_cbe(GRUBASE(cb), cbrnum); |
393 | gru_flush_cache(cbe); /* CBE not coherent */ | 420 | gru_flush_cache(cbe); /* CBE not coherent */ |
421 | sync_core(); | ||
394 | excdet->opc = cbe->opccpy; | 422 | excdet->opc = cbe->opccpy; |
395 | excdet->exopc = cbe->exopccpy; | 423 | excdet->exopc = cbe->exopccpy; |
396 | excdet->ecause = cbe->ecause; | 424 | excdet->ecause = cbe->ecause; |
@@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb, | |||
409 | if (ret > 0 && gen->istatus == CBS_EXCEPTION) { | 437 | if (ret > 0 && gen->istatus == CBS_EXCEPTION) { |
410 | gru_get_cb_exception_detail(cb, &excdet); | 438 | gru_get_cb_exception_detail(cb, &excdet); |
411 | snprintf(buf, size, | 439 | snprintf(buf, size, |
412 | "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x," | 440 | "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x," |
413 | "excdet0 0x%lx, excdet1 0x%x", | 441 | "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(), |
414 | gen, excdet.opc, excdet.exopc, excdet.ecause, | 442 | gen, excdet.opc, excdet.exopc, excdet.ecause, |
415 | excdet.exceptdet0, excdet.exceptdet1); | 443 | excdet.exceptdet0, excdet.exceptdet1); |
416 | } else { | 444 | } else { |
@@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb) | |||
457 | int ret; | 485 | int ret; |
458 | 486 | ||
459 | ret = gen->istatus; | 487 | ret = gen->istatus; |
460 | if (ret != CBS_EXCEPTION) | 488 | if (ret == CBS_EXCEPTION) |
461 | return ret; | 489 | ret = gru_retry_exception(cb); |
462 | return gru_retry_exception(cb); | 490 | rmb(); |
491 | return ret; | ||
463 | 492 | ||
464 | } | 493 | } |
465 | 494 | ||
@@ -471,7 +500,7 @@ int gru_wait_proc(void *cb) | |||
471 | ret = gru_wait_idle_or_exception(gen); | 500 | ret = gru_wait_idle_or_exception(gen); |
472 | if (ret == CBS_EXCEPTION) | 501 | if (ret == CBS_EXCEPTION) |
473 | ret = gru_retry_exception(cb); | 502 | ret = gru_retry_exception(cb); |
474 | 503 | rmb(); | |
475 | return ret; | 504 | return ret; |
476 | } | 505 | } |
477 | 506 | ||
@@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd, | |||
538 | mqd->mq = mq; | 567 | mqd->mq = mq; |
539 | mqd->mq_gpa = uv_gpa(mq); | 568 | mqd->mq_gpa = uv_gpa(mq); |
540 | mqd->qlines = qlines; | 569 | mqd->qlines = qlines; |
541 | mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid); | 570 | mqd->interrupt_pnode = nasid >> 1; |
542 | mqd->interrupt_vector = vector; | 571 | mqd->interrupt_vector = vector; |
543 | mqd->interrupt_apicid = apicid; | 572 | mqd->interrupt_apicid = apicid; |
544 | return 0; | 573 | return 0; |
@@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd, | |||
598 | ret = MQE_UNEXPECTED_CB_ERR; | 627 | ret = MQE_UNEXPECTED_CB_ERR; |
599 | break; | 628 | break; |
600 | case CBSS_PAGE_OVERFLOW: | 629 | case CBSS_PAGE_OVERFLOW: |
630 | STAT(mesq_noop_page_overflow); | ||
631 | /* fallthru */ | ||
601 | default: | 632 | default: |
602 | BUG(); | 633 | BUG(); |
603 | } | 634 | } |
@@ -673,18 +704,6 @@ cberr: | |||
673 | } | 704 | } |
674 | 705 | ||
675 | /* | 706 | /* |
676 | * Send a cross-partition interrupt to the SSI that contains the target | ||
677 | * message queue. Normally, the interrupt is automatically delivered by hardware | ||
678 | * but some error conditions require explicit delivery. | ||
679 | */ | ||
680 | static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd) | ||
681 | { | ||
682 | if (mqd->interrupt_vector) | ||
683 | uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid, | ||
684 | mqd->interrupt_vector); | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * Handle a PUT failure. Note: if message was a 2-line message, one of the | 707 | * Handle a PUT failure. Note: if message was a 2-line message, one of the |
689 | * lines might have successfully have been written. Before sending the | 708 | * lines might have successfully have been written. Before sending the |
690 | * message, "present" must be cleared in BOTH lines to prevent the receiver | 709 | * message, "present" must be cleared in BOTH lines to prevent the receiver |
@@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd) | |||
693 | static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, | 712 | static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, |
694 | void *mesg, int lines) | 713 | void *mesg, int lines) |
695 | { | 714 | { |
696 | unsigned long m; | 715 | unsigned long m, *val = mesg, gpa, save; |
716 | int ret; | ||
697 | 717 | ||
698 | m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); | 718 | m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); |
699 | if (lines == 2) { | 719 | if (lines == 2) { |
@@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, | |||
704 | gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); | 724 | gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); |
705 | if (gru_wait(cb) != CBS_IDLE) | 725 | if (gru_wait(cb) != CBS_IDLE) |
706 | return MQE_UNEXPECTED_CB_ERR; | 726 | return MQE_UNEXPECTED_CB_ERR; |
707 | send_message_queue_interrupt(mqd); | 727 | |
728 | if (!mqd->interrupt_vector) | ||
729 | return MQE_OK; | ||
730 | |||
731 | /* | ||
732 | * Send a cross-partition interrupt to the SSI that contains the target | ||
733 | * message queue. Normally, the interrupt is automatically delivered by | ||
734 | * hardware but some error conditions require explicit delivery. | ||
735 | * Use the GRU to deliver the interrupt. Otherwise partition failures | ||
736 | * could cause unrecovered errors. | ||
737 | */ | ||
738 | gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT); | ||
739 | save = *val; | ||
740 | *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector, | ||
741 | dest_Fixed); | ||
742 | gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA); | ||
743 | ret = gru_wait(cb); | ||
744 | *val = save; | ||
745 | if (ret != CBS_IDLE) | ||
746 | return MQE_UNEXPECTED_CB_ERR; | ||
708 | return MQE_OK; | 747 | return MQE_OK; |
709 | } | 748 | } |
710 | 749 | ||
@@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, | |||
739 | STAT(mesq_send_put_nacked); | 778 | STAT(mesq_send_put_nacked); |
740 | ret = send_message_put_nacked(cb, mqd, mesg, lines); | 779 | ret = send_message_put_nacked(cb, mqd, mesg, lines); |
741 | break; | 780 | break; |
781 | case CBSS_PAGE_OVERFLOW: | ||
782 | STAT(mesq_page_overflow); | ||
783 | /* fallthru */ | ||
742 | default: | 784 | default: |
743 | BUG(); | 785 | BUG(); |
744 | } | 786 | } |
@@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd) | |||
831 | int present = mhdr->present; | 873 | int present = mhdr->present; |
832 | 874 | ||
833 | /* skip NOOP messages */ | 875 | /* skip NOOP messages */ |
834 | STAT(mesq_receive); | ||
835 | while (present == MQS_NOOP) { | 876 | while (present == MQS_NOOP) { |
836 | gru_free_message(mqd, mhdr); | 877 | gru_free_message(mqd, mhdr); |
837 | mhdr = mq->next; | 878 | mhdr = mq->next; |
@@ -851,6 +892,7 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd) | |||
851 | if (mhdr->lines == 2) | 892 | if (mhdr->lines == 2) |
852 | restore_present2(mhdr, mhdr->present2); | 893 | restore_present2(mhdr, mhdr->present2); |
853 | 894 | ||
895 | STAT(mesq_receive); | ||
854 | return mhdr; | 896 | return mhdr; |
855 | } | 897 | } |
856 | EXPORT_SYMBOL_GPL(gru_get_next_message); | 898 | EXPORT_SYMBOL_GPL(gru_get_next_message); |
@@ -858,6 +900,29 @@ EXPORT_SYMBOL_GPL(gru_get_next_message); | |||
858 | /* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ | 900 | /* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ |
859 | 901 | ||
860 | /* | 902 | /* |
903 | * Load a DW from a global GPA. The GPA can be a memory or MMR address. | ||
904 | */ | ||
905 | int gru_read_gpa(unsigned long *value, unsigned long gpa) | ||
906 | { | ||
907 | void *cb; | ||
908 | void *dsr; | ||
909 | int ret, iaa; | ||
910 | |||
911 | STAT(read_gpa); | ||
912 | if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) | ||
913 | return MQE_BUG_NO_RESOURCES; | ||
914 | iaa = gpa >> 62; | ||
915 | gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA); | ||
916 | ret = gru_wait(cb); | ||
917 | if (ret == CBS_IDLE) | ||
918 | *value = *(unsigned long *)dsr; | ||
919 | gru_free_cpu_resources(cb, dsr); | ||
920 | return ret; | ||
921 | } | ||
922 | EXPORT_SYMBOL_GPL(gru_read_gpa); | ||
923 | |||
924 | |||
925 | /* | ||
861 | * Copy a block of data using the GRU resources | 926 | * Copy a block of data using the GRU resources |
862 | */ | 927 | */ |
863 | int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, | 928 | int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, |
@@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg) | |||
898 | 963 | ||
899 | gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); | 964 | gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); |
900 | if (gru_wait(cb) != CBS_IDLE) { | 965 | if (gru_wait(cb) != CBS_IDLE) { |
901 | printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n"); | 966 | printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id()); |
902 | goto done; | 967 | goto done; |
903 | } | 968 | } |
904 | 969 | ||
905 | if (*p != MAGIC) { | 970 | if (*p != MAGIC) { |
906 | printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p); | 971 | printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p); |
907 | goto done; | 972 | goto done; |
908 | } | 973 | } |
909 | gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); | 974 | gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); |
910 | if (gru_wait(cb) != CBS_IDLE) { | 975 | if (gru_wait(cb) != CBS_IDLE) { |
911 | printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n"); | 976 | printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id()); |
912 | goto done; | 977 | goto done; |
913 | } | 978 | } |
914 | 979 | ||
915 | if (word0 != word1 || word1 != MAGIC) { | 980 | if (word0 != word1 || word1 != MAGIC) { |
916 | printk(KERN_DEBUG | 981 | printk(KERN_DEBUG |
917 | "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n", | 982 | "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n", |
918 | word1, MAGIC); | 983 | smp_processor_id(), word1, MAGIC); |
919 | goto done; | 984 | goto done; |
920 | } | 985 | } |
921 | ret = 0; | 986 | ret = 0; |
@@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg) | |||
952 | if (ret) | 1017 | if (ret) |
953 | break; | 1018 | break; |
954 | } | 1019 | } |
955 | if (ret != MQE_QUEUE_FULL || i != 4) | 1020 | if (ret != MQE_QUEUE_FULL || i != 4) { |
1021 | printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n", | ||
1022 | smp_processor_id(), ret, i); | ||
956 | goto done; | 1023 | goto done; |
1024 | } | ||
957 | 1025 | ||
958 | for (i = 0; i < 6; i++) { | 1026 | for (i = 0; i < 6; i++) { |
959 | m = gru_get_next_message(&mqd); | 1027 | m = gru_get_next_message(&mqd); |
@@ -961,7 +1029,12 @@ static int quicktest1(unsigned long arg) | |||
961 | break; | 1029 | break; |
962 | gru_free_message(&mqd, m); | 1030 | gru_free_message(&mqd, m); |
963 | } | 1031 | } |
964 | ret = (i == 4) ? 0 : -EIO; | 1032 | if (i != 4) { |
1033 | printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n", | ||
1034 | smp_processor_id(), i, m, m ? m[8] : -1); | ||
1035 | goto done; | ||
1036 | } | ||
1037 | ret = 0; | ||
965 | 1038 | ||
966 | done: | 1039 | done: |
967 | kfree(p); | 1040 | kfree(p); |
@@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg) | |||
977 | int ret = 0; | 1050 | int ret = 0; |
978 | unsigned long *buf; | 1051 | unsigned long *buf; |
979 | void *cb0, *cb; | 1052 | void *cb0, *cb; |
1053 | struct gru_control_block_status *gen; | ||
980 | int i, k, istatus, bytes; | 1054 | int i, k, istatus, bytes; |
981 | 1055 | ||
982 | bytes = numcb * 4 * 8; | 1056 | bytes = numcb * 4 * 8; |
@@ -996,20 +1070,30 @@ static int quicktest2(unsigned long arg) | |||
996 | XTYPE_DW, 4, 1, IMA_INTERRUPT); | 1070 | XTYPE_DW, 4, 1, IMA_INTERRUPT); |
997 | 1071 | ||
998 | ret = 0; | 1072 | ret = 0; |
999 | for (k = 0; k < numcb; k++) { | 1073 | k = numcb; |
1074 | do { | ||
1000 | gru_wait_async_cbr(han); | 1075 | gru_wait_async_cbr(han); |
1001 | for (i = 0; i < numcb; i++) { | 1076 | for (i = 0; i < numcb; i++) { |
1002 | cb = cb0 + i * GRU_HANDLE_STRIDE; | 1077 | cb = cb0 + i * GRU_HANDLE_STRIDE; |
1003 | istatus = gru_check_status(cb); | 1078 | istatus = gru_check_status(cb); |
1004 | if (istatus == CBS_ACTIVE) | 1079 | if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS) |
1005 | continue; | 1080 | break; |
1006 | if (istatus == CBS_EXCEPTION) | ||
1007 | ret = -EFAULT; | ||
1008 | else if (buf[i] || buf[i + 1] || buf[i + 2] || | ||
1009 | buf[i + 3]) | ||
1010 | ret = -EIO; | ||
1011 | } | 1081 | } |
1012 | } | 1082 | if (i == numcb) |
1083 | continue; | ||
1084 | if (istatus != CBS_IDLE) { | ||
1085 | printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i); | ||
1086 | ret = -EFAULT; | ||
1087 | } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] || | ||
1088 | buf[4 * i + 3]) { | ||
1089 | printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n", | ||
1090 | smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]); | ||
1091 | ret = -EIO; | ||
1092 | } | ||
1093 | k--; | ||
1094 | gen = cb; | ||
1095 | gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */ | ||
1096 | } while (k); | ||
1013 | BUG_ON(cmp.done); | 1097 | BUG_ON(cmp.done); |
1014 | 1098 | ||
1015 | gru_unlock_async_resource(han); | 1099 | gru_unlock_async_resource(han); |
@@ -1019,6 +1103,22 @@ done: | |||
1019 | return ret; | 1103 | return ret; |
1020 | } | 1104 | } |
1021 | 1105 | ||
1106 | #define BUFSIZE 200 | ||
1107 | static int quicktest3(unsigned long arg) | ||
1108 | { | ||
1109 | char buf1[BUFSIZE], buf2[BUFSIZE]; | ||
1110 | int ret = 0; | ||
1111 | |||
1112 | memset(buf2, 0, sizeof(buf2)); | ||
1113 | memset(buf1, get_cycles() & 255, sizeof(buf1)); | ||
1114 | gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE); | ||
1115 | if (memcmp(buf1, buf2, BUFSIZE)) { | ||
1116 | printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id()); | ||
1117 | ret = -EIO; | ||
1118 | } | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1022 | /* | 1122 | /* |
1023 | * Debugging only. User hook for various kernel tests | 1123 | * Debugging only. User hook for various kernel tests |
1024 | * of driver & gru. | 1124 | * of driver & gru. |
@@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg) | |||
1037 | case 2: | 1137 | case 2: |
1038 | ret = quicktest2(arg); | 1138 | ret = quicktest2(arg); |
1039 | break; | 1139 | break; |
1140 | case 3: | ||
1141 | ret = quicktest3(arg); | ||
1142 | break; | ||
1040 | case 99: | 1143 | case 99: |
1041 | ret = gru_free_kernel_contexts(); | 1144 | ret = gru_free_kernel_contexts(); |
1042 | break; | 1145 | break; |
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h index d60d34bca44d..02aa94d8484a 100644 --- a/drivers/misc/sgi-gru/grukservices.h +++ b/drivers/misc/sgi-gru/grukservices.h | |||
@@ -131,6 +131,20 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd); | |||
131 | 131 | ||
132 | 132 | ||
133 | /* | 133 | /* |
134 | * Read a GRU global GPA. Source can be located in a remote partition. | ||
135 | * | ||
136 | * Input: | ||
137 | * value memory address where MMR value is returned | ||
138 | * gpa source numalink physical address of GPA | ||
139 | * | ||
140 | * Output: | ||
141 | * 0 OK | ||
142 | * >0 error | ||
143 | */ | ||
144 | int gru_read_gpa(unsigned long *value, unsigned long gpa); | ||
145 | |||
146 | |||
147 | /* | ||
134 | * Copy data using the GRU. Source or destination can be located in a remote | 148 | * Copy data using the GRU. Source or destination can be located in a remote |
135 | * partition. | 149 | * partition. |
136 | * | 150 | * |
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h index 889bc442a3e8..e77d1b1f9d05 100644 --- a/drivers/misc/sgi-gru/grulib.h +++ b/drivers/misc/sgi-gru/grulib.h | |||
@@ -63,18 +63,9 @@ | |||
63 | #define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) | 63 | #define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) |
64 | #define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) | 64 | #define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) |
65 | 65 | ||
66 | /* | ||
67 | * Statictics kept on a per-GTS basis. | ||
68 | */ | ||
69 | struct gts_statistics { | ||
70 | unsigned long fmm_tlbdropin; | ||
71 | unsigned long upm_tlbdropin; | ||
72 | unsigned long context_stolen; | ||
73 | }; | ||
74 | |||
75 | struct gru_get_gseg_statistics_req { | 66 | struct gru_get_gseg_statistics_req { |
76 | unsigned long gseg; | 67 | unsigned long gseg; |
77 | struct gts_statistics stats; | 68 | struct gru_gseg_statistics stats; |
78 | }; | 69 | }; |
79 | 70 | ||
80 | /* | 71 | /* |
@@ -86,6 +77,7 @@ struct gru_create_context_req { | |||
86 | unsigned int control_blocks; | 77 | unsigned int control_blocks; |
87 | unsigned int maximum_thread_count; | 78 | unsigned int maximum_thread_count; |
88 | unsigned int options; | 79 | unsigned int options; |
80 | unsigned char tlb_preload_count; | ||
89 | }; | 81 | }; |
90 | 82 | ||
91 | /* | 83 | /* |
@@ -98,11 +90,12 @@ struct gru_unload_context_req { | |||
98 | /* | 90 | /* |
99 | * Structure used to set context options | 91 | * Structure used to set context options |
100 | */ | 92 | */ |
101 | enum {sco_gseg_owner, sco_cch_req_slice}; | 93 | enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet}; |
102 | struct gru_set_context_option_req { | 94 | struct gru_set_context_option_req { |
103 | unsigned long gseg; | 95 | unsigned long gseg; |
104 | int op; | 96 | int op; |
105 | unsigned long val1; | 97 | int val0; |
98 | long val1; | ||
106 | }; | 99 | }; |
107 | 100 | ||
108 | /* | 101 | /* |
@@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req { | |||
124 | int ctxnum; | 117 | int ctxnum; |
125 | char data_opt; | 118 | char data_opt; |
126 | char lock_cch; | 119 | char lock_cch; |
120 | char flush_cbrs; | ||
121 | char fill[10]; | ||
127 | pid_t pid; | 122 | pid_t pid; |
128 | void *buf; | 123 | void *buf; |
129 | size_t buflen; | 124 | size_t buflen; |
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 3bc643dad606..f8538bbd0bfa 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <linux/err.h> | ||
30 | #include <asm/uv/uv_hub.h> | 31 | #include <asm/uv/uv_hub.h> |
31 | #include "gru.h" | 32 | #include "gru.h" |
32 | #include "grutables.h" | 33 | #include "grutables.h" |
@@ -48,12 +49,20 @@ struct device *grudev = &gru_device; | |||
48 | /* | 49 | /* |
49 | * Select a gru fault map to be used by the current cpu. Note that | 50 | * Select a gru fault map to be used by the current cpu. Note that |
50 | * multiple cpus may be using the same map. | 51 | * multiple cpus may be using the same map. |
51 | * ZZZ should "shift" be used?? Depends on HT cpu numbering | ||
52 | * ZZZ should be inline but did not work on emulator | 52 | * ZZZ should be inline but did not work on emulator |
53 | */ | 53 | */ |
54 | int gru_cpu_fault_map_id(void) | 54 | int gru_cpu_fault_map_id(void) |
55 | { | 55 | { |
56 | #ifdef CONFIG_IA64 | ||
56 | return uv_blade_processor_id() % GRU_NUM_TFM; | 57 | return uv_blade_processor_id() % GRU_NUM_TFM; |
58 | #else | ||
59 | int cpu = smp_processor_id(); | ||
60 | int id, core; | ||
61 | |||
62 | core = uv_cpu_core_number(cpu); | ||
63 | id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); | ||
64 | return id; | ||
65 | #endif | ||
57 | } | 66 | } |
58 | 67 | ||
59 | /*--------- ASID Management ------------------------------------------- | 68 | /*--------- ASID Management ------------------------------------------- |
@@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru, | |||
286 | void gts_drop(struct gru_thread_state *gts) | 295 | void gts_drop(struct gru_thread_state *gts) |
287 | { | 296 | { |
288 | if (gts && atomic_dec_return(>s->ts_refcnt) == 0) { | 297 | if (gts && atomic_dec_return(>s->ts_refcnt) == 0) { |
289 | gru_drop_mmu_notifier(gts->ts_gms); | 298 | if (gts->ts_gms) |
299 | gru_drop_mmu_notifier(gts->ts_gms); | ||
290 | kfree(gts); | 300 | kfree(gts); |
291 | STAT(gts_free); | 301 | STAT(gts_free); |
292 | } | 302 | } |
@@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data | |||
310 | * Allocate a thread state structure. | 320 | * Allocate a thread state structure. |
311 | */ | 321 | */ |
312 | struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | 322 | struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, |
313 | int cbr_au_count, int dsr_au_count, int options, int tsid) | 323 | int cbr_au_count, int dsr_au_count, |
324 | unsigned char tlb_preload_count, int options, int tsid) | ||
314 | { | 325 | { |
315 | struct gru_thread_state *gts; | 326 | struct gru_thread_state *gts; |
327 | struct gru_mm_struct *gms; | ||
316 | int bytes; | 328 | int bytes; |
317 | 329 | ||
318 | bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); | 330 | bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); |
319 | bytes += sizeof(struct gru_thread_state); | 331 | bytes += sizeof(struct gru_thread_state); |
320 | gts = kmalloc(bytes, GFP_KERNEL); | 332 | gts = kmalloc(bytes, GFP_KERNEL); |
321 | if (!gts) | 333 | if (!gts) |
322 | return NULL; | 334 | return ERR_PTR(-ENOMEM); |
323 | 335 | ||
324 | STAT(gts_alloc); | 336 | STAT(gts_alloc); |
325 | memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ | 337 | memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ |
@@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | |||
327 | mutex_init(>s->ts_ctxlock); | 339 | mutex_init(>s->ts_ctxlock); |
328 | gts->ts_cbr_au_count = cbr_au_count; | 340 | gts->ts_cbr_au_count = cbr_au_count; |
329 | gts->ts_dsr_au_count = dsr_au_count; | 341 | gts->ts_dsr_au_count = dsr_au_count; |
342 | gts->ts_tlb_preload_count = tlb_preload_count; | ||
330 | gts->ts_user_options = options; | 343 | gts->ts_user_options = options; |
344 | gts->ts_user_blade_id = -1; | ||
345 | gts->ts_user_chiplet_id = -1; | ||
331 | gts->ts_tsid = tsid; | 346 | gts->ts_tsid = tsid; |
332 | gts->ts_ctxnum = NULLCTX; | 347 | gts->ts_ctxnum = NULLCTX; |
333 | gts->ts_tlb_int_select = -1; | 348 | gts->ts_tlb_int_select = -1; |
@@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | |||
336 | if (vma) { | 351 | if (vma) { |
337 | gts->ts_mm = current->mm; | 352 | gts->ts_mm = current->mm; |
338 | gts->ts_vma = vma; | 353 | gts->ts_vma = vma; |
339 | gts->ts_gms = gru_register_mmu_notifier(); | 354 | gms = gru_register_mmu_notifier(); |
340 | if (!gts->ts_gms) | 355 | if (IS_ERR(gms)) |
341 | goto err; | 356 | goto err; |
357 | gts->ts_gms = gms; | ||
342 | } | 358 | } |
343 | 359 | ||
344 | gru_dbg(grudev, "alloc gts %p\n", gts); | 360 | gru_dbg(grudev, "alloc gts %p\n", gts); |
@@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | |||
346 | 362 | ||
347 | err: | 363 | err: |
348 | gts_drop(gts); | 364 | gts_drop(gts); |
349 | return NULL; | 365 | return ERR_CAST(gms); |
350 | } | 366 | } |
351 | 367 | ||
352 | /* | 368 | /* |
@@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid) | |||
360 | if (!vdata) | 376 | if (!vdata) |
361 | return NULL; | 377 | return NULL; |
362 | 378 | ||
379 | STAT(vdata_alloc); | ||
363 | INIT_LIST_HEAD(&vdata->vd_head); | 380 | INIT_LIST_HEAD(&vdata->vd_head); |
364 | spin_lock_init(&vdata->vd_lock); | 381 | spin_lock_init(&vdata->vd_lock); |
365 | gru_dbg(grudev, "alloc vdata %p\n", vdata); | 382 | gru_dbg(grudev, "alloc vdata %p\n", vdata); |
@@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, | |||
392 | struct gru_vma_data *vdata = vma->vm_private_data; | 409 | struct gru_vma_data *vdata = vma->vm_private_data; |
393 | struct gru_thread_state *gts, *ngts; | 410 | struct gru_thread_state *gts, *ngts; |
394 | 411 | ||
395 | gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count, | 412 | gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, |
413 | vdata->vd_dsr_au_count, | ||
414 | vdata->vd_tlb_preload_count, | ||
396 | vdata->vd_user_options, tsid); | 415 | vdata->vd_user_options, tsid); |
397 | if (!gts) | 416 | if (IS_ERR(gts)) |
398 | return NULL; | 417 | return gts; |
399 | 418 | ||
400 | spin_lock(&vdata->vd_lock); | 419 | spin_lock(&vdata->vd_lock); |
401 | ngts = gru_find_current_gts_nolock(vdata, tsid); | 420 | ngts = gru_find_current_gts_nolock(vdata, tsid); |
@@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum, | |||
493 | memset(cbe + i * GRU_HANDLE_STRIDE, 0, | 512 | memset(cbe + i * GRU_HANDLE_STRIDE, 0, |
494 | GRU_CACHE_LINE_BYTES); | 513 | GRU_CACHE_LINE_BYTES); |
495 | } | 514 | } |
515 | /* Flush CBE to hide race in context restart */ | ||
516 | mb(); | ||
517 | gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); | ||
496 | cb += GRU_HANDLE_STRIDE; | 518 | cb += GRU_HANDLE_STRIDE; |
497 | } | 519 | } |
498 | 520 | ||
@@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum, | |||
513 | cb = gseg + GRU_CB_BASE; | 535 | cb = gseg + GRU_CB_BASE; |
514 | cbe = grubase + GRU_CBE_BASE; | 536 | cbe = grubase + GRU_CBE_BASE; |
515 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; | 537 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; |
538 | |||
539 | /* CBEs may not be coherent. Flush them from cache */ | ||
540 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) | ||
541 | gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); | ||
542 | mb(); /* Let the CL flush complete */ | ||
543 | |||
516 | gru_prefetch_context(gseg, cb, cbe, cbrmap, length); | 544 | gru_prefetch_context(gseg, cb, cbe, cbrmap, length); |
517 | 545 | ||
518 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { | 546 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { |
@@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) | |||
533 | zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); | 561 | zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); |
534 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | 562 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); |
535 | 563 | ||
536 | gru_dbg(grudev, "gts %p\n", gts); | 564 | gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n", |
565 | gts, gts->ts_cbr_map, gts->ts_dsr_map); | ||
537 | lock_cch_handle(cch); | 566 | lock_cch_handle(cch); |
538 | if (cch_interrupt_sync(cch)) | 567 | if (cch_interrupt_sync(cch)) |
539 | BUG(); | 568 | BUG(); |
@@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) | |||
549 | 578 | ||
550 | if (cch_deallocate(cch)) | 579 | if (cch_deallocate(cch)) |
551 | BUG(); | 580 | BUG(); |
552 | gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */ | ||
553 | unlock_cch_handle(cch); | 581 | unlock_cch_handle(cch); |
554 | 582 | ||
555 | gru_free_gru_context(gts); | 583 | gru_free_gru_context(gts); |
@@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts) | |||
565 | struct gru_context_configuration_handle *cch; | 593 | struct gru_context_configuration_handle *cch; |
566 | int i, err, asid, ctxnum = gts->ts_ctxnum; | 594 | int i, err, asid, ctxnum = gts->ts_ctxnum; |
567 | 595 | ||
568 | gru_dbg(grudev, "gts %p\n", gts); | ||
569 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | 596 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); |
570 | |||
571 | lock_cch_handle(cch); | 597 | lock_cch_handle(cch); |
572 | cch->tfm_fault_bit_enable = | 598 | cch->tfm_fault_bit_enable = |
573 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL | 599 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL |
@@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts) | |||
591 | cch->unmap_enable = 1; | 617 | cch->unmap_enable = 1; |
592 | cch->tfm_done_bit_enable = 1; | 618 | cch->tfm_done_bit_enable = 1; |
593 | cch->cb_int_enable = 1; | 619 | cch->cb_int_enable = 1; |
620 | cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */ | ||
594 | } else { | 621 | } else { |
595 | cch->unmap_enable = 0; | 622 | cch->unmap_enable = 0; |
596 | cch->tfm_done_bit_enable = 0; | 623 | cch->tfm_done_bit_enable = 0; |
@@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts) | |||
616 | if (cch_start(cch)) | 643 | if (cch_start(cch)) |
617 | BUG(); | 644 | BUG(); |
618 | unlock_cch_handle(cch); | 645 | unlock_cch_handle(cch); |
646 | |||
647 | gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n", | ||
648 | gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map, | ||
649 | (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select); | ||
619 | } | 650 | } |
620 | 651 | ||
621 | /* | 652 | /* |
622 | * Update fields in an active CCH: | 653 | * Update fields in an active CCH: |
623 | * - retarget interrupts on local blade | 654 | * - retarget interrupts on local blade |
624 | * - update sizeavail mask | 655 | * - update sizeavail mask |
625 | * - force a delayed context unload by clearing the CCH asids. This | ||
626 | * forces TLB misses for new GRU instructions. The context is unloaded | ||
627 | * when the next TLB miss occurs. | ||
628 | */ | 656 | */ |
629 | int gru_update_cch(struct gru_thread_state *gts, int force_unload) | 657 | int gru_update_cch(struct gru_thread_state *gts) |
630 | { | 658 | { |
631 | struct gru_context_configuration_handle *cch; | 659 | struct gru_context_configuration_handle *cch; |
632 | struct gru_state *gru = gts->ts_gru; | 660 | struct gru_state *gru = gts->ts_gru; |
@@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload) | |||
640 | goto exit; | 668 | goto exit; |
641 | if (cch_interrupt(cch)) | 669 | if (cch_interrupt(cch)) |
642 | BUG(); | 670 | BUG(); |
643 | if (!force_unload) { | 671 | for (i = 0; i < 8; i++) |
644 | for (i = 0; i < 8; i++) | 672 | cch->sizeavail[i] = gts->ts_sizeavail; |
645 | cch->sizeavail[i] = gts->ts_sizeavail; | 673 | gts->ts_tlb_int_select = gru_cpu_fault_map_id(); |
646 | gts->ts_tlb_int_select = gru_cpu_fault_map_id(); | 674 | cch->tlb_int_select = gru_cpu_fault_map_id(); |
647 | cch->tlb_int_select = gru_cpu_fault_map_id(); | 675 | cch->tfm_fault_bit_enable = |
648 | cch->tfm_fault_bit_enable = | 676 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL |
649 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL | 677 | || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); |
650 | || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | ||
651 | } else { | ||
652 | for (i = 0; i < 8; i++) | ||
653 | cch->asid[i] = 0; | ||
654 | cch->tfm_fault_bit_enable = 0; | ||
655 | cch->tlb_int_enable = 0; | ||
656 | gts->ts_force_unload = 1; | ||
657 | } | ||
658 | if (cch_start(cch)) | 678 | if (cch_start(cch)) |
659 | BUG(); | 679 | BUG(); |
660 | ret = 1; | 680 | ret = 1; |
@@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts) | |||
679 | 699 | ||
680 | gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, | 700 | gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, |
681 | gru_cpu_fault_map_id()); | 701 | gru_cpu_fault_map_id()); |
682 | return gru_update_cch(gts, 0); | 702 | return gru_update_cch(gts); |
703 | } | ||
704 | |||
705 | /* | ||
706 | * Check if a GRU context is allowed to use a specific chiplet. By default | ||
707 | * a context is assigned to any blade-local chiplet. However, users can | ||
708 | * override this. | ||
709 | * Returns 1 if assignment allowed, 0 otherwise | ||
710 | */ | ||
711 | static int gru_check_chiplet_assignment(struct gru_state *gru, | ||
712 | struct gru_thread_state *gts) | ||
713 | { | ||
714 | int blade_id; | ||
715 | int chiplet_id; | ||
716 | |||
717 | blade_id = gts->ts_user_blade_id; | ||
718 | if (blade_id < 0) | ||
719 | blade_id = uv_numa_blade_id(); | ||
720 | |||
721 | chiplet_id = gts->ts_user_chiplet_id; | ||
722 | return gru->gs_blade_id == blade_id && | ||
723 | (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id); | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * Unload the gru context if it is not assigned to the correct blade or | ||
728 | * chiplet. Misassignment can occur if the process migrates to a different | ||
729 | * blade or if the user changes the selected blade/chiplet. | ||
730 | */ | ||
731 | void gru_check_context_placement(struct gru_thread_state *gts) | ||
732 | { | ||
733 | struct gru_state *gru; | ||
734 | |||
735 | /* | ||
736 | * If the current task is the context owner, verify that the | ||
737 | * context is correctly placed. This test is skipped for non-owner | ||
738 | * references. Pthread apps use non-owner references to the CBRs. | ||
739 | */ | ||
740 | gru = gts->ts_gru; | ||
741 | if (!gru || gts->ts_tgid_owner != current->tgid) | ||
742 | return; | ||
743 | |||
744 | if (!gru_check_chiplet_assignment(gru, gts)) { | ||
745 | STAT(check_context_unload); | ||
746 | gru_unload_context(gts, 1); | ||
747 | } else if (gru_retarget_intr(gts)) { | ||
748 | STAT(check_context_retarget_intr); | ||
749 | } | ||
683 | } | 750 | } |
684 | 751 | ||
685 | 752 | ||
@@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts, | |||
712 | } | 779 | } |
713 | } | 780 | } |
714 | 781 | ||
715 | void gru_steal_context(struct gru_thread_state *gts, int blade_id) | 782 | void gru_steal_context(struct gru_thread_state *gts) |
716 | { | 783 | { |
717 | struct gru_blade_state *blade; | 784 | struct gru_blade_state *blade; |
718 | struct gru_state *gru, *gru0; | 785 | struct gru_state *gru, *gru0; |
719 | struct gru_thread_state *ngts = NULL; | 786 | struct gru_thread_state *ngts = NULL; |
720 | int ctxnum, ctxnum0, flag = 0, cbr, dsr; | 787 | int ctxnum, ctxnum0, flag = 0, cbr, dsr; |
788 | int blade_id; | ||
721 | 789 | ||
790 | blade_id = gts->ts_user_blade_id; | ||
791 | if (blade_id < 0) | ||
792 | blade_id = uv_numa_blade_id(); | ||
722 | cbr = gts->ts_cbr_au_count; | 793 | cbr = gts->ts_cbr_au_count; |
723 | dsr = gts->ts_dsr_au_count; | 794 | dsr = gts->ts_dsr_au_count; |
724 | 795 | ||
@@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id) | |||
729 | gru = blade->bs_lru_gru; | 800 | gru = blade->bs_lru_gru; |
730 | if (ctxnum == 0) | 801 | if (ctxnum == 0) |
731 | gru = next_gru(blade, gru); | 802 | gru = next_gru(blade, gru); |
803 | blade->bs_lru_gru = gru; | ||
804 | blade->bs_lru_ctxnum = ctxnum; | ||
732 | ctxnum0 = ctxnum; | 805 | ctxnum0 = ctxnum; |
733 | gru0 = gru; | 806 | gru0 = gru; |
734 | while (1) { | 807 | while (1) { |
735 | if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) | 808 | if (gru_check_chiplet_assignment(gru, gts)) { |
736 | break; | 809 | if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) |
737 | spin_lock(&gru->gs_lock); | ||
738 | for (; ctxnum < GRU_NUM_CCH; ctxnum++) { | ||
739 | if (flag && gru == gru0 && ctxnum == ctxnum0) | ||
740 | break; | 810 | break; |
741 | ngts = gru->gs_gts[ctxnum]; | 811 | spin_lock(&gru->gs_lock); |
742 | /* | 812 | for (; ctxnum < GRU_NUM_CCH; ctxnum++) { |
743 | * We are grabbing locks out of order, so trylock is | 813 | if (flag && gru == gru0 && ctxnum == ctxnum0) |
744 | * needed. GTSs are usually not locked, so the odds of | 814 | break; |
745 | * success are high. If trylock fails, try to steal a | 815 | ngts = gru->gs_gts[ctxnum]; |
746 | * different GSEG. | 816 | /* |
747 | */ | 817 | * We are grabbing locks out of order, so trylock is |
748 | if (ngts && is_gts_stealable(ngts, blade)) | 818 | * needed. GTSs are usually not locked, so the odds of |
819 | * success are high. If trylock fails, try to steal a | ||
820 | * different GSEG. | ||
821 | */ | ||
822 | if (ngts && is_gts_stealable(ngts, blade)) | ||
823 | break; | ||
824 | ngts = NULL; | ||
825 | } | ||
826 | spin_unlock(&gru->gs_lock); | ||
827 | if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) | ||
749 | break; | 828 | break; |
750 | ngts = NULL; | ||
751 | flag = 1; | ||
752 | } | 829 | } |
753 | spin_unlock(&gru->gs_lock); | 830 | if (flag && gru == gru0) |
754 | if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) | ||
755 | break; | 831 | break; |
832 | flag = 1; | ||
756 | ctxnum = 0; | 833 | ctxnum = 0; |
757 | gru = next_gru(blade, gru); | 834 | gru = next_gru(blade, gru); |
758 | } | 835 | } |
759 | blade->bs_lru_gru = gru; | ||
760 | blade->bs_lru_ctxnum = ctxnum; | ||
761 | spin_unlock(&blade->bs_lock); | 836 | spin_unlock(&blade->bs_lock); |
762 | 837 | ||
763 | if (ngts) { | 838 | if (ngts) { |
@@ -776,19 +851,34 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id) | |||
776 | } | 851 | } |
777 | 852 | ||
778 | /* | 853 | /* |
854 | * Assign a gru context. | ||
855 | */ | ||
856 | static int gru_assign_context_number(struct gru_state *gru) | ||
857 | { | ||
858 | int ctxnum; | ||
859 | |||
860 | ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); | ||
861 | __set_bit(ctxnum, &gru->gs_context_map); | ||
862 | return ctxnum; | ||
863 | } | ||
864 | |||
865 | /* | ||
779 | * Scan the GRUs on the local blade & assign a GRU context. | 866 | * Scan the GRUs on the local blade & assign a GRU context. |
780 | */ | 867 | */ |
781 | struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, | 868 | struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) |
782 | int blade) | ||
783 | { | 869 | { |
784 | struct gru_state *gru, *grux; | 870 | struct gru_state *gru, *grux; |
785 | int i, max_active_contexts; | 871 | int i, max_active_contexts; |
872 | int blade_id = gts->ts_user_blade_id; | ||
786 | 873 | ||
787 | 874 | if (blade_id < 0) | |
875 | blade_id = uv_numa_blade_id(); | ||
788 | again: | 876 | again: |
789 | gru = NULL; | 877 | gru = NULL; |
790 | max_active_contexts = GRU_NUM_CCH; | 878 | max_active_contexts = GRU_NUM_CCH; |
791 | for_each_gru_on_blade(grux, blade, i) { | 879 | for_each_gru_on_blade(grux, blade_id, i) { |
880 | if (!gru_check_chiplet_assignment(grux, gts)) | ||
881 | continue; | ||
792 | if (check_gru_resources(grux, gts->ts_cbr_au_count, | 882 | if (check_gru_resources(grux, gts->ts_cbr_au_count, |
793 | gts->ts_dsr_au_count, | 883 | gts->ts_dsr_au_count, |
794 | max_active_contexts)) { | 884 | max_active_contexts)) { |
@@ -809,12 +899,9 @@ again: | |||
809 | reserve_gru_resources(gru, gts); | 899 | reserve_gru_resources(gru, gts); |
810 | gts->ts_gru = gru; | 900 | gts->ts_gru = gru; |
811 | gts->ts_blade = gru->gs_blade_id; | 901 | gts->ts_blade = gru->gs_blade_id; |
812 | gts->ts_ctxnum = | 902 | gts->ts_ctxnum = gru_assign_context_number(gru); |
813 | find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); | ||
814 | BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH); | ||
815 | atomic_inc(>s->ts_refcnt); | 903 | atomic_inc(>s->ts_refcnt); |
816 | gru->gs_gts[gts->ts_ctxnum] = gts; | 904 | gru->gs_gts[gts->ts_ctxnum] = gts; |
817 | __set_bit(gts->ts_ctxnum, &gru->gs_context_map); | ||
818 | spin_unlock(&gru->gs_lock); | 905 | spin_unlock(&gru->gs_lock); |
819 | 906 | ||
820 | STAT(assign_context); | 907 | STAT(assign_context); |
@@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
842 | { | 929 | { |
843 | struct gru_thread_state *gts; | 930 | struct gru_thread_state *gts; |
844 | unsigned long paddr, vaddr; | 931 | unsigned long paddr, vaddr; |
845 | int blade_id; | ||
846 | 932 | ||
847 | vaddr = (unsigned long)vmf->virtual_address; | 933 | vaddr = (unsigned long)vmf->virtual_address; |
848 | gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", | 934 | gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", |
@@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
857 | again: | 943 | again: |
858 | mutex_lock(>s->ts_ctxlock); | 944 | mutex_lock(>s->ts_ctxlock); |
859 | preempt_disable(); | 945 | preempt_disable(); |
860 | blade_id = uv_numa_blade_id(); | ||
861 | 946 | ||
862 | if (gts->ts_gru) { | 947 | gru_check_context_placement(gts); |
863 | if (gts->ts_gru->gs_blade_id != blade_id) { | ||
864 | STAT(migrated_nopfn_unload); | ||
865 | gru_unload_context(gts, 1); | ||
866 | } else { | ||
867 | if (gru_retarget_intr(gts)) | ||
868 | STAT(migrated_nopfn_retarget); | ||
869 | } | ||
870 | } | ||
871 | 948 | ||
872 | if (!gts->ts_gru) { | 949 | if (!gts->ts_gru) { |
873 | STAT(load_user_context); | 950 | STAT(load_user_context); |
874 | if (!gru_assign_gru_context(gts, blade_id)) { | 951 | if (!gru_assign_gru_context(gts)) { |
875 | preempt_enable(); | 952 | preempt_enable(); |
876 | mutex_unlock(>s->ts_ctxlock); | 953 | mutex_unlock(>s->ts_ctxlock); |
877 | set_current_state(TASK_INTERRUPTIBLE); | 954 | set_current_state(TASK_INTERRUPTIBLE); |
878 | schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ | 955 | schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ |
879 | blade_id = uv_numa_blade_id(); | ||
880 | if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) | 956 | if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) |
881 | gru_steal_context(gts, blade_id); | 957 | gru_steal_context(gts); |
882 | goto again; | 958 | goto again; |
883 | } | 959 | } |
884 | gru_load_context(gts); | 960 | gru_load_context(gts); |
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 3f2375c5ba5b..7768b87d995b 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c | |||
@@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) | |||
36 | { | 36 | { |
37 | unsigned long val = atomic_long_read(v); | 37 | unsigned long val = atomic_long_read(v); |
38 | 38 | ||
39 | if (val) | 39 | seq_printf(s, "%16lu %s\n", val, id); |
40 | seq_printf(s, "%16lu %s\n", val, id); | ||
41 | } | 40 | } |
42 | 41 | ||
43 | static int statistics_show(struct seq_file *s, void *p) | 42 | static int statistics_show(struct seq_file *s, void *p) |
@@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p) | |||
46 | printstat(s, vdata_free); | 45 | printstat(s, vdata_free); |
47 | printstat(s, gts_alloc); | 46 | printstat(s, gts_alloc); |
48 | printstat(s, gts_free); | 47 | printstat(s, gts_free); |
49 | printstat(s, vdata_double_alloc); | 48 | printstat(s, gms_alloc); |
49 | printstat(s, gms_free); | ||
50 | printstat(s, gts_double_allocate); | 50 | printstat(s, gts_double_allocate); |
51 | printstat(s, assign_context); | 51 | printstat(s, assign_context); |
52 | printstat(s, assign_context_failed); | 52 | printstat(s, assign_context_failed); |
@@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p) | |||
59 | printstat(s, steal_kernel_context); | 59 | printstat(s, steal_kernel_context); |
60 | printstat(s, steal_context_failed); | 60 | printstat(s, steal_context_failed); |
61 | printstat(s, nopfn); | 61 | printstat(s, nopfn); |
62 | printstat(s, break_cow); | ||
63 | printstat(s, asid_new); | 62 | printstat(s, asid_new); |
64 | printstat(s, asid_next); | 63 | printstat(s, asid_next); |
65 | printstat(s, asid_wrap); | 64 | printstat(s, asid_wrap); |
66 | printstat(s, asid_reuse); | 65 | printstat(s, asid_reuse); |
67 | printstat(s, intr); | 66 | printstat(s, intr); |
67 | printstat(s, intr_cbr); | ||
68 | printstat(s, intr_tfh); | ||
69 | printstat(s, intr_spurious); | ||
68 | printstat(s, intr_mm_lock_failed); | 70 | printstat(s, intr_mm_lock_failed); |
69 | printstat(s, call_os); | 71 | printstat(s, call_os); |
70 | printstat(s, call_os_offnode_reference); | ||
71 | printstat(s, call_os_check_for_bug); | ||
72 | printstat(s, call_os_wait_queue); | 72 | printstat(s, call_os_wait_queue); |
73 | printstat(s, user_flush_tlb); | 73 | printstat(s, user_flush_tlb); |
74 | printstat(s, user_unload_context); | 74 | printstat(s, user_unload_context); |
75 | printstat(s, user_exception); | 75 | printstat(s, user_exception); |
76 | printstat(s, set_context_option); | 76 | printstat(s, set_context_option); |
77 | printstat(s, migrate_check); | 77 | printstat(s, check_context_retarget_intr); |
78 | printstat(s, migrated_retarget); | 78 | printstat(s, check_context_unload); |
79 | printstat(s, migrated_unload); | ||
80 | printstat(s, migrated_unload_delay); | ||
81 | printstat(s, migrated_nopfn_retarget); | ||
82 | printstat(s, migrated_nopfn_unload); | ||
83 | printstat(s, tlb_dropin); | 79 | printstat(s, tlb_dropin); |
80 | printstat(s, tlb_preload_page); | ||
84 | printstat(s, tlb_dropin_fail_no_asid); | 81 | printstat(s, tlb_dropin_fail_no_asid); |
85 | printstat(s, tlb_dropin_fail_upm); | 82 | printstat(s, tlb_dropin_fail_upm); |
86 | printstat(s, tlb_dropin_fail_invalid); | 83 | printstat(s, tlb_dropin_fail_invalid); |
@@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p) | |||
88 | printstat(s, tlb_dropin_fail_idle); | 85 | printstat(s, tlb_dropin_fail_idle); |
89 | printstat(s, tlb_dropin_fail_fmm); | 86 | printstat(s, tlb_dropin_fail_fmm); |
90 | printstat(s, tlb_dropin_fail_no_exception); | 87 | printstat(s, tlb_dropin_fail_no_exception); |
91 | printstat(s, tlb_dropin_fail_no_exception_war); | ||
92 | printstat(s, tfh_stale_on_fault); | 88 | printstat(s, tfh_stale_on_fault); |
93 | printstat(s, mmu_invalidate_range); | 89 | printstat(s, mmu_invalidate_range); |
94 | printstat(s, mmu_invalidate_page); | 90 | printstat(s, mmu_invalidate_page); |
95 | printstat(s, mmu_clear_flush_young); | ||
96 | printstat(s, flush_tlb); | 91 | printstat(s, flush_tlb); |
97 | printstat(s, flush_tlb_gru); | 92 | printstat(s, flush_tlb_gru); |
98 | printstat(s, flush_tlb_gru_tgh); | 93 | printstat(s, flush_tlb_gru_tgh); |
99 | printstat(s, flush_tlb_gru_zero_asid); | 94 | printstat(s, flush_tlb_gru_zero_asid); |
100 | printstat(s, copy_gpa); | 95 | printstat(s, copy_gpa); |
96 | printstat(s, read_gpa); | ||
101 | printstat(s, mesq_receive); | 97 | printstat(s, mesq_receive); |
102 | printstat(s, mesq_receive_none); | 98 | printstat(s, mesq_receive_none); |
103 | printstat(s, mesq_send); | 99 | printstat(s, mesq_send); |
@@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p) | |||
108 | printstat(s, mesq_send_qlimit_reached); | 104 | printstat(s, mesq_send_qlimit_reached); |
109 | printstat(s, mesq_send_amo_nacked); | 105 | printstat(s, mesq_send_amo_nacked); |
110 | printstat(s, mesq_send_put_nacked); | 106 | printstat(s, mesq_send_put_nacked); |
111 | printstat(s, mesq_qf_not_full); | ||
112 | printstat(s, mesq_qf_locked); | 107 | printstat(s, mesq_qf_locked); |
113 | printstat(s, mesq_qf_noop_not_full); | 108 | printstat(s, mesq_qf_noop_not_full); |
114 | printstat(s, mesq_qf_switch_head_failed); | 109 | printstat(s, mesq_qf_switch_head_failed); |
@@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p) | |||
118 | printstat(s, mesq_noop_qlimit_reached); | 113 | printstat(s, mesq_noop_qlimit_reached); |
119 | printstat(s, mesq_noop_amo_nacked); | 114 | printstat(s, mesq_noop_amo_nacked); |
120 | printstat(s, mesq_noop_put_nacked); | 115 | printstat(s, mesq_noop_put_nacked); |
116 | printstat(s, mesq_noop_page_overflow); | ||
121 | return 0; | 117 | return 0; |
122 | } | 118 | } |
123 | 119 | ||
@@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p) | |||
133 | int op; | 129 | int op; |
134 | unsigned long total, count, max; | 130 | unsigned long total, count, max; |
135 | static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", | 131 | static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", |
136 | "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; | 132 | "cch_interrupt_sync", "cch_deallocate", "tfh_write_only", |
133 | "tfh_write_restart", "tgh_invalidate"}; | ||
137 | 134 | ||
135 | seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); | ||
138 | for (op = 0; op < mcsop_last; op++) { | 136 | for (op = 0; op < mcsop_last; op++) { |
139 | count = atomic_long_read(&mcs_op_statistics[op].count); | 137 | count = atomic_long_read(&mcs_op_statistics[op].count); |
140 | total = atomic_long_read(&mcs_op_statistics[op].total); | 138 | total = atomic_long_read(&mcs_op_statistics[op].total); |
@@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file, | |||
154 | 152 | ||
155 | static int options_show(struct seq_file *s, void *p) | 153 | static int options_show(struct seq_file *s, void *p) |
156 | { | 154 | { |
155 | seq_printf(s, "#bitmask: 1=trace, 2=statistics\n"); | ||
157 | seq_printf(s, "0x%lx\n", gru_options); | 156 | seq_printf(s, "0x%lx\n", gru_options); |
158 | return 0; | 157 | return 0; |
159 | } | 158 | } |
@@ -183,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data) | |||
183 | const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" }; | 182 | const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" }; |
184 | 183 | ||
185 | if (gid == 0) | 184 | if (gid == 0) |
186 | seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid", | 185 | seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid", |
187 | "ctx#", "pid", "cbrs", "dsbytes", "mode"); | 186 | "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode"); |
188 | if (gru) | 187 | if (gru) |
189 | for (i = 0; i < GRU_NUM_CCH; i++) { | 188 | for (i = 0; i < GRU_NUM_CCH; i++) { |
190 | ts = gru->gs_gts[i]; | 189 | ts = gru->gs_gts[i]; |
191 | if (!ts) | 190 | if (!ts) |
192 | continue; | 191 | continue; |
193 | seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n", | 192 | seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n", |
194 | gru->gs_gid, gru->gs_blade_id, i, | 193 | gru->gs_gid, gru->gs_blade_id, i, |
195 | ts->ts_tgid_owner, | 194 | is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid, |
195 | is_kernel_context(ts) ? 0 : ts->ts_tgid_owner, | ||
196 | ts->ts_cbr_au_count * GRU_CBR_AU_SIZE, | 196 | ts->ts_cbr_au_count * GRU_CBR_AU_SIZE, |
197 | ts->ts_cbr_au_count * GRU_DSR_AU_BYTES, | 197 | ts->ts_cbr_au_count * GRU_DSR_AU_BYTES, |
198 | mode[ts->ts_user_options & | 198 | mode[ts->ts_user_options & |
@@ -355,7 +355,7 @@ static void delete_proc_files(void) | |||
355 | for (p = proc_files; p->name; p++) | 355 | for (p = proc_files; p->name; p++) |
356 | if (p->entry) | 356 | if (p->entry) |
357 | remove_proc_entry(p->name, proc_gru); | 357 | remove_proc_entry(p->name, proc_gru); |
358 | remove_proc_entry("gru", NULL); | 358 | remove_proc_entry("gru", proc_gru->parent); |
359 | } | 359 | } |
360 | } | 360 | } |
361 | 361 | ||
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 46990bcfa536..7a8b9068ea03 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h | |||
@@ -161,7 +161,7 @@ extern unsigned int gru_max_gids; | |||
161 | #define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) | 161 | #define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) |
162 | 162 | ||
163 | #define GRU_DRIVER_ID_STR "SGI GRU Device Driver" | 163 | #define GRU_DRIVER_ID_STR "SGI GRU Device Driver" |
164 | #define GRU_DRIVER_VERSION_STR "0.80" | 164 | #define GRU_DRIVER_VERSION_STR "0.85" |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * GRU statistics. | 167 | * GRU statistics. |
@@ -171,7 +171,8 @@ struct gru_stats_s { | |||
171 | atomic_long_t vdata_free; | 171 | atomic_long_t vdata_free; |
172 | atomic_long_t gts_alloc; | 172 | atomic_long_t gts_alloc; |
173 | atomic_long_t gts_free; | 173 | atomic_long_t gts_free; |
174 | atomic_long_t vdata_double_alloc; | 174 | atomic_long_t gms_alloc; |
175 | atomic_long_t gms_free; | ||
175 | atomic_long_t gts_double_allocate; | 176 | atomic_long_t gts_double_allocate; |
176 | atomic_long_t assign_context; | 177 | atomic_long_t assign_context; |
177 | atomic_long_t assign_context_failed; | 178 | atomic_long_t assign_context_failed; |
@@ -184,28 +185,25 @@ struct gru_stats_s { | |||
184 | atomic_long_t steal_kernel_context; | 185 | atomic_long_t steal_kernel_context; |
185 | atomic_long_t steal_context_failed; | 186 | atomic_long_t steal_context_failed; |
186 | atomic_long_t nopfn; | 187 | atomic_long_t nopfn; |
187 | atomic_long_t break_cow; | ||
188 | atomic_long_t asid_new; | 188 | atomic_long_t asid_new; |
189 | atomic_long_t asid_next; | 189 | atomic_long_t asid_next; |
190 | atomic_long_t asid_wrap; | 190 | atomic_long_t asid_wrap; |
191 | atomic_long_t asid_reuse; | 191 | atomic_long_t asid_reuse; |
192 | atomic_long_t intr; | 192 | atomic_long_t intr; |
193 | atomic_long_t intr_cbr; | ||
194 | atomic_long_t intr_tfh; | ||
195 | atomic_long_t intr_spurious; | ||
193 | atomic_long_t intr_mm_lock_failed; | 196 | atomic_long_t intr_mm_lock_failed; |
194 | atomic_long_t call_os; | 197 | atomic_long_t call_os; |
195 | atomic_long_t call_os_offnode_reference; | ||
196 | atomic_long_t call_os_check_for_bug; | ||
197 | atomic_long_t call_os_wait_queue; | 198 | atomic_long_t call_os_wait_queue; |
198 | atomic_long_t user_flush_tlb; | 199 | atomic_long_t user_flush_tlb; |
199 | atomic_long_t user_unload_context; | 200 | atomic_long_t user_unload_context; |
200 | atomic_long_t user_exception; | 201 | atomic_long_t user_exception; |
201 | atomic_long_t set_context_option; | 202 | atomic_long_t set_context_option; |
202 | atomic_long_t migrate_check; | 203 | atomic_long_t check_context_retarget_intr; |
203 | atomic_long_t migrated_retarget; | 204 | atomic_long_t check_context_unload; |
204 | atomic_long_t migrated_unload; | ||
205 | atomic_long_t migrated_unload_delay; | ||
206 | atomic_long_t migrated_nopfn_retarget; | ||
207 | atomic_long_t migrated_nopfn_unload; | ||
208 | atomic_long_t tlb_dropin; | 205 | atomic_long_t tlb_dropin; |
206 | atomic_long_t tlb_preload_page; | ||
209 | atomic_long_t tlb_dropin_fail_no_asid; | 207 | atomic_long_t tlb_dropin_fail_no_asid; |
210 | atomic_long_t tlb_dropin_fail_upm; | 208 | atomic_long_t tlb_dropin_fail_upm; |
211 | atomic_long_t tlb_dropin_fail_invalid; | 209 | atomic_long_t tlb_dropin_fail_invalid; |
@@ -213,17 +211,16 @@ struct gru_stats_s { | |||
213 | atomic_long_t tlb_dropin_fail_idle; | 211 | atomic_long_t tlb_dropin_fail_idle; |
214 | atomic_long_t tlb_dropin_fail_fmm; | 212 | atomic_long_t tlb_dropin_fail_fmm; |
215 | atomic_long_t tlb_dropin_fail_no_exception; | 213 | atomic_long_t tlb_dropin_fail_no_exception; |
216 | atomic_long_t tlb_dropin_fail_no_exception_war; | ||
217 | atomic_long_t tfh_stale_on_fault; | 214 | atomic_long_t tfh_stale_on_fault; |
218 | atomic_long_t mmu_invalidate_range; | 215 | atomic_long_t mmu_invalidate_range; |
219 | atomic_long_t mmu_invalidate_page; | 216 | atomic_long_t mmu_invalidate_page; |
220 | atomic_long_t mmu_clear_flush_young; | ||
221 | atomic_long_t flush_tlb; | 217 | atomic_long_t flush_tlb; |
222 | atomic_long_t flush_tlb_gru; | 218 | atomic_long_t flush_tlb_gru; |
223 | atomic_long_t flush_tlb_gru_tgh; | 219 | atomic_long_t flush_tlb_gru_tgh; |
224 | atomic_long_t flush_tlb_gru_zero_asid; | 220 | atomic_long_t flush_tlb_gru_zero_asid; |
225 | 221 | ||
226 | atomic_long_t copy_gpa; | 222 | atomic_long_t copy_gpa; |
223 | atomic_long_t read_gpa; | ||
227 | 224 | ||
228 | atomic_long_t mesq_receive; | 225 | atomic_long_t mesq_receive; |
229 | atomic_long_t mesq_receive_none; | 226 | atomic_long_t mesq_receive_none; |
@@ -235,7 +232,7 @@ struct gru_stats_s { | |||
235 | atomic_long_t mesq_send_qlimit_reached; | 232 | atomic_long_t mesq_send_qlimit_reached; |
236 | atomic_long_t mesq_send_amo_nacked; | 233 | atomic_long_t mesq_send_amo_nacked; |
237 | atomic_long_t mesq_send_put_nacked; | 234 | atomic_long_t mesq_send_put_nacked; |
238 | atomic_long_t mesq_qf_not_full; | 235 | atomic_long_t mesq_page_overflow; |
239 | atomic_long_t mesq_qf_locked; | 236 | atomic_long_t mesq_qf_locked; |
240 | atomic_long_t mesq_qf_noop_not_full; | 237 | atomic_long_t mesq_qf_noop_not_full; |
241 | atomic_long_t mesq_qf_switch_head_failed; | 238 | atomic_long_t mesq_qf_switch_head_failed; |
@@ -245,11 +242,13 @@ struct gru_stats_s { | |||
245 | atomic_long_t mesq_noop_qlimit_reached; | 242 | atomic_long_t mesq_noop_qlimit_reached; |
246 | atomic_long_t mesq_noop_amo_nacked; | 243 | atomic_long_t mesq_noop_amo_nacked; |
247 | atomic_long_t mesq_noop_put_nacked; | 244 | atomic_long_t mesq_noop_put_nacked; |
245 | atomic_long_t mesq_noop_page_overflow; | ||
248 | 246 | ||
249 | }; | 247 | }; |
250 | 248 | ||
251 | enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, | 249 | enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, |
252 | cchop_deallocate, tghop_invalidate, mcsop_last}; | 250 | cchop_deallocate, tfhop_write_only, tfhop_write_restart, |
251 | tghop_invalidate, mcsop_last}; | ||
253 | 252 | ||
254 | struct mcs_op_statistic { | 253 | struct mcs_op_statistic { |
255 | atomic_long_t count; | 254 | atomic_long_t count; |
@@ -259,8 +258,8 @@ struct mcs_op_statistic { | |||
259 | 258 | ||
260 | extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; | 259 | extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; |
261 | 260 | ||
262 | #define OPT_DPRINT 1 | 261 | #define OPT_DPRINT 1 |
263 | #define OPT_STATS 2 | 262 | #define OPT_STATS 2 |
264 | 263 | ||
265 | 264 | ||
266 | #define IRQ_GRU 110 /* Starting IRQ number for interrupts */ | 265 | #define IRQ_GRU 110 /* Starting IRQ number for interrupts */ |
@@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; | |||
283 | #define gru_dbg(dev, fmt, x...) \ | 282 | #define gru_dbg(dev, fmt, x...) \ |
284 | do { \ | 283 | do { \ |
285 | if (gru_options & OPT_DPRINT) \ | 284 | if (gru_options & OPT_DPRINT) \ |
286 | dev_dbg(dev, "%s: " fmt, __func__, x); \ | 285 | printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\ |
287 | } while (0) | 286 | } while (0) |
288 | #else | 287 | #else |
289 | #define gru_dbg(x...) | 288 | #define gru_dbg(x...) |
@@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; | |||
297 | #define ASID_INC 8 /* number of regions */ | 296 | #define ASID_INC 8 /* number of regions */ |
298 | 297 | ||
299 | /* Generate a GRU asid value from a GRU base asid & a virtual address. */ | 298 | /* Generate a GRU asid value from a GRU base asid & a virtual address. */ |
300 | #if defined CONFIG_IA64 | ||
301 | #define VADDR_HI_BIT 64 | 299 | #define VADDR_HI_BIT 64 |
302 | #elif defined CONFIG_X86_64 | ||
303 | #define VADDR_HI_BIT 48 | ||
304 | #else | ||
305 | #error "Unsupported architecture" | ||
306 | #endif | ||
307 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) | 300 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) |
308 | #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) | 301 | #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) |
309 | 302 | ||
@@ -345,6 +338,7 @@ struct gru_vma_data { | |||
345 | long vd_user_options;/* misc user option flags */ | 338 | long vd_user_options;/* misc user option flags */ |
346 | int vd_cbr_au_count; | 339 | int vd_cbr_au_count; |
347 | int vd_dsr_au_count; | 340 | int vd_dsr_au_count; |
341 | unsigned char vd_tlb_preload_count; | ||
348 | }; | 342 | }; |
349 | 343 | ||
350 | /* | 344 | /* |
@@ -360,6 +354,7 @@ struct gru_thread_state { | |||
360 | struct gru_state *ts_gru; /* GRU where the context is | 354 | struct gru_state *ts_gru; /* GRU where the context is |
361 | loaded */ | 355 | loaded */ |
362 | struct gru_mm_struct *ts_gms; /* asid & ioproc struct */ | 356 | struct gru_mm_struct *ts_gms; /* asid & ioproc struct */ |
357 | unsigned char ts_tlb_preload_count; /* TLB preload pages */ | ||
363 | unsigned long ts_cbr_map; /* map of allocated CBRs */ | 358 | unsigned long ts_cbr_map; /* map of allocated CBRs */ |
364 | unsigned long ts_dsr_map; /* map of allocated DATA | 359 | unsigned long ts_dsr_map; /* map of allocated DATA |
365 | resources */ | 360 | resources */ |
@@ -368,6 +363,8 @@ struct gru_thread_state { | |||
368 | long ts_user_options;/* misc user option flags */ | 363 | long ts_user_options;/* misc user option flags */ |
369 | pid_t ts_tgid_owner; /* task that is using the | 364 | pid_t ts_tgid_owner; /* task that is using the |
370 | context - for migration */ | 365 | context - for migration */ |
366 | short ts_user_blade_id;/* user selected blade */ | ||
367 | char ts_user_chiplet_id;/* user selected chiplet */ | ||
371 | unsigned short ts_sizeavail; /* Pagesizes in use */ | 368 | unsigned short ts_sizeavail; /* Pagesizes in use */ |
372 | int ts_tsid; /* thread that owns the | 369 | int ts_tsid; /* thread that owns the |
373 | structure */ | 370 | structure */ |
@@ -384,13 +381,11 @@ struct gru_thread_state { | |||
384 | char ts_blade; /* If >= 0, migrate context if | 381 | char ts_blade; /* If >= 0, migrate context if |
385 | ref from diferent blade */ | 382 | ref from diferent blade */ |
386 | char ts_force_cch_reload; | 383 | char ts_force_cch_reload; |
387 | char ts_force_unload;/* force context to be unloaded | ||
388 | after migration */ | ||
389 | char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each | 384 | char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each |
390 | allocated CB */ | 385 | allocated CB */ |
391 | int ts_data_valid; /* Indicates if ts_gdata has | 386 | int ts_data_valid; /* Indicates if ts_gdata has |
392 | valid data */ | 387 | valid data */ |
393 | struct gts_statistics ustats; /* User statistics */ | 388 | struct gru_gseg_statistics ustats; /* User statistics */ |
394 | unsigned long ts_gdata[0]; /* save area for GRU data (CB, | 389 | unsigned long ts_gdata[0]; /* save area for GRU data (CB, |
395 | DS, CBE) */ | 390 | DS, CBE) */ |
396 | }; | 391 | }; |
@@ -422,6 +417,7 @@ struct gru_state { | |||
422 | gru segments (64) */ | 417 | gru segments (64) */ |
423 | unsigned short gs_gid; /* unique GRU number */ | 418 | unsigned short gs_gid; /* unique GRU number */ |
424 | unsigned short gs_blade_id; /* blade of GRU */ | 419 | unsigned short gs_blade_id; /* blade of GRU */ |
420 | unsigned char gs_chiplet_id; /* blade chiplet of GRU */ | ||
425 | unsigned char gs_tgh_local_shift; /* used to pick TGH for | 421 | unsigned char gs_tgh_local_shift; /* used to pick TGH for |
426 | local flush */ | 422 | local flush */ |
427 | unsigned char gs_tgh_first_remote; /* starting TGH# for | 423 | unsigned char gs_tgh_first_remote; /* starting TGH# for |
@@ -453,6 +449,7 @@ struct gru_state { | |||
453 | in use */ | 449 | in use */ |
454 | struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using | 450 | struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using |
455 | the context */ | 451 | the context */ |
452 | int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */ | ||
456 | }; | 453 | }; |
457 | 454 | ||
458 | /* | 455 | /* |
@@ -519,8 +516,7 @@ struct gru_blade_state { | |||
519 | 516 | ||
520 | /* Scan all active GRUs in a GRU bitmap */ | 517 | /* Scan all active GRUs in a GRU bitmap */ |
521 | #define for_each_gru_in_bitmap(gid, map) \ | 518 | #define for_each_gru_in_bitmap(gid, map) \ |
522 | for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\ | 519 | for_each_set_bit((gid), (map), GRU_MAX_GRUS) |
523 | (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid))) | ||
524 | 520 | ||
525 | /* Scan all active GRUs on a specific blade */ | 521 | /* Scan all active GRUs on a specific blade */ |
526 | #define for_each_gru_on_blade(gru, nid, i) \ | 522 | #define for_each_gru_on_blade(gru, nid, i) \ |
@@ -539,23 +535,17 @@ struct gru_blade_state { | |||
539 | 535 | ||
540 | /* Scan each CBR whose bit is set in a TFM (or copy of) */ | 536 | /* Scan each CBR whose bit is set in a TFM (or copy of) */ |
541 | #define for_each_cbr_in_tfm(i, map) \ | 537 | #define for_each_cbr_in_tfm(i, map) \ |
542 | for ((i) = find_first_bit(map, GRU_NUM_CBE); \ | 538 | for_each_set_bit((i), (map), GRU_NUM_CBE) |
543 | (i) < GRU_NUM_CBE; \ | ||
544 | (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i)) | ||
545 | 539 | ||
546 | /* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ | 540 | /* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ |
547 | #define for_each_cbr_in_allocation_map(i, map, k) \ | 541 | #define for_each_cbr_in_allocation_map(i, map, k) \ |
548 | for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \ | 542 | for_each_set_bit((k), (map), GRU_CBR_AU) \ |
549 | (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \ | ||
550 | for ((i) = (k)*GRU_CBR_AU_SIZE; \ | 543 | for ((i) = (k)*GRU_CBR_AU_SIZE; \ |
551 | (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) | 544 | (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) |
552 | 545 | ||
553 | /* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */ | 546 | /* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */ |
554 | #define for_each_dsr_in_allocation_map(i, map, k) \ | 547 | #define for_each_dsr_in_allocation_map(i, map, k) \ |
555 | for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\ | 548 | for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \ |
556 | (k) < GRU_DSR_AU; \ | ||
557 | (k) = find_next_bit((const unsigned long *)map, \ | ||
558 | GRU_DSR_AU, (k) + 1)) \ | ||
559 | for ((i) = (k) * GRU_DSR_AU_CL; \ | 549 | for ((i) = (k) * GRU_DSR_AU_CL; \ |
560 | (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) | 550 | (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) |
561 | 551 | ||
@@ -619,6 +609,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts) | |||
619 | return !gts->ts_mm; | 609 | return !gts->ts_mm; |
620 | } | 610 | } |
621 | 611 | ||
612 | /* | ||
613 | * The following are for Nehelem-EX. A more general scheme is needed for | ||
614 | * future processors. | ||
615 | */ | ||
616 | #define UV_MAX_INT_CORES 8 | ||
617 | #define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1) | ||
618 | #define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1) | ||
619 | #define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \ | ||
620 | ((cpu_physical_id(p) >> 1) & 3)) | ||
622 | /*----------------------------------------------------------------------------- | 621 | /*----------------------------------------------------------------------------- |
623 | * Function prototypes & externs | 622 | * Function prototypes & externs |
624 | */ | 623 | */ |
@@ -633,24 +632,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct | |||
633 | *vma, int tsid); | 632 | *vma, int tsid); |
634 | extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct | 633 | extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct |
635 | *vma, int tsid); | 634 | *vma, int tsid); |
636 | extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, | 635 | extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts); |
637 | int blade); | ||
638 | extern void gru_load_context(struct gru_thread_state *gts); | 636 | extern void gru_load_context(struct gru_thread_state *gts); |
639 | extern void gru_steal_context(struct gru_thread_state *gts, int blade_id); | 637 | extern void gru_steal_context(struct gru_thread_state *gts); |
640 | extern void gru_unload_context(struct gru_thread_state *gts, int savestate); | 638 | extern void gru_unload_context(struct gru_thread_state *gts, int savestate); |
641 | extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); | 639 | extern int gru_update_cch(struct gru_thread_state *gts); |
642 | extern void gts_drop(struct gru_thread_state *gts); | 640 | extern void gts_drop(struct gru_thread_state *gts); |
643 | extern void gru_tgh_flush_init(struct gru_state *gru); | 641 | extern void gru_tgh_flush_init(struct gru_state *gru); |
644 | extern int gru_kservices_init(void); | 642 | extern int gru_kservices_init(void); |
645 | extern void gru_kservices_exit(void); | 643 | extern void gru_kservices_exit(void); |
644 | extern irqreturn_t gru0_intr(int irq, void *dev_id); | ||
645 | extern irqreturn_t gru1_intr(int irq, void *dev_id); | ||
646 | extern irqreturn_t gru_intr_mblade(int irq, void *dev_id); | ||
646 | extern int gru_dump_chiplet_request(unsigned long arg); | 647 | extern int gru_dump_chiplet_request(unsigned long arg); |
647 | extern long gru_get_gseg_statistics(unsigned long arg); | 648 | extern long gru_get_gseg_statistics(unsigned long arg); |
648 | extern irqreturn_t gru_intr(int irq, void *dev_id); | ||
649 | extern int gru_handle_user_call_os(unsigned long address); | 649 | extern int gru_handle_user_call_os(unsigned long address); |
650 | extern int gru_user_flush_tlb(unsigned long arg); | 650 | extern int gru_user_flush_tlb(unsigned long arg); |
651 | extern int gru_user_unload_context(unsigned long arg); | 651 | extern int gru_user_unload_context(unsigned long arg); |
652 | extern int gru_get_exception_detail(unsigned long arg); | 652 | extern int gru_get_exception_detail(unsigned long arg); |
653 | extern int gru_set_context_option(unsigned long address); | 653 | extern int gru_set_context_option(unsigned long address); |
654 | extern void gru_check_context_placement(struct gru_thread_state *gts); | ||
654 | extern int gru_cpu_fault_map_id(void); | 655 | extern int gru_cpu_fault_map_id(void); |
655 | extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); | 656 | extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); |
656 | extern void gru_flush_all_tlb(struct gru_state *gru); | 657 | extern void gru_flush_all_tlb(struct gru_state *gru); |
@@ -658,7 +659,8 @@ extern int gru_proc_init(void); | |||
658 | extern void gru_proc_exit(void); | 659 | extern void gru_proc_exit(void); |
659 | 660 | ||
660 | extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | 661 | extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, |
661 | int cbr_au_count, int dsr_au_count, int options, int tsid); | 662 | int cbr_au_count, int dsr_au_count, |
663 | unsigned char tlb_preload_count, int options, int tsid); | ||
662 | extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, | 664 | extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, |
663 | int cbr_au_count, char *cbmap); | 665 | int cbr_au_count, char *cbmap); |
664 | extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, | 666 | extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, |
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index 1d125091f5e7..240a6d361665 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c | |||
@@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, | |||
184 | STAT(flush_tlb_gru_tgh); | 184 | STAT(flush_tlb_gru_tgh); |
185 | asid = GRUASID(asid, start); | 185 | asid = GRUASID(asid, start); |
186 | gru_dbg(grudev, | 186 | gru_dbg(grudev, |
187 | " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", | 187 | " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n", |
188 | gid, asid, num, asids->mt_ctxbitmap); | 188 | gid, asid, start, grupagesize, num, asids->mt_ctxbitmap); |
189 | tgh = get_lock_tgh_handle(gru); | 189 | tgh = get_lock_tgh_handle(gru); |
190 | tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, | 190 | tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, |
191 | num - 1, asids->mt_ctxbitmap); | 191 | num - 1, asids->mt_ctxbitmap); |
@@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void) | |||
299 | { | 299 | { |
300 | struct gru_mm_struct *gms; | 300 | struct gru_mm_struct *gms; |
301 | struct mmu_notifier *mn; | 301 | struct mmu_notifier *mn; |
302 | int err; | ||
302 | 303 | ||
303 | mn = mmu_find_ops(current->mm, &gru_mmuops); | 304 | mn = mmu_find_ops(current->mm, &gru_mmuops); |
304 | if (mn) { | 305 | if (mn) { |
@@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void) | |||
307 | } else { | 308 | } else { |
308 | gms = kzalloc(sizeof(*gms), GFP_KERNEL); | 309 | gms = kzalloc(sizeof(*gms), GFP_KERNEL); |
309 | if (gms) { | 310 | if (gms) { |
311 | STAT(gms_alloc); | ||
310 | spin_lock_init(&gms->ms_asid_lock); | 312 | spin_lock_init(&gms->ms_asid_lock); |
311 | gms->ms_notifier.ops = &gru_mmuops; | 313 | gms->ms_notifier.ops = &gru_mmuops; |
312 | atomic_set(&gms->ms_refcnt, 1); | 314 | atomic_set(&gms->ms_refcnt, 1); |
313 | init_waitqueue_head(&gms->ms_wait_queue); | 315 | init_waitqueue_head(&gms->ms_wait_queue); |
314 | __mmu_notifier_register(&gms->ms_notifier, current->mm); | 316 | err = __mmu_notifier_register(&gms->ms_notifier, current->mm); |
317 | if (err) | ||
318 | goto error; | ||
315 | } | 319 | } |
316 | } | 320 | } |
317 | gru_dbg(grudev, "gms %p, refcnt %d\n", gms, | 321 | gru_dbg(grudev, "gms %p, refcnt %d\n", gms, |
318 | atomic_read(&gms->ms_refcnt)); | 322 | atomic_read(&gms->ms_refcnt)); |
319 | return gms; | 323 | return gms; |
324 | error: | ||
325 | kfree(gms); | ||
326 | return ERR_PTR(err); | ||
320 | } | 327 | } |
321 | 328 | ||
322 | void gru_drop_mmu_notifier(struct gru_mm_struct *gms) | 329 | void gru_drop_mmu_notifier(struct gru_mm_struct *gms) |
@@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms) | |||
327 | if (!gms->ms_released) | 334 | if (!gms->ms_released) |
328 | mmu_notifier_unregister(&gms->ms_notifier, current->mm); | 335 | mmu_notifier_unregister(&gms->ms_notifier, current->mm); |
329 | kfree(gms); | 336 | kfree(gms); |
337 | STAT(gms_free); | ||
330 | } | 338 | } |
331 | } | 339 | } |
332 | 340 | ||
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 2275126cb334..851b2f25ce0e 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h | |||
@@ -339,6 +339,7 @@ extern short xp_partition_id; | |||
339 | extern u8 xp_region_size; | 339 | extern u8 xp_region_size; |
340 | 340 | ||
341 | extern unsigned long (*xp_pa) (void *); | 341 | extern unsigned long (*xp_pa) (void *); |
342 | extern unsigned long (*xp_socket_pa) (unsigned long); | ||
342 | extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, | 343 | extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, |
343 | size_t); | 344 | size_t); |
344 | extern int (*xp_cpu_to_nasid) (int); | 345 | extern int (*xp_cpu_to_nasid) (int); |
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 7896849b16dc..01be66d02ca8 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c | |||
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size); | |||
44 | unsigned long (*xp_pa) (void *addr); | 44 | unsigned long (*xp_pa) (void *addr); |
45 | EXPORT_SYMBOL_GPL(xp_pa); | 45 | EXPORT_SYMBOL_GPL(xp_pa); |
46 | 46 | ||
47 | unsigned long (*xp_socket_pa) (unsigned long gpa); | ||
48 | EXPORT_SYMBOL_GPL(xp_socket_pa); | ||
49 | |||
47 | enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, | 50 | enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, |
48 | const unsigned long src_gpa, size_t len); | 51 | const unsigned long src_gpa, size_t len); |
49 | EXPORT_SYMBOL_GPL(xp_remote_memcpy); | 52 | EXPORT_SYMBOL_GPL(xp_remote_memcpy); |
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index fb3ec9d735a9..d8e463f87241 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c | |||
@@ -84,6 +84,15 @@ xp_pa_sn2(void *addr) | |||
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Convert a global physical to a socket physical address. | ||
88 | */ | ||
89 | static unsigned long | ||
90 | xp_socket_pa_sn2(unsigned long gpa) | ||
91 | { | ||
92 | return gpa; | ||
93 | } | ||
94 | |||
95 | /* | ||
87 | * Wrapper for bte_copy(). | 96 | * Wrapper for bte_copy(). |
88 | * | 97 | * |
89 | * dst_pa - physical address of the destination of the transfer. | 98 | * dst_pa - physical address of the destination of the transfer. |
@@ -162,6 +171,7 @@ xp_init_sn2(void) | |||
162 | xp_region_size = sn_region_size; | 171 | xp_region_size = sn_region_size; |
163 | 172 | ||
164 | xp_pa = xp_pa_sn2; | 173 | xp_pa = xp_pa_sn2; |
174 | xp_socket_pa = xp_socket_pa_sn2; | ||
165 | xp_remote_memcpy = xp_remote_memcpy_sn2; | 175 | xp_remote_memcpy = xp_remote_memcpy_sn2; |
166 | xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; | 176 | xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; |
167 | xp_expand_memprotect = xp_expand_memprotect_sn2; | 177 | xp_expand_memprotect = xp_expand_memprotect_sn2; |
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index d238576b26fa..a0d093274dc0 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c | |||
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr) | |||
32 | return uv_gpa(addr); | 32 | return uv_gpa(addr); |
33 | } | 33 | } |
34 | 34 | ||
35 | /* | ||
36 | * Convert a global physical to socket physical address. | ||
37 | */ | ||
38 | static unsigned long | ||
39 | xp_socket_pa_uv(unsigned long gpa) | ||
40 | { | ||
41 | return uv_gpa_to_soc_phys_ram(gpa); | ||
42 | } | ||
43 | |||
44 | static enum xp_retval | ||
45 | xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa, | ||
46 | size_t len) | ||
47 | { | ||
48 | int ret; | ||
49 | unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa)); | ||
50 | |||
51 | BUG_ON(!uv_gpa_in_mmr_space(src_gpa)); | ||
52 | BUG_ON(len != 8); | ||
53 | |||
54 | ret = gru_read_gpa(dst_va, src_gpa); | ||
55 | if (ret == 0) | ||
56 | return xpSuccess; | ||
57 | |||
58 | dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " | ||
59 | "len=%ld\n", dst_gpa, src_gpa, len); | ||
60 | return xpGruCopyError; | ||
61 | } | ||
62 | |||
63 | |||
35 | static enum xp_retval | 64 | static enum xp_retval |
36 | xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, | 65 | xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, |
37 | size_t len) | 66 | size_t len) |
38 | { | 67 | { |
39 | int ret; | 68 | int ret; |
40 | 69 | ||
70 | if (uv_gpa_in_mmr_space(src_gpa)) | ||
71 | return xp_remote_mmr_read(dst_gpa, src_gpa, len); | ||
72 | |||
41 | ret = gru_copy_gpa(dst_gpa, src_gpa, len); | 73 | ret = gru_copy_gpa(dst_gpa, src_gpa, len); |
42 | if (ret == 0) | 74 | if (ret == 0) |
43 | return xpSuccess; | 75 | return xpSuccess; |
@@ -123,6 +155,7 @@ xp_init_uv(void) | |||
123 | xp_region_size = sn_region_size; | 155 | xp_region_size = sn_region_size; |
124 | 156 | ||
125 | xp_pa = xp_pa_uv; | 157 | xp_pa = xp_pa_uv; |
158 | xp_socket_pa = xp_socket_pa_uv; | ||
126 | xp_remote_memcpy = xp_remote_memcpy_uv; | 159 | xp_remote_memcpy = xp_remote_memcpy_uv; |
127 | xp_cpu_to_nasid = xp_cpu_to_nasid_uv; | 160 | xp_cpu_to_nasid = xp_cpu_to_nasid_uv; |
128 | xp_expand_memprotect = xp_expand_memprotect_uv; | 161 | xp_expand_memprotect = xp_expand_memprotect_uv; |
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index fd3688a3e23f..8d082b46426b 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -44,6 +44,7 @@ | |||
44 | */ | 44 | */ |
45 | 45 | ||
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/slab.h> | ||
47 | #include <linux/sysctl.h> | 48 | #include <linux/sysctl.h> |
48 | #include <linux/device.h> | 49 | #include <linux/device.h> |
49 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
@@ -89,48 +90,40 @@ static int xpc_disengage_max_timelimit = 120; | |||
89 | 90 | ||
90 | static ctl_table xpc_sys_xpc_hb_dir[] = { | 91 | static ctl_table xpc_sys_xpc_hb_dir[] = { |
91 | { | 92 | { |
92 | .ctl_name = CTL_UNNUMBERED, | ||
93 | .procname = "hb_interval", | 93 | .procname = "hb_interval", |
94 | .data = &xpc_hb_interval, | 94 | .data = &xpc_hb_interval, |
95 | .maxlen = sizeof(int), | 95 | .maxlen = sizeof(int), |
96 | .mode = 0644, | 96 | .mode = 0644, |
97 | .proc_handler = &proc_dointvec_minmax, | 97 | .proc_handler = proc_dointvec_minmax, |
98 | .strategy = &sysctl_intvec, | ||
99 | .extra1 = &xpc_hb_min_interval, | 98 | .extra1 = &xpc_hb_min_interval, |
100 | .extra2 = &xpc_hb_max_interval}, | 99 | .extra2 = &xpc_hb_max_interval}, |
101 | { | 100 | { |
102 | .ctl_name = CTL_UNNUMBERED, | ||
103 | .procname = "hb_check_interval", | 101 | .procname = "hb_check_interval", |
104 | .data = &xpc_hb_check_interval, | 102 | .data = &xpc_hb_check_interval, |
105 | .maxlen = sizeof(int), | 103 | .maxlen = sizeof(int), |
106 | .mode = 0644, | 104 | .mode = 0644, |
107 | .proc_handler = &proc_dointvec_minmax, | 105 | .proc_handler = proc_dointvec_minmax, |
108 | .strategy = &sysctl_intvec, | ||
109 | .extra1 = &xpc_hb_check_min_interval, | 106 | .extra1 = &xpc_hb_check_min_interval, |
110 | .extra2 = &xpc_hb_check_max_interval}, | 107 | .extra2 = &xpc_hb_check_max_interval}, |
111 | {} | 108 | {} |
112 | }; | 109 | }; |
113 | static ctl_table xpc_sys_xpc_dir[] = { | 110 | static ctl_table xpc_sys_xpc_dir[] = { |
114 | { | 111 | { |
115 | .ctl_name = CTL_UNNUMBERED, | ||
116 | .procname = "hb", | 112 | .procname = "hb", |
117 | .mode = 0555, | 113 | .mode = 0555, |
118 | .child = xpc_sys_xpc_hb_dir}, | 114 | .child = xpc_sys_xpc_hb_dir}, |
119 | { | 115 | { |
120 | .ctl_name = CTL_UNNUMBERED, | ||
121 | .procname = "disengage_timelimit", | 116 | .procname = "disengage_timelimit", |
122 | .data = &xpc_disengage_timelimit, | 117 | .data = &xpc_disengage_timelimit, |
123 | .maxlen = sizeof(int), | 118 | .maxlen = sizeof(int), |
124 | .mode = 0644, | 119 | .mode = 0644, |
125 | .proc_handler = &proc_dointvec_minmax, | 120 | .proc_handler = proc_dointvec_minmax, |
126 | .strategy = &sysctl_intvec, | ||
127 | .extra1 = &xpc_disengage_min_timelimit, | 121 | .extra1 = &xpc_disengage_min_timelimit, |
128 | .extra2 = &xpc_disengage_max_timelimit}, | 122 | .extra2 = &xpc_disengage_max_timelimit}, |
129 | {} | 123 | {} |
130 | }; | 124 | }; |
131 | static ctl_table xpc_sys_dir[] = { | 125 | static ctl_table xpc_sys_dir[] = { |
132 | { | 126 | { |
133 | .ctl_name = CTL_UNNUMBERED, | ||
134 | .procname = "xpc", | 127 | .procname = "xpc", |
135 | .mode = 0555, | 128 | .mode = 0555, |
136 | .child = xpc_sys_xpc_dir}, | 129 | .child = xpc_sys_xpc_dir}, |
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 65877bc5edaa..d551f09ccb79 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -17,7 +17,9 @@ | |||
17 | 17 | ||
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/hardirq.h> | 19 | #include <linux/hardirq.h> |
20 | #include <linux/slab.h> | ||
20 | #include "xpc.h" | 21 | #include "xpc.h" |
22 | #include <asm/uv/uv_hub.h> | ||
21 | 23 | ||
22 | /* XPC is exiting flag */ | 24 | /* XPC is exiting flag */ |
23 | int xpc_exiting; | 25 | int xpc_exiting; |
@@ -92,8 +94,12 @@ xpc_get_rsvd_page_pa(int nasid) | |||
92 | break; | 94 | break; |
93 | 95 | ||
94 | /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ | 96 | /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ |
95 | if (L1_CACHE_ALIGN(len) > buf_len) { | 97 | if (is_shub()) |
96 | kfree(buf_base); | 98 | len = L1_CACHE_ALIGN(len); |
99 | |||
100 | if (len > buf_len) { | ||
101 | if (buf_base != NULL) | ||
102 | kfree(buf_base); | ||
97 | buf_len = L1_CACHE_ALIGN(len); | 103 | buf_len = L1_CACHE_ALIGN(len); |
98 | buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, | 104 | buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, |
99 | &buf_base); | 105 | &buf_base); |
@@ -105,7 +111,7 @@ xpc_get_rsvd_page_pa(int nasid) | |||
105 | } | 111 | } |
106 | } | 112 | } |
107 | 113 | ||
108 | ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len); | 114 | ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len); |
109 | if (ret != xpSuccess) { | 115 | if (ret != xpSuccess) { |
110 | dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); | 116 | dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); |
111 | break; | 117 | break; |
@@ -143,7 +149,7 @@ xpc_setup_rsvd_page(void) | |||
143 | dev_err(xpc_part, "SAL failed to locate the reserved page\n"); | 149 | dev_err(xpc_part, "SAL failed to locate the reserved page\n"); |
144 | return -ESRCH; | 150 | return -ESRCH; |
145 | } | 151 | } |
146 | rp = (struct xpc_rsvd_page *)__va(rp_pa); | 152 | rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa)); |
147 | 153 | ||
148 | if (rp->SAL_version < 3) { | 154 | if (rp->SAL_version < 3) { |
149 | /* SAL_versions < 3 had a SAL_partid defined as a u8 */ | 155 | /* SAL_versions < 3 had a SAL_partid defined as a u8 */ |
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 8b70e03f939f..7d71c04fc938 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/slab.h> | ||
17 | #include <asm/uncached.h> | 18 | #include <asm/uncached.h> |
18 | #include <asm/sn/mspec.h> | 19 | #include <asm/sn/mspec.h> |
19 | #include <asm/sn/sn_sal.h> | 20 | #include <asm/sn/sn_sal.h> |
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index c76677afda1b..1f59ee2226ca 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/slab.h> | ||
22 | #include <asm/uv/uv_hub.h> | 23 | #include <asm/uv/uv_hub.h> |
23 | #if defined CONFIG_X86_64 | 24 | #if defined CONFIG_X86_64 |
24 | #include <asm/uv/bios.h> | 25 | #include <asm/uv/bios.h> |
@@ -106,7 +107,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) | |||
106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | 107 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
107 | 108 | ||
108 | #if defined CONFIG_X86_64 | 109 | #if defined CONFIG_X86_64 |
109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); | 110 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, |
111 | UV_AFFINITY_CPU); | ||
110 | if (mq->irq < 0) { | 112 | if (mq->irq < 0) { |
111 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", | 113 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", |
112 | -mq->irq); | 114 | -mq->irq); |
@@ -136,7 +138,7 @@ static void | |||
136 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) | 138 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) |
137 | { | 139 | { |
138 | #if defined CONFIG_X86_64 | 140 | #if defined CONFIG_X86_64 |
139 | uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); | 141 | uv_teardown_irq(mq->irq); |
140 | 142 | ||
141 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 143 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
142 | int mmr_pnode; | 144 | int mmr_pnode; |
@@ -156,22 +158,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) | |||
156 | { | 158 | { |
157 | int ret; | 159 | int ret; |
158 | 160 | ||
159 | #if defined CONFIG_X86_64 | 161 | #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
160 | ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), | 162 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
161 | mq->order, &mq->mmr_offset); | 163 | |
162 | if (ret < 0) { | 164 | ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address), |
163 | dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " | ||
164 | "ret=%d\n", ret); | ||
165 | return ret; | ||
166 | } | ||
167 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
168 | ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address), | ||
169 | mq->order, &mq->mmr_offset); | 165 | mq->order, &mq->mmr_offset); |
170 | if (ret < 0) { | 166 | if (ret < 0) { |
171 | dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", | 167 | dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", |
172 | ret); | 168 | ret); |
173 | return -EBUSY; | 169 | return -EBUSY; |
174 | } | 170 | } |
171 | #elif defined CONFIG_X86_64 | ||
172 | ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address), | ||
173 | mq->order, &mq->mmr_offset); | ||
174 | if (ret < 0) { | ||
175 | dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " | ||
176 | "ret=%d\n", ret); | ||
177 | return ret; | ||
178 | } | ||
175 | #else | 179 | #else |
176 | #error not a supported configuration | 180 | #error not a supported configuration |
177 | #endif | 181 | #endif |
@@ -184,12 +188,13 @@ static void | |||
184 | xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) | 188 | xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) |
185 | { | 189 | { |
186 | int ret; | 190 | int ret; |
191 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | ||
187 | 192 | ||
188 | #if defined CONFIG_X86_64 | 193 | #if defined CONFIG_X86_64 |
189 | ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | 194 | ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num); |
190 | BUG_ON(ret != BIOS_STATUS_SUCCESS); | 195 | BUG_ON(ret != BIOS_STATUS_SUCCESS); |
191 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 196 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
192 | ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | 197 | ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num); |
193 | BUG_ON(ret != SALRET_OK); | 198 | BUG_ON(ret != SALRET_OK); |
194 | #else | 199 | #else |
195 | #error not a supported configuration | 200 | #error not a supported configuration |
@@ -203,6 +208,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
203 | enum xp_retval xp_ret; | 208 | enum xp_retval xp_ret; |
204 | int ret; | 209 | int ret; |
205 | int nid; | 210 | int nid; |
211 | int nasid; | ||
206 | int pg_order; | 212 | int pg_order; |
207 | struct page *page; | 213 | struct page *page; |
208 | struct xpc_gru_mq_uv *mq; | 214 | struct xpc_gru_mq_uv *mq; |
@@ -258,9 +264,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
258 | goto out_5; | 264 | goto out_5; |
259 | } | 265 | } |
260 | 266 | ||
267 | nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu)); | ||
268 | |||
261 | mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; | 269 | mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; |
262 | ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, | 270 | ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, |
263 | nid, mmr_value->vector, mmr_value->dest); | 271 | nasid, mmr_value->vector, mmr_value->dest); |
264 | if (ret != 0) { | 272 | if (ret != 0) { |
265 | dev_err(xpc_part, "gru_create_message_queue() returned " | 273 | dev_err(xpc_part, "gru_create_message_queue() returned " |
266 | "error=%d\n", ret); | 274 | "error=%d\n", ret); |
@@ -945,11 +953,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) | |||
945 | head->first = first->next; | 953 | head->first = first->next; |
946 | if (head->first == NULL) | 954 | if (head->first == NULL) |
947 | head->last = NULL; | 955 | head->last = NULL; |
956 | |||
957 | head->n_entries--; | ||
958 | BUG_ON(head->n_entries < 0); | ||
959 | |||
960 | first->next = NULL; | ||
948 | } | 961 | } |
949 | head->n_entries--; | ||
950 | BUG_ON(head->n_entries < 0); | ||
951 | spin_unlock_irqrestore(&head->lock, irq_flags); | 962 | spin_unlock_irqrestore(&head->lock, irq_flags); |
952 | first->next = NULL; | ||
953 | return first; | 963 | return first; |
954 | } | 964 | } |
955 | 965 | ||
@@ -1018,7 +1028,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part) | |||
1018 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), | 1028 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), |
1019 | XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); | 1029 | XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); |
1020 | 1030 | ||
1021 | while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { | 1031 | while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) || |
1032 | (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) { | ||
1022 | 1033 | ||
1023 | dev_dbg(xpc_part, "waiting to make first contact with " | 1034 | dev_dbg(xpc_part, "waiting to make first contact with " |
1024 | "partition %d\n", XPC_PARTID(part)); | 1035 | "partition %d\n", XPC_PARTID(part)); |
@@ -1421,7 +1432,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, | |||
1421 | msg_slot = ch_uv->recv_msg_slots + | 1432 | msg_slot = ch_uv->recv_msg_slots + |
1422 | (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; | 1433 | (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; |
1423 | 1434 | ||
1424 | BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); | ||
1425 | BUG_ON(msg_slot->hdr.size != 0); | 1435 | BUG_ON(msg_slot->hdr.size != 0); |
1426 | 1436 | ||
1427 | memcpy(msg_slot, msg, msg->hdr.size); | 1437 | memcpy(msg_slot, msg, msg->hdr.size); |
@@ -1645,8 +1655,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) | |||
1645 | sizeof(struct xpc_notify_mq_msghdr_uv)); | 1655 | sizeof(struct xpc_notify_mq_msghdr_uv)); |
1646 | if (ret != xpSuccess) | 1656 | if (ret != xpSuccess) |
1647 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); | 1657 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); |
1648 | |||
1649 | msg->hdr.msg_slot_number += ch->remote_nentries; | ||
1650 | } | 1658 | } |
1651 | 1659 | ||
1652 | static struct xpc_arch_operations xpc_arch_ops_uv = { | 1660 | static struct xpc_arch_operations xpc_arch_ops_uv = { |
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 16f0abda1423..ee5109a3cd98 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c | |||
@@ -20,6 +20,7 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/slab.h> | ||
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
25 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
@@ -475,7 +476,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
475 | 476 | ||
476 | if (skb->data[0] == 0xff) { | 477 | if (skb->data[0] == 0xff) { |
477 | /* we are being asked to broadcast to all partitions */ | 478 | /* we are being asked to broadcast to all partitions */ |
478 | for_each_bit(dest_partid, xpnet_broadcast_partitions, | 479 | for_each_set_bit(dest_partid, xpnet_broadcast_partitions, |
479 | xp_max_npartitions) { | 480 | xp_max_npartitions) { |
480 | 481 | ||
481 | xpnet_send(skb, queued_msg, start_addr, end_addr, | 482 | xpnet_send(skb, queued_msg, start_addr, end_addr, |
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c new file mode 100644 index 000000000000..d3f229a3a77e --- /dev/null +++ b/drivers/misc/ti_dac7512.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * dac7512.c - Linux kernel module for | ||
3 | * Texas Instruments DAC7512 | ||
4 | * | ||
5 | * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/spi/spi.h> | ||
25 | |||
26 | #define DAC7512_DRV_NAME "dac7512" | ||
27 | #define DRIVER_VERSION "1.0" | ||
28 | |||
29 | static ssize_t dac7512_store_val(struct device *dev, | ||
30 | struct device_attribute *attr, | ||
31 | const char *buf, size_t count) | ||
32 | { | ||
33 | struct spi_device *spi = to_spi_device(dev); | ||
34 | unsigned char tmp[2]; | ||
35 | unsigned long val; | ||
36 | |||
37 | if (strict_strtoul(buf, 10, &val) < 0) | ||
38 | return -EINVAL; | ||
39 | |||
40 | tmp[0] = val >> 8; | ||
41 | tmp[1] = val & 0xff; | ||
42 | spi_write(spi, tmp, sizeof(tmp)); | ||
43 | return count; | ||
44 | } | ||
45 | |||
46 | static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val); | ||
47 | |||
48 | static struct attribute *dac7512_attributes[] = { | ||
49 | &dev_attr_value.attr, | ||
50 | NULL | ||
51 | }; | ||
52 | |||
53 | static const struct attribute_group dac7512_attr_group = { | ||
54 | .attrs = dac7512_attributes, | ||
55 | }; | ||
56 | |||
57 | static int __devinit dac7512_probe(struct spi_device *spi) | ||
58 | { | ||
59 | int ret; | ||
60 | |||
61 | spi->bits_per_word = 8; | ||
62 | spi->mode = SPI_MODE_0; | ||
63 | ret = spi_setup(spi); | ||
64 | if (ret < 0) | ||
65 | return ret; | ||
66 | |||
67 | return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group); | ||
68 | } | ||
69 | |||
70 | static int __devexit dac7512_remove(struct spi_device *spi) | ||
71 | { | ||
72 | sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group); | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static struct spi_driver dac7512_driver = { | ||
77 | .driver = { | ||
78 | .name = DAC7512_DRV_NAME, | ||
79 | .owner = THIS_MODULE, | ||
80 | }, | ||
81 | .probe = dac7512_probe, | ||
82 | .remove = __devexit_p(dac7512_remove), | ||
83 | }; | ||
84 | |||
85 | static int __init dac7512_init(void) | ||
86 | { | ||
87 | return spi_register_driver(&dac7512_driver); | ||
88 | } | ||
89 | |||
90 | static void __exit dac7512_exit(void) | ||
91 | { | ||
92 | spi_unregister_driver(&dac7512_driver); | ||
93 | } | ||
94 | |||
95 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); | ||
96 | MODULE_DESCRIPTION("DAC7512 16-bit DAC"); | ||
97 | MODULE_LICENSE("GPL v2"); | ||
98 | MODULE_VERSION(DRIVER_VERSION); | ||
99 | |||
100 | module_init(dac7512_init); | ||
101 | module_exit(dac7512_exit); | ||
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 98bcba521da2..5f6852dff40b 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/tifm.h> | 12 | #include <linux/tifm.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/idr.h> | 15 | #include <linux/idr.h> |
15 | 16 | ||
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c new file mode 100644 index 000000000000..483ae5f7f68e --- /dev/null +++ b/drivers/misc/tsl2550.c | |||
@@ -0,0 +1,473 @@ | |||
1 | /* | ||
2 | * tsl2550.c - Linux kernel modules for ambient light sensor | ||
3 | * | ||
4 | * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it> | ||
5 | * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/i2c.h> | ||
26 | #include <linux/mutex.h> | ||
27 | |||
28 | #define TSL2550_DRV_NAME "tsl2550" | ||
29 | #define DRIVER_VERSION "1.2" | ||
30 | |||
31 | /* | ||
32 | * Defines | ||
33 | */ | ||
34 | |||
35 | #define TSL2550_POWER_DOWN 0x00 | ||
36 | #define TSL2550_POWER_UP 0x03 | ||
37 | #define TSL2550_STANDARD_RANGE 0x18 | ||
38 | #define TSL2550_EXTENDED_RANGE 0x1d | ||
39 | #define TSL2550_READ_ADC0 0x43 | ||
40 | #define TSL2550_READ_ADC1 0x83 | ||
41 | |||
42 | /* | ||
43 | * Structs | ||
44 | */ | ||
45 | |||
46 | struct tsl2550_data { | ||
47 | struct i2c_client *client; | ||
48 | struct mutex update_lock; | ||
49 | |||
50 | unsigned int power_state:1; | ||
51 | unsigned int operating_mode:1; | ||
52 | }; | ||
53 | |||
54 | /* | ||
55 | * Global data | ||
56 | */ | ||
57 | |||
58 | static const u8 TSL2550_MODE_RANGE[2] = { | ||
59 | TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE, | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * Management functions | ||
64 | */ | ||
65 | |||
66 | static int tsl2550_set_operating_mode(struct i2c_client *client, int mode) | ||
67 | { | ||
68 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
69 | |||
70 | int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]); | ||
71 | |||
72 | data->operating_mode = mode; | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | static int tsl2550_set_power_state(struct i2c_client *client, int state) | ||
78 | { | ||
79 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
80 | int ret; | ||
81 | |||
82 | if (state == 0) | ||
83 | ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN); | ||
84 | else { | ||
85 | ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP); | ||
86 | |||
87 | /* On power up we should reset operating mode also... */ | ||
88 | tsl2550_set_operating_mode(client, data->operating_mode); | ||
89 | } | ||
90 | |||
91 | data->power_state = state; | ||
92 | |||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd) | ||
97 | { | ||
98 | int ret; | ||
99 | |||
100 | ret = i2c_smbus_read_byte_data(client, cmd); | ||
101 | if (ret < 0) | ||
102 | return ret; | ||
103 | if (!(ret & 0x80)) | ||
104 | return -EAGAIN; | ||
105 | return ret & 0x7f; /* remove the "valid" bit */ | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * LUX calculation | ||
110 | */ | ||
111 | |||
112 | #define TSL2550_MAX_LUX 1846 | ||
113 | |||
114 | static const u8 ratio_lut[] = { | ||
115 | 100, 100, 100, 100, 100, 100, 100, 100, | ||
116 | 100, 100, 100, 100, 100, 100, 99, 99, | ||
117 | 99, 99, 99, 99, 99, 99, 99, 99, | ||
118 | 99, 99, 99, 98, 98, 98, 98, 98, | ||
119 | 98, 98, 97, 97, 97, 97, 97, 96, | ||
120 | 96, 96, 96, 95, 95, 95, 94, 94, | ||
121 | 93, 93, 93, 92, 92, 91, 91, 90, | ||
122 | 89, 89, 88, 87, 87, 86, 85, 84, | ||
123 | 83, 82, 81, 80, 79, 78, 77, 75, | ||
124 | 74, 73, 71, 69, 68, 66, 64, 62, | ||
125 | 60, 58, 56, 54, 52, 49, 47, 44, | ||
126 | 42, 41, 40, 40, 39, 39, 38, 38, | ||
127 | 37, 37, 37, 36, 36, 36, 35, 35, | ||
128 | 35, 35, 34, 34, 34, 34, 33, 33, | ||
129 | 33, 33, 32, 32, 32, 32, 32, 31, | ||
130 | 31, 31, 31, 31, 30, 30, 30, 30, | ||
131 | 30, | ||
132 | }; | ||
133 | |||
134 | static const u16 count_lut[] = { | ||
135 | 0, 1, 2, 3, 4, 5, 6, 7, | ||
136 | 8, 9, 10, 11, 12, 13, 14, 15, | ||
137 | 16, 18, 20, 22, 24, 26, 28, 30, | ||
138 | 32, 34, 36, 38, 40, 42, 44, 46, | ||
139 | 49, 53, 57, 61, 65, 69, 73, 77, | ||
140 | 81, 85, 89, 93, 97, 101, 105, 109, | ||
141 | 115, 123, 131, 139, 147, 155, 163, 171, | ||
142 | 179, 187, 195, 203, 211, 219, 227, 235, | ||
143 | 247, 263, 279, 295, 311, 327, 343, 359, | ||
144 | 375, 391, 407, 423, 439, 455, 471, 487, | ||
145 | 511, 543, 575, 607, 639, 671, 703, 735, | ||
146 | 767, 799, 831, 863, 895, 927, 959, 991, | ||
147 | 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487, | ||
148 | 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999, | ||
149 | 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991, | ||
150 | 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015, | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * This function is described into Taos TSL2550 Designer's Notebook | ||
155 | * pages 2, 3. | ||
156 | */ | ||
157 | static int tsl2550_calculate_lux(u8 ch0, u8 ch1) | ||
158 | { | ||
159 | unsigned int lux; | ||
160 | |||
161 | /* Look up count from channel values */ | ||
162 | u16 c0 = count_lut[ch0]; | ||
163 | u16 c1 = count_lut[ch1]; | ||
164 | |||
165 | /* | ||
166 | * Calculate ratio. | ||
167 | * Note: the "128" is a scaling factor | ||
168 | */ | ||
169 | u8 r = 128; | ||
170 | |||
171 | /* Avoid division by 0 and count 1 cannot be greater than count 0 */ | ||
172 | if (c1 <= c0) | ||
173 | if (c0) { | ||
174 | r = c1 * 128 / c0; | ||
175 | |||
176 | /* Calculate LUX */ | ||
177 | lux = ((c0 - c1) * ratio_lut[r]) / 256; | ||
178 | } else | ||
179 | lux = 0; | ||
180 | else | ||
181 | return -EAGAIN; | ||
182 | |||
183 | /* LUX range check */ | ||
184 | return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * SysFS support | ||
189 | */ | ||
190 | |||
191 | static ssize_t tsl2550_show_power_state(struct device *dev, | ||
192 | struct device_attribute *attr, char *buf) | ||
193 | { | ||
194 | struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); | ||
195 | |||
196 | return sprintf(buf, "%u\n", data->power_state); | ||
197 | } | ||
198 | |||
199 | static ssize_t tsl2550_store_power_state(struct device *dev, | ||
200 | struct device_attribute *attr, const char *buf, size_t count) | ||
201 | { | ||
202 | struct i2c_client *client = to_i2c_client(dev); | ||
203 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
204 | unsigned long val = simple_strtoul(buf, NULL, 10); | ||
205 | int ret; | ||
206 | |||
207 | if (val < 0 || val > 1) | ||
208 | return -EINVAL; | ||
209 | |||
210 | mutex_lock(&data->update_lock); | ||
211 | ret = tsl2550_set_power_state(client, val); | ||
212 | mutex_unlock(&data->update_lock); | ||
213 | |||
214 | if (ret < 0) | ||
215 | return ret; | ||
216 | |||
217 | return count; | ||
218 | } | ||
219 | |||
220 | static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, | ||
221 | tsl2550_show_power_state, tsl2550_store_power_state); | ||
222 | |||
223 | static ssize_t tsl2550_show_operating_mode(struct device *dev, | ||
224 | struct device_attribute *attr, char *buf) | ||
225 | { | ||
226 | struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); | ||
227 | |||
228 | return sprintf(buf, "%u\n", data->operating_mode); | ||
229 | } | ||
230 | |||
231 | static ssize_t tsl2550_store_operating_mode(struct device *dev, | ||
232 | struct device_attribute *attr, const char *buf, size_t count) | ||
233 | { | ||
234 | struct i2c_client *client = to_i2c_client(dev); | ||
235 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
236 | unsigned long val = simple_strtoul(buf, NULL, 10); | ||
237 | int ret; | ||
238 | |||
239 | if (val < 0 || val > 1) | ||
240 | return -EINVAL; | ||
241 | |||
242 | if (data->power_state == 0) | ||
243 | return -EBUSY; | ||
244 | |||
245 | mutex_lock(&data->update_lock); | ||
246 | ret = tsl2550_set_operating_mode(client, val); | ||
247 | mutex_unlock(&data->update_lock); | ||
248 | |||
249 | if (ret < 0) | ||
250 | return ret; | ||
251 | |||
252 | return count; | ||
253 | } | ||
254 | |||
255 | static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO, | ||
256 | tsl2550_show_operating_mode, tsl2550_store_operating_mode); | ||
257 | |||
258 | static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) | ||
259 | { | ||
260 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
261 | u8 ch0, ch1; | ||
262 | int ret; | ||
263 | |||
264 | ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0); | ||
265 | if (ret < 0) | ||
266 | return ret; | ||
267 | ch0 = ret; | ||
268 | |||
269 | ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1); | ||
270 | if (ret < 0) | ||
271 | return ret; | ||
272 | ch1 = ret; | ||
273 | |||
274 | /* Do the job */ | ||
275 | ret = tsl2550_calculate_lux(ch0, ch1); | ||
276 | if (ret < 0) | ||
277 | return ret; | ||
278 | if (data->operating_mode == 1) | ||
279 | ret *= 5; | ||
280 | |||
281 | return sprintf(buf, "%d\n", ret); | ||
282 | } | ||
283 | |||
284 | static ssize_t tsl2550_show_lux1_input(struct device *dev, | ||
285 | struct device_attribute *attr, char *buf) | ||
286 | { | ||
287 | struct i2c_client *client = to_i2c_client(dev); | ||
288 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
289 | int ret; | ||
290 | |||
291 | /* No LUX data if not operational */ | ||
292 | if (!data->power_state) | ||
293 | return -EBUSY; | ||
294 | |||
295 | mutex_lock(&data->update_lock); | ||
296 | ret = __tsl2550_show_lux(client, buf); | ||
297 | mutex_unlock(&data->update_lock); | ||
298 | |||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | static DEVICE_ATTR(lux1_input, S_IRUGO, | ||
303 | tsl2550_show_lux1_input, NULL); | ||
304 | |||
305 | static struct attribute *tsl2550_attributes[] = { | ||
306 | &dev_attr_power_state.attr, | ||
307 | &dev_attr_operating_mode.attr, | ||
308 | &dev_attr_lux1_input.attr, | ||
309 | NULL | ||
310 | }; | ||
311 | |||
312 | static const struct attribute_group tsl2550_attr_group = { | ||
313 | .attrs = tsl2550_attributes, | ||
314 | }; | ||
315 | |||
316 | /* | ||
317 | * Initialization function | ||
318 | */ | ||
319 | |||
320 | static int tsl2550_init_client(struct i2c_client *client) | ||
321 | { | ||
322 | struct tsl2550_data *data = i2c_get_clientdata(client); | ||
323 | int err; | ||
324 | |||
325 | /* | ||
326 | * Probe the chip. To do so we try to power up the device and then to | ||
327 | * read back the 0x03 code | ||
328 | */ | ||
329 | err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP); | ||
330 | if (err < 0) | ||
331 | return err; | ||
332 | if (err != TSL2550_POWER_UP) | ||
333 | return -ENODEV; | ||
334 | data->power_state = 1; | ||
335 | |||
336 | /* Set the default operating mode */ | ||
337 | err = i2c_smbus_write_byte(client, | ||
338 | TSL2550_MODE_RANGE[data->operating_mode]); | ||
339 | if (err < 0) | ||
340 | return err; | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * I2C init/probing/exit functions | ||
347 | */ | ||
348 | |||
349 | static struct i2c_driver tsl2550_driver; | ||
350 | static int __devinit tsl2550_probe(struct i2c_client *client, | ||
351 | const struct i2c_device_id *id) | ||
352 | { | ||
353 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | ||
354 | struct tsl2550_data *data; | ||
355 | int *opmode, err = 0; | ||
356 | |||
357 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE | ||
358 | | I2C_FUNC_SMBUS_READ_BYTE_DATA)) { | ||
359 | err = -EIO; | ||
360 | goto exit; | ||
361 | } | ||
362 | |||
363 | data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL); | ||
364 | if (!data) { | ||
365 | err = -ENOMEM; | ||
366 | goto exit; | ||
367 | } | ||
368 | data->client = client; | ||
369 | i2c_set_clientdata(client, data); | ||
370 | |||
371 | /* Check platform data */ | ||
372 | opmode = client->dev.platform_data; | ||
373 | if (opmode) { | ||
374 | if (*opmode < 0 || *opmode > 1) { | ||
375 | dev_err(&client->dev, "invalid operating_mode (%d)\n", | ||
376 | *opmode); | ||
377 | err = -EINVAL; | ||
378 | goto exit_kfree; | ||
379 | } | ||
380 | data->operating_mode = *opmode; | ||
381 | } else | ||
382 | data->operating_mode = 0; /* default mode is standard */ | ||
383 | dev_info(&client->dev, "%s operating mode\n", | ||
384 | data->operating_mode ? "extended" : "standard"); | ||
385 | |||
386 | mutex_init(&data->update_lock); | ||
387 | |||
388 | /* Initialize the TSL2550 chip */ | ||
389 | err = tsl2550_init_client(client); | ||
390 | if (err) | ||
391 | goto exit_kfree; | ||
392 | |||
393 | /* Register sysfs hooks */ | ||
394 | err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group); | ||
395 | if (err) | ||
396 | goto exit_kfree; | ||
397 | |||
398 | dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION); | ||
399 | |||
400 | return 0; | ||
401 | |||
402 | exit_kfree: | ||
403 | kfree(data); | ||
404 | exit: | ||
405 | return err; | ||
406 | } | ||
407 | |||
408 | static int __devexit tsl2550_remove(struct i2c_client *client) | ||
409 | { | ||
410 | sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group); | ||
411 | |||
412 | /* Power down the device */ | ||
413 | tsl2550_set_power_state(client, 0); | ||
414 | |||
415 | kfree(i2c_get_clientdata(client)); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | #ifdef CONFIG_PM | ||
421 | |||
422 | static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg) | ||
423 | { | ||
424 | return tsl2550_set_power_state(client, 0); | ||
425 | } | ||
426 | |||
427 | static int tsl2550_resume(struct i2c_client *client) | ||
428 | { | ||
429 | return tsl2550_set_power_state(client, 1); | ||
430 | } | ||
431 | |||
432 | #else | ||
433 | |||
434 | #define tsl2550_suspend NULL | ||
435 | #define tsl2550_resume NULL | ||
436 | |||
437 | #endif /* CONFIG_PM */ | ||
438 | |||
439 | static const struct i2c_device_id tsl2550_id[] = { | ||
440 | { "tsl2550", 0 }, | ||
441 | { } | ||
442 | }; | ||
443 | MODULE_DEVICE_TABLE(i2c, tsl2550_id); | ||
444 | |||
445 | static struct i2c_driver tsl2550_driver = { | ||
446 | .driver = { | ||
447 | .name = TSL2550_DRV_NAME, | ||
448 | .owner = THIS_MODULE, | ||
449 | }, | ||
450 | .suspend = tsl2550_suspend, | ||
451 | .resume = tsl2550_resume, | ||
452 | .probe = tsl2550_probe, | ||
453 | .remove = __devexit_p(tsl2550_remove), | ||
454 | .id_table = tsl2550_id, | ||
455 | }; | ||
456 | |||
457 | static int __init tsl2550_init(void) | ||
458 | { | ||
459 | return i2c_add_driver(&tsl2550_driver); | ||
460 | } | ||
461 | |||
462 | static void __exit tsl2550_exit(void) | ||
463 | { | ||
464 | i2c_del_driver(&tsl2550_driver); | ||
465 | } | ||
466 | |||
467 | MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); | ||
468 | MODULE_DESCRIPTION("TSL2550 ambient light sensor driver"); | ||
469 | MODULE_LICENSE("GPL"); | ||
470 | MODULE_VERSION(DRIVER_VERSION); | ||
471 | |||
472 | module_init(tsl2550_init); | ||
473 | module_exit(tsl2550_exit); | ||
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c new file mode 100644 index 000000000000..e7161c4e3798 --- /dev/null +++ b/drivers/misc/vmware_balloon.c | |||
@@ -0,0 +1,832 @@ | |||
1 | /* | ||
2 | * VMware Balloon driver. | ||
3 | * | ||
4 | * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * Maintained by: Dmitry Torokhov <dtor@vmware.com> | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This is VMware physical memory management driver for Linux. The driver | ||
25 | * acts like a "balloon" that can be inflated to reclaim physical pages by | ||
26 | * reserving them in the guest and invalidating them in the monitor, | ||
27 | * freeing up the underlying machine pages so they can be allocated to | ||
28 | * other guests. The balloon can also be deflated to allow the guest to | ||
29 | * use more physical memory. Higher level policies can control the sizes | ||
30 | * of balloons in VMs in order to manage physical memory resources. | ||
31 | */ | ||
32 | |||
33 | //#define DEBUG | ||
34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/debugfs.h> | ||
43 | #include <linux/seq_file.h> | ||
44 | #include <asm/vmware.h> | ||
45 | |||
46 | MODULE_AUTHOR("VMware, Inc."); | ||
47 | MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); | ||
48 | MODULE_VERSION("1.2.1.0-K"); | ||
49 | MODULE_ALIAS("dmi:*:svnVMware*:*"); | ||
50 | MODULE_ALIAS("vmware_vmmemctl"); | ||
51 | MODULE_LICENSE("GPL"); | ||
52 | |||
/*
 * Various constants controlling rate of inflating/deflating balloon,
 * measured in pages.
 */
57 | |||
58 | /* | ||
59 | * Rate of allocating memory when there is no memory pressure | ||
60 | * (driver performs non-sleeping allocations). | ||
61 | */ | ||
62 | #define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U | ||
63 | |||
/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
68 | #define VMW_BALLOON_RATE_ALLOC_MIN 512U | ||
69 | #define VMW_BALLOON_RATE_ALLOC_MAX 2048U | ||
70 | #define VMW_BALLOON_RATE_ALLOC_INC 16U | ||
71 | |||
72 | /* | ||
73 | * Rates for releasing pages while deflating balloon. | ||
74 | */ | ||
75 | #define VMW_BALLOON_RATE_FREE_MIN 512U | ||
76 | #define VMW_BALLOON_RATE_FREE_MAX 16384U | ||
77 | #define VMW_BALLOON_RATE_FREE_INC 16U | ||
78 | |||
79 | /* | ||
80 | * When guest is under memory pressure, use a reduced page allocation | ||
81 | * rate for next several cycles. | ||
82 | */ | ||
83 | #define VMW_BALLOON_SLOW_CYCLES 4 | ||
84 | |||
85 | /* | ||
86 | * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't | ||
87 | * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use | ||
88 | * __GFP_NOWARN, to suppress page allocation failure warnings. | ||
89 | */ | ||
90 | #define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN) | ||
91 | |||
92 | /* | ||
93 | * Use GFP_HIGHUSER when executing in a separate kernel thread | ||
94 | * context and allocation can sleep. This is less stressful to | ||
95 | * the guest memory system, since it allows the thread to block | ||
96 | * while memory is reclaimed, and won't take pages from emergency | ||
97 | * low-memory pools. | ||
98 | */ | ||
99 | #define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) | ||
100 | |||
101 | /* Maximum number of page allocations without yielding processor */ | ||
102 | #define VMW_BALLOON_YIELD_THRESHOLD 1024 | ||
103 | |||
104 | |||
105 | /* | ||
106 | * Hypervisor communication port definitions. | ||
107 | */ | ||
108 | #define VMW_BALLOON_HV_PORT 0x5670 | ||
109 | #define VMW_BALLOON_HV_MAGIC 0x456c6d6f | ||
110 | #define VMW_BALLOON_PROTOCOL_VERSION 2 | ||
111 | #define VMW_BALLOON_GUEST_ID 1 /* Linux */ | ||
112 | |||
113 | #define VMW_BALLOON_CMD_START 0 | ||
114 | #define VMW_BALLOON_CMD_GET_TARGET 1 | ||
115 | #define VMW_BALLOON_CMD_LOCK 2 | ||
116 | #define VMW_BALLOON_CMD_UNLOCK 3 | ||
117 | #define VMW_BALLOON_CMD_GUEST_ID 4 | ||
118 | |||
119 | /* error codes */ | ||
120 | #define VMW_BALLOON_SUCCESS 0 | ||
121 | #define VMW_BALLOON_FAILURE -1 | ||
122 | #define VMW_BALLOON_ERROR_CMD_INVALID 1 | ||
123 | #define VMW_BALLOON_ERROR_PPN_INVALID 2 | ||
124 | #define VMW_BALLOON_ERROR_PPN_LOCKED 3 | ||
125 | #define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 | ||
126 | #define VMW_BALLOON_ERROR_PPN_PINNED 5 | ||
127 | #define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 | ||
128 | #define VMW_BALLOON_ERROR_RESET 7 | ||
129 | #define VMW_BALLOON_ERROR_BUSY 8 | ||
130 | |||
/*
 * Issue a balloon command to the hypervisor through the backdoor I/O
 * port: magic in EAX, command number in ECX, port in EDX, payload in
 * EBX.  The hypervisor returns status in EAX and a result value in
 * EBX.  Evaluates to the status; "result" receives the EBX value.
 * The &= / & -1UL truncation keeps only the register-width bits.
 */
#define VMWARE_BALLOON_CMD(cmd, data, result)		\
({							\
	unsigned long __stat, __dummy1, __dummy2;	\
	__asm__ __volatile__ ("inl (%%dx)" :		\
		"=a"(__stat),				\
		"=c"(__dummy1),				\
		"=d"(__dummy2),				\
		"=b"(result) :				\
		"0"(VMW_BALLOON_HV_MAGIC),		\
		"1"(VMW_BALLOON_CMD_##cmd),		\
		"2"(VMW_BALLOON_HV_PORT),		\
		"3"(data) :				\
		"memory");				\
	result &= -1UL;					\
	__stat & -1UL;					\
})
147 | |||
#ifdef CONFIG_DEBUG_FS
/*
 * Event counters exported through debugfs (see vmballoon_debug_show()).
 * Compiled in only when CONFIG_DEBUG_FS is enabled; otherwise STATS_INC
 * expands to nothing.
 */
struct vmballoon_stats {
	unsigned int timer;	/* work-function invocations */

	/* allocation statistics */
	unsigned int alloc;
	unsigned int alloc_fail;
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc;
	unsigned int refused_free;
	unsigned int free;

	/* monitor operations */
	unsigned int lock;
	unsigned int lock_fail;
	unsigned int unlock;
	unsigned int unlock_fail;
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif
178 | |||
/* Per-balloon state; a single static instance exists (see below). */
struct vmballoon {

	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag, set when host replies VMW_BALLOON_ERROR_RESET */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;
	unsigned int rate_free;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/* scratch buffer for si_meminfo() in vmballoon_send_get_target() */
	struct sysinfo sysinfo;

	/* periodic work item driving inflate/deflate (vmballoon_work) */
	struct delayed_work dwork;
};
213 | |||
214 | static struct vmballoon balloon; | ||
215 | static struct workqueue_struct *vmballoon_wq; | ||
216 | |||
217 | /* | ||
218 | * Send "start" command to the host, communicating supported version | ||
219 | * of the protocol. | ||
220 | */ | ||
221 | static bool vmballoon_send_start(struct vmballoon *b) | ||
222 | { | ||
223 | unsigned long status, dummy; | ||
224 | |||
225 | STATS_INC(b->stats.start); | ||
226 | |||
227 | status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); | ||
228 | if (status == VMW_BALLOON_SUCCESS) | ||
229 | return true; | ||
230 | |||
231 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
232 | STATS_INC(b->stats.start_fail); | ||
233 | return false; | ||
234 | } | ||
235 | |||
236 | static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) | ||
237 | { | ||
238 | switch (status) { | ||
239 | case VMW_BALLOON_SUCCESS: | ||
240 | return true; | ||
241 | |||
242 | case VMW_BALLOON_ERROR_RESET: | ||
243 | b->reset_required = true; | ||
244 | /* fall through */ | ||
245 | |||
246 | default: | ||
247 | return false; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Communicate guest type to the host so that it can adjust ballooning | ||
253 | * algorithm to the one most appropriate for the guest. This command | ||
254 | * is normally issued after sending "start" command and is part of | ||
255 | * standard reset sequence. | ||
256 | */ | ||
257 | static bool vmballoon_send_guest_id(struct vmballoon *b) | ||
258 | { | ||
259 | unsigned long status, dummy; | ||
260 | |||
261 | status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); | ||
262 | |||
263 | STATS_INC(b->stats.guest_type); | ||
264 | |||
265 | if (vmballoon_check_status(b, status)) | ||
266 | return true; | ||
267 | |||
268 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
269 | STATS_INC(b->stats.guest_type_fail); | ||
270 | return false; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Retrieve desired balloon size from the host. | ||
275 | */ | ||
276 | static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) | ||
277 | { | ||
278 | unsigned long status; | ||
279 | unsigned long target; | ||
280 | unsigned long limit; | ||
281 | u32 limit32; | ||
282 | |||
283 | /* | ||
284 | * si_meminfo() is cheap. Moreover, we want to provide dynamic | ||
285 | * max balloon size later. So let us call si_meminfo() every | ||
286 | * iteration. | ||
287 | */ | ||
288 | si_meminfo(&b->sysinfo); | ||
289 | limit = b->sysinfo.totalram; | ||
290 | |||
291 | /* Ensure limit fits in 32-bits */ | ||
292 | limit32 = (u32)limit; | ||
293 | if (limit != limit32) | ||
294 | return false; | ||
295 | |||
296 | /* update stats */ | ||
297 | STATS_INC(b->stats.target); | ||
298 | |||
299 | status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); | ||
300 | if (vmballoon_check_status(b, status)) { | ||
301 | *new_target = target; | ||
302 | return true; | ||
303 | } | ||
304 | |||
305 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
306 | STATS_INC(b->stats.target_fail); | ||
307 | return false; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Notify the host about allocated page so that host can use it without | ||
312 | * fear that guest will need it. Host may reject some pages, we need to | ||
313 | * check the return value and maybe submit a different page. | ||
314 | */ | ||
315 | static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) | ||
316 | { | ||
317 | unsigned long status, dummy; | ||
318 | u32 pfn32; | ||
319 | |||
320 | pfn32 = (u32)pfn; | ||
321 | if (pfn32 != pfn) | ||
322 | return false; | ||
323 | |||
324 | STATS_INC(b->stats.lock); | ||
325 | |||
326 | status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); | ||
327 | if (vmballoon_check_status(b, status)) | ||
328 | return true; | ||
329 | |||
330 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
331 | STATS_INC(b->stats.lock_fail); | ||
332 | return false; | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Notify the host that guest intends to release given page back into | ||
337 | * the pool of available (to the guest) pages. | ||
338 | */ | ||
339 | static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) | ||
340 | { | ||
341 | unsigned long status, dummy; | ||
342 | u32 pfn32; | ||
343 | |||
344 | pfn32 = (u32)pfn; | ||
345 | if (pfn32 != pfn) | ||
346 | return false; | ||
347 | |||
348 | STATS_INC(b->stats.unlock); | ||
349 | |||
350 | status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); | ||
351 | if (vmballoon_check_status(b, status)) | ||
352 | return true; | ||
353 | |||
354 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
355 | STATS_INC(b->stats.unlock_fail); | ||
356 | return false; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Quickly release all pages allocated for the balloon. This function is | ||
361 | * called when host decides to "reset" balloon for one reason or another. | ||
362 | * Unlike normal "deflate" we do not (shall not) notify host of the pages | ||
363 | * being released. | ||
364 | */ | ||
365 | static void vmballoon_pop(struct vmballoon *b) | ||
366 | { | ||
367 | struct page *page, *next; | ||
368 | unsigned int count = 0; | ||
369 | |||
370 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
371 | list_del(&page->lru); | ||
372 | __free_page(page); | ||
373 | STATS_INC(b->stats.free); | ||
374 | b->size--; | ||
375 | |||
376 | if (++count >= b->rate_free) { | ||
377 | count = 0; | ||
378 | cond_resched(); | ||
379 | } | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Perform standard reset sequence by popping the balloon (in case it | ||
385 | * is not empty) and then restarting protocol. This operation normally | ||
386 | * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. | ||
387 | */ | ||
388 | static void vmballoon_reset(struct vmballoon *b) | ||
389 | { | ||
390 | /* free all pages, skipping monitor unlock */ | ||
391 | vmballoon_pop(b); | ||
392 | |||
393 | if (vmballoon_send_start(b)) { | ||
394 | b->reset_required = false; | ||
395 | if (!vmballoon_send_guest_id(b)) | ||
396 | pr_err("failed to send guest ID to the host\n"); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Allocate (or reserve) a page for the balloon and notify the host. If host | ||
402 | * refuses the page put it on "refuse" list and allocate another one until host | ||
403 | * is satisfied. "Refused" pages are released at the end of inflation cycle | ||
404 | * (when we allocate b->rate_alloc pages). | ||
405 | */ | ||
406 | static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) | ||
407 | { | ||
408 | struct page *page; | ||
409 | gfp_t flags; | ||
410 | bool locked = false; | ||
411 | |||
412 | do { | ||
413 | if (!can_sleep) | ||
414 | STATS_INC(b->stats.alloc); | ||
415 | else | ||
416 | STATS_INC(b->stats.sleep_alloc); | ||
417 | |||
418 | flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; | ||
419 | page = alloc_page(flags); | ||
420 | if (!page) { | ||
421 | if (!can_sleep) | ||
422 | STATS_INC(b->stats.alloc_fail); | ||
423 | else | ||
424 | STATS_INC(b->stats.sleep_alloc_fail); | ||
425 | return -ENOMEM; | ||
426 | } | ||
427 | |||
428 | /* inform monitor */ | ||
429 | locked = vmballoon_send_lock_page(b, page_to_pfn(page)); | ||
430 | if (!locked) { | ||
431 | if (b->reset_required) { | ||
432 | __free_page(page); | ||
433 | return -EIO; | ||
434 | } | ||
435 | |||
436 | /* place on list of non-balloonable pages, retry allocation */ | ||
437 | list_add(&page->lru, &b->refused_pages); | ||
438 | STATS_INC(b->stats.refused_alloc); | ||
439 | } | ||
440 | } while (!locked); | ||
441 | |||
442 | /* track allocated page */ | ||
443 | list_add(&page->lru, &b->pages); | ||
444 | |||
445 | /* update balloon size */ | ||
446 | b->size++; | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Release the page allocated for the balloon. Note that we first notify | ||
453 | * the host so it can make sure the page will be available for the guest | ||
454 | * to use, if needed. | ||
455 | */ | ||
456 | static int vmballoon_release_page(struct vmballoon *b, struct page *page) | ||
457 | { | ||
458 | if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) | ||
459 | return -EIO; | ||
460 | |||
461 | list_del(&page->lru); | ||
462 | |||
463 | /* deallocate page */ | ||
464 | __free_page(page); | ||
465 | STATS_INC(b->stats.free); | ||
466 | |||
467 | /* update balloon size */ | ||
468 | b->size--; | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Release pages that were allocated while attempting to inflate the | ||
475 | * balloon but were refused by the host for one reason or another. | ||
476 | */ | ||
477 | static void vmballoon_release_refused_pages(struct vmballoon *b) | ||
478 | { | ||
479 | struct page *page, *next; | ||
480 | |||
481 | list_for_each_entry_safe(page, next, &b->refused_pages, lru) { | ||
482 | list_del(&page->lru); | ||
483 | __free_page(page); | ||
484 | STATS_INC(b->stats.refused_free); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * Inflate the balloon towards its target size. Note that we try to limit | ||
490 | * the rate of allocation to make sure we are not choking the rest of the | ||
491 | * system. | ||
492 | */ | ||
/*
 * Inflate the balloon towards its target size, throttling the
 * allocation rate so the rest of the system is not starved.  The rate
 * (b->rate_alloc) adapts: it is halved on sleeping-allocation failure
 * and slowly increased after a fully successful cycle.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int goal;
	unsigned int rate;
	unsigned int i;
	unsigned int allocations = 0;
	int error = 0;
	bool alloc_can_sleep = false;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	goal = b->target - b->size;
	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
	rate = b->slow_allocation_cycles ?
			b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;

	pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
		 __func__, goal, rate, b->rate_alloc);

	for (i = 0; i < goal; i++) {

		error = vmballoon_reserve_page(b, alloc_can_sleep);
		if (error) {
			if (error != -ENOMEM) {
				/*
				 * Not a page allocation failure, stop this
				 * cycle. Maybe we'll get new target from
				 * the host soon.
				 */
				break;
			}

			if (alloc_can_sleep) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						    VMW_BALLOON_RATE_ALLOC_MIN);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

			if (i >= b->rate_alloc)
				break;

			alloc_can_sleep = true;
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
		}

		/* yield periodically so we don't monopolize the CPU */
		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
			cond_resched();
			allocations = 0;
		}

		if (i >= rate) {
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
	if (error == 0 && i >= b->rate_alloc) {
		unsigned int mult = i / b->rate_alloc;

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

	/* give back any pages the host refused during this cycle */
	vmballoon_release_refused_pages(b);
}
597 | |||
598 | /* | ||
599 | * Decrease the size of the balloon allowing guest to use more memory. | ||
600 | */ | ||
601 | static void vmballoon_deflate(struct vmballoon *b) | ||
602 | { | ||
603 | struct page *page, *next; | ||
604 | unsigned int i = 0; | ||
605 | unsigned int goal; | ||
606 | int error; | ||
607 | |||
608 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
609 | |||
610 | /* limit deallocation rate */ | ||
611 | goal = min(b->size - b->target, b->rate_free); | ||
612 | |||
613 | pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); | ||
614 | |||
615 | /* free pages to reach target */ | ||
616 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
617 | error = vmballoon_release_page(b, page); | ||
618 | if (error) { | ||
619 | /* quickly decrease rate in case of error */ | ||
620 | b->rate_free = max(b->rate_free / 2, | ||
621 | VMW_BALLOON_RATE_FREE_MIN); | ||
622 | return; | ||
623 | } | ||
624 | |||
625 | if (++i >= goal) | ||
626 | break; | ||
627 | } | ||
628 | |||
629 | /* slowly increase rate if there were no errors */ | ||
630 | b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, | ||
631 | VMW_BALLOON_RATE_FREE_MAX); | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Balloon work function: reset protocol, if needed, get the new size and | ||
636 | * adjust balloon as needed. Repeat in 1 sec. | ||
637 | */ | ||
638 | static void vmballoon_work(struct work_struct *work) | ||
639 | { | ||
640 | struct delayed_work *dwork = to_delayed_work(work); | ||
641 | struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); | ||
642 | unsigned int target; | ||
643 | |||
644 | STATS_INC(b->stats.timer); | ||
645 | |||
646 | if (b->reset_required) | ||
647 | vmballoon_reset(b); | ||
648 | |||
649 | if (b->slow_allocation_cycles > 0) | ||
650 | b->slow_allocation_cycles--; | ||
651 | |||
652 | if (vmballoon_send_get_target(b, &target)) { | ||
653 | /* update target, adjust size */ | ||
654 | b->target = target; | ||
655 | |||
656 | if (b->size < target) | ||
657 | vmballoon_inflate(b); | ||
658 | else if (b->size > target) | ||
659 | vmballoon_deflate(b); | ||
660 | } | ||
661 | |||
662 | queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ)); | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * DEBUGFS Interface | ||
667 | */ | ||
668 | #ifdef CONFIG_DEBUG_FS | ||
669 | |||
/*
 * seq_file show callback for the debugfs "vmmemctl" file: dumps the
 * current/target size, the adaptive rates and all event counters from
 * struct vmballoon_stats.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateNoSleepAlloc:   %8d pages/sec\n"
		   "rateSleepAlloc:     %8d pages/sec\n"
		   "rateFree:           %8d pages/sec\n",
		   VMW_BALLOON_NOSLEEP_ALLOC_MAX,
		   b->rate_alloc, b->rate_free);

	/* event counters, "(failed)" columns count unsuccessful attempts */
	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "primFree:           %8u\n"
		   "errAlloc:           %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock,  stats->lock_fail,
		   stats->unlock, stats->unlock_fail,
		   stats->target, stats->target_fail,
		   stats->alloc, stats->alloc_fail,
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free,
		   stats->refused_alloc, stats->refused_free);

	return 0;
}
715 | |||
/* debugfs open callback: bind the seq_file to the balloon instance. */
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}
720 | |||
/* File operations for the debugfs statistics file (read-only seq_file). */
static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
728 | |||
729 | static int __init vmballoon_debugfs_init(struct vmballoon *b) | ||
730 | { | ||
731 | int error; | ||
732 | |||
733 | b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, | ||
734 | &vmballoon_debug_fops); | ||
735 | if (IS_ERR(b->dbg_entry)) { | ||
736 | error = PTR_ERR(b->dbg_entry); | ||
737 | pr_err("failed to create debugfs entry, error: %d\n", error); | ||
738 | return error; | ||
739 | } | ||
740 | |||
741 | return 0; | ||
742 | } | ||
743 | |||
/* Remove the debugfs entry; debugfs_remove(NULL) is a safe no-op. */
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}
748 | |||
749 | #else | ||
750 | |||
/* Stubs used when CONFIG_DEBUG_FS is disabled: init always succeeds. */
static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}
759 | |||
760 | #endif /* CONFIG_DEBUG_FS */ | ||
761 | |||
762 | static int __init vmballoon_init(void) | ||
763 | { | ||
764 | int error; | ||
765 | |||
766 | /* | ||
767 | * Check if we are running on VMware's hypervisor and bail out | ||
768 | * if we are not. | ||
769 | */ | ||
770 | if (!vmware_platform()) | ||
771 | return -ENODEV; | ||
772 | |||
773 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | ||
774 | if (!vmballoon_wq) { | ||
775 | pr_err("failed to create workqueue\n"); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | |||
779 | INIT_LIST_HEAD(&balloon.pages); | ||
780 | INIT_LIST_HEAD(&balloon.refused_pages); | ||
781 | |||
782 | /* initialize rates */ | ||
783 | balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; | ||
784 | balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX; | ||
785 | |||
786 | INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); | ||
787 | |||
788 | /* | ||
789 | * Start balloon. | ||
790 | */ | ||
791 | if (!vmballoon_send_start(&balloon)) { | ||
792 | pr_err("failed to send start command to the host\n"); | ||
793 | error = -EIO; | ||
794 | goto fail; | ||
795 | } | ||
796 | |||
797 | if (!vmballoon_send_guest_id(&balloon)) { | ||
798 | pr_err("failed to send guest ID to the host\n"); | ||
799 | error = -EIO; | ||
800 | goto fail; | ||
801 | } | ||
802 | |||
803 | error = vmballoon_debugfs_init(&balloon); | ||
804 | if (error) | ||
805 | goto fail; | ||
806 | |||
807 | queue_delayed_work(vmballoon_wq, &balloon.dwork, 0); | ||
808 | |||
809 | return 0; | ||
810 | |||
811 | fail: | ||
812 | destroy_workqueue(vmballoon_wq); | ||
813 | return error; | ||
814 | } | ||
815 | module_init(vmballoon_init); | ||
816 | |||
/*
 * Module exit: stop the periodic work and its workqueue, remove the
 * debugfs entry, then return all balloon memory to the kernel.
 */
static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);
	destroy_workqueue(vmballoon_wq);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon);
	vmballoon_pop(&balloon);
}
832 | module_exit(vmballoon_exit); | ||