path: root/drivers/misc
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig | 263
-rw-r--r--  drivers/misc/Makefile | 22
-rw-r--r--  drivers/misc/ab8500-pwm.c | 168
-rw-r--r--  drivers/misc/ad525x_dpot-i2c.c | 136
-rw-r--r--  drivers/misc/ad525x_dpot-spi.c | 174
-rw-r--r--  drivers/misc/ad525x_dpot.c | 773
-rw-r--r--  drivers/misc/ad525x_dpot.h | 219
-rw-r--r--  drivers/misc/apds9802als.c | 346
-rw-r--r--  drivers/misc/apds990x.c | 1295
-rw-r--r--  drivers/misc/arm-charlcd.c | 396
-rw-r--r--  drivers/misc/atmel-ssc.c | 1
-rw-r--r--  drivers/misc/atmel_pwm.c | 1
-rw-r--r--  drivers/misc/atmel_tclib.c | 1
-rw-r--r--  drivers/misc/bh1770glc.c | 1417
-rw-r--r--  drivers/misc/bh1780gli.c | 272
-rw-r--r--  drivers/misc/bmp085.c | 482
-rw-r--r--  drivers/misc/c2port/core.c | 9
-rw-r--r--  drivers/misc/cb710/core.c | 2
-rw-r--r--  drivers/misc/cb710/debug.c | 1
-rw-r--r--  drivers/misc/cb710/sgbuf2.c | 4
-rw-r--r--  drivers/misc/cs5535-mfgpt.c | 382
-rw-r--r--  drivers/misc/ds1682.c | 268
-rw-r--r--  drivers/misc/eeprom/at24.c | 176
-rw-r--r--  drivers/misc/eeprom/at25.c | 13
-rw-r--r--  drivers/misc/eeprom/eeprom.c | 47
-rw-r--r--  drivers/misc/eeprom/eeprom_93cx6.c | 39
-rw-r--r--  drivers/misc/eeprom/max6875.c | 83
-rw-r--r--  drivers/misc/enclosure.c | 82
-rw-r--r--  drivers/misc/ep93xx_pwm.c | 385
-rw-r--r--  drivers/misc/hdpuftrs/Makefile | 1
-rw-r--r--  drivers/misc/hdpuftrs/hdpu_cpustate.c | 256
-rw-r--r--  drivers/misc/hdpuftrs/hdpu_nexus.c | 149
-rw-r--r--  drivers/misc/hmc6352.c | 166
-rw-r--r--  drivers/misc/hpilo.c | 312
-rw-r--r--  drivers/misc/hpilo.h | 27
-rw-r--r--  drivers/misc/ibmasm/command.c | 2
-rw-r--r--  drivers/misc/ibmasm/event.c | 2
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c | 17
-rw-r--r--  drivers/misc/ibmasm/module.c | 1
-rw-r--r--  drivers/misc/ibmasm/r_heartbeat.c | 1
-rw-r--r--  drivers/misc/ics932s401.c | 49
-rw-r--r--  drivers/misc/ioc4.c | 46
-rw-r--r--  drivers/misc/isl29020.c | 248
-rw-r--r--  drivers/misc/iwmc3200top/Kconfig | 20
-rw-r--r--  drivers/misc/iwmc3200top/Makefile | 29
-rw-r--r--  drivers/misc/iwmc3200top/debugfs.c | 137
-rw-r--r--  drivers/misc/iwmc3200top/debugfs.h | 58
-rw-r--r--  drivers/misc/iwmc3200top/fw-download.c | 358
-rw-r--r--  drivers/misc/iwmc3200top/fw-msg.h | 113
-rw-r--r--  drivers/misc/iwmc3200top/iwmc3200top.h | 207
-rw-r--r--  drivers/misc/iwmc3200top/log.c | 348
-rw-r--r--  drivers/misc/iwmc3200top/log.h | 171
-rw-r--r--  drivers/misc/iwmc3200top/main.c | 666
-rw-r--r--  drivers/misc/kgdbts.c | 36
-rw-r--r--  drivers/misc/lkdtm.c | 554
-rw-r--r--  drivers/misc/pch_phub.c | 717
-rw-r--r--  drivers/misc/phantom.c | 37
-rw-r--r--  drivers/misc/sgi-gru/gru.h | 11
-rw-r--r--  drivers/misc/sgi-gru/gru_instructions.h | 144
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 311
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 299
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c | 70
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.h | 37
-rw-r--r--  drivers/misc/sgi-gru/grukdump.c | 13
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 214
-rw-r--r--  drivers/misc/sgi-gru/grukservices.h | 14
-rw-r--r--  drivers/misc/sgi-gru/grulib.h | 21
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 228
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c | 58
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 92
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 14
-rw-r--r--  drivers/misc/sgi-xp/xp.h | 1
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c | 3
-rw-r--r--  drivers/misc/sgi-xp/xp_sn2.c | 10
-rw-r--r--  drivers/misc/sgi-xp/xp_uv.c | 33
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 15
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 39
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c | 41
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 69
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 12
-rw-r--r--  drivers/misc/ti-st/Kconfig | 17
-rw-r--r--  drivers/misc/ti-st/Makefile | 6
-rw-r--r--  drivers/misc/ti-st/st_core.c | 992
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 799
-rw-r--r--  drivers/misc/ti-st/st_ll.c | 150
-rw-r--r--  drivers/misc/ti_dac7512.c | 101
-rw-r--r--  drivers/misc/tifm_core.c | 1
-rw-r--r--  drivers/misc/tsl2550.c | 473
-rw-r--r--  drivers/misc/vmw_balloon.c | 844
89 files changed, 15862 insertions, 1455 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 68ab39d7cb35..4d073f1e4502 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig MISC_DEVICES
 	bool "Misc devices"
-	default y
 	---help---
 	  Say Y here to get to see options for device drivers from various
 	  different categories. This option alone does not add any kernel code.
@@ -13,6 +12,47 @@ menuconfig MISC_DEVICES
 
 if MISC_DEVICES
 
+config AD525X_DPOT
+	tristate "Analog Devices Digital Potentiometers"
+	depends on (I2C || SPI) && SYSFS
+	help
+	  If you say yes here, you get support for the Analog Devices
+	  AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+	  AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+	  AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+	  AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+	  AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+	  AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+	  ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+	  AD5271, AD5272, AD5274
+	  digital potentiometer chips.
+
+	  See Documentation/misc-devices/ad525x_dpot.txt for the
+	  userspace interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ad525x_dpot.
+
+config AD525X_DPOT_I2C
+	tristate "support I2C bus connection"
+	depends on AD525X_DPOT && I2C
+	help
+	  Say Y here if you have digital potentiometers hooked to an I2C bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+	tristate "support SPI bus connection"
+	depends on AD525X_DPOT && SPI_MASTER
+	help
+	  Say Y here if you have digital potentiometers hooked to an SPI bus.
+
+	  If unsure, say N (but it's safe to say "Y").
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-spi.
+
 config ATMEL_PWM
 	tristate "Atmel AT32/AT91 PWM support"
 	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -22,6 +62,15 @@ config ATMEL_PWM
 	  purposes including software controlled power-efficient backlights
 	  on LCD displays, motor control, and waveform generation.
 
+config AB8500_PWM
+	bool "AB8500 PWM support"
+	depends on AB8500_CORE
+	select HAVE_PWM
+	help
+	  This driver exports functions to enable/disable/config/free Pulse
+	  Width Modulation in the Analog Baseband Chip AB8500.
+	  It is used by the LED and backlight drivers to control the intensity.
+
 config ATMEL_TCLIB
 	bool "Atmel AT32/AT91 Timer/Counter Library"
 	depends on (AVR32 || ARCH_AT91)
@@ -32,7 +81,7 @@ config ATMEL_TCLIB
 
 config ATMEL_TCB_CLKSRC
 	bool "TC Block Clocksource"
-	depends on ATMEL_TCLIB && GENERIC_TIME
+	depends on ATMEL_TCLIB
 	default y
 	help
 	  Select this to get a high precision clocksource based on a
@@ -72,8 +121,8 @@ config IBM_ASM
 
 	  WARNING: This software may not be supported or function
 	  correctly on your IBM server. Please consult the IBM ServerProven
-	  website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for
-	  information on the specific driver level and support statement
+	  website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+	  for information on the specific driver level and support statement
 	  for your IBM server.
 
79config PHANTOM 128config PHANTOM
@@ -173,16 +222,50 @@ config SGI_XP
173 this feature will allow for direct communication between SSIs 222 this feature will allow for direct communication between SSIs
174 based on a network adapter and DMA messaging. 223 based on a network adapter and DMA messaging.
175 224
225config CS5535_MFGPT
226 tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
227 depends on PCI
228 depends on X86
229 default n
230 help
231 This driver provides access to MFGPT functionality for other
232 drivers that need timers. MFGPTs are available in the CS5535 and
233 CS5536 companion chips that are found in AMD Geode and several
234 other platforms. They have a better resolution and max interval
235 than the generic PIT, and are suitable for use as high-res timers.
236 You probably don't want to enable this manually; other drivers that
237 make use of it should enable it.
238
239config CS5535_MFGPT_DEFAULT_IRQ
240 int
241 depends on CS5535_MFGPT
242 default 7
243 help
244 MFGPTs on the CS5535 require an interrupt. The selected IRQ
245 can be overridden as a module option as well as by driver that
246 use the cs5535_mfgpt_ API; however, different architectures might
247 want to use a different IRQ by default. This is here for
248 architectures to set as necessary.
249
250config CS5535_CLOCK_EVENT_SRC
251 tristate "CS5535/CS5536 high-res timer (MFGPT) events"
252 depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT
253 help
254 This driver provides a clock event source based on the MFGPT
255 timer(s) in the CS5535 and CS5536 companion chips.
256 MFGPTs have a better resolution and max interval than the
257 generic PIT, and are suitable for use as high-res timers.
258
176config HP_ILO 259config HP_ILO
177 tristate "Channel interface driver for HP iLO/iLO2 processor" 260 tristate "Channel interface driver for the HP iLO processor"
178 depends on PCI 261 depends on PCI
179 default n 262 default n
180 help 263 help
181 The channel interface driver allows applications to communicate 264 The channel interface driver allows applications to communicate
182 with iLO/iLO2 management processors present on HP ProLiant 265 with iLO management processors present on HP ProLiant servers.
183 servers. Upon loading, the driver creates /dev/hpilo/dXccbN files, 266 Upon loading, the driver creates /dev/hpilo/dXccbN files, which
184 which can be used to gather data from the management processor, 267 can be used to gather data from the management processor, via
185 via read and write system calls. 268 read and write system calls.
186 269
187 To compile this driver as a module, choose M here: the 270 To compile this driver as a module, choose M here: the
188 module will be called hpilo. 271 module will be called hpilo.
@@ -210,18 +293,15 @@ config SGI_GRU_DEBUG
 	This option enables addition debugging code for the SGI GRU driver. If
 	you are unsure, say N.
 
-config DELL_LAPTOP
-	tristate "Dell Laptop Extras (EXPERIMENTAL)"
-	depends on X86
-	depends on DCDBAS
-	depends on EXPERIMENTAL
-	depends on BACKLIGHT_CLASS_DEVICE
-	depends on RFKILL
-	depends on POWER_SUPPLY
-	default n
-	---help---
-	This driver adds support for rfkill and backlight control to Dell
-	laptops.
+config APDS9802ALS
+	tristate "Medfield Avago APDS9802 ALS Sensor module"
+	depends on I2C
+	help
+	  If you say yes here you get support for the ALS APDS9802 ambient
+	  light sensor.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called apds9802als.
 
 config ISL29003
 	tristate "Intersil ISL29003 ambient light sensor"
@@ -233,8 +313,149 @@ config ISL29003
 	  This driver can also be built as a module. If so, the module
 	  will be called isl29003.
 
+config ISL29020
+	tristate "Intersil ISL29020 ambient light sensor"
+	depends on I2C
+	help
+	  If you say yes here you get support for the Intersil ISL29020
+	  ambient light sensor.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called isl29020.
+
+config SENSORS_TSL2550
+	tristate "Taos TSL2550 ambient light sensor"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get support for the Taos TSL2550
+	  ambient light sensor.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called tsl2550.
+
+config SENSORS_BH1780
+	tristate "ROHM BH1780GLI ambient light sensor"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get support for the ROHM BH1780GLI
+	  ambient light sensor.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called bh1780gli.
+
+config SENSORS_BH1770
+	tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+	depends on I2C
+	---help---
+	  Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+	  SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+	tristate "APDS990X combined ALS and proximity sensors"
+	depends on I2C
+	default n
+	---help---
+	  Say Y here if you want to build a driver for Avago APDS990x
+	  combined ambient light and proximity sensor chip.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called apds990x. If unsure, say N here.
+
+config HMC6352
+	tristate "Honeywell HMC6352 compass"
+	depends on I2C
+	help
+	  This driver provides support for the Honeywell HMC6352 compass,
+	  providing configuration and heading data via sysfs.
+
+config EP93XX_PWM
+	tristate "EP93xx PWM support"
+	depends on ARCH_EP93XX
+	help
+	  This option enables device driver support for the PWM channels
+	  on the Cirrus EP93xx processors. The EP9307 chip only has one
+	  PWM channel; all the others have two. The second channel is an
+	  alternate function of the EGPIO14 pin. A sysfs interface is
+	  provided to control the PWM channels.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called ep93xx_pwm.
+
+config DS1682
+	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
+	depends on I2C && EXPERIMENTAL
+	help
+	  If you say yes here you get support for the Dallas Semiconductor
+	  DS1682 Total Elapsed Time Recorder.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ds1682.
+
+config TI_DAC7512
+	tristate "Texas Instruments DAC7512"
+	depends on SPI && SYSFS
+	help
+	  If you say yes here you get support for the Texas Instruments
+	  DAC7512 16-bit digital-to-analog converter.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ti_dac7512.
+
+config VMWARE_BALLOON
+	tristate "VMware Balloon Driver"
+	depends on X86
+	help
+	  This is the VMware physical memory management driver, which acts
+	  like a "balloon" that can be inflated to reclaim physical pages
+	  by reserving them in the guest and invalidating them in the
+	  monitor, freeing up the underlying machine pages so they can
+	  be allocated to other guests. The balloon can also be deflated
+	  to allow the guest to use more physical memory.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmw_balloon.
+
+config ARM_CHARLCD
+	bool "ARM Ltd. Character LCD Driver"
+	depends on PLAT_VERSATILE
+	help
+	  This is a driver for the character LCD found on the ARM Ltd.
+	  Versatile and RealView Platform Baseboards. It doesn't do
+	  very much more than display the text "ARM Linux" on the first
+	  line and the Linux version on the second line, but that's
+	  still useful.
+
+config BMP085
+	tristate "BMP085 digital pressure sensor"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get support for the Bosch Sensortec
+	  BMP085 digital pressure sensor.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bmp085.
+
+config PCH_PHUB
+	tristate "PCH Packet Hub of Intel Topcliff"
+	depends on PCI
+	help
+	  This driver is for the PCH (Platform Controller Hub) PHUB (Packet
+	  Hub) of Intel Topcliff, which is an IOH (Input/Output Hub) for x86
+	  embedded processors. The Topcliff keeps the MAC address and Option
+	  ROM data in SROM, and this driver can access both.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called pch_phub.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
+source "drivers/misc/iwmc3200top/Kconfig"
+source "drivers/misc/ti-st/Kconfig"
 
 endif # MISC_DEVICES
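
The AD525X_DPOT entry above points at a sysfs userspace interface (see Documentation/misc-devices/ad525x_dpot.txt). A minimal userspace sketch follows; the attribute name "rdac0" comes from ad525x_dpot.c further down, while the device path and the wiper value are assumptions made only for illustration.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical sysfs path: an AD5258 on I2C bus 0 at address 0x18 */
	const char *attr = "/sys/bus/i2c/devices/0-0018/rdac0";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	fprintf(f, "%d\n", 32);		/* move wiper 0 to position 32 */
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}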
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 36f733cd60e6..98009cc20cb9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,22 +3,42 @@
 #
 
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
-obj-$(CONFIG_HDPU_FEATURES)	+= hdpuftrs/
+obj-$(CONFIG_AD525X_DPOT)	+= ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C)	+= ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
+obj-$(CONFIG_BMP085)		+= bmp085.o
 obj-$(CONFIG_ICS932S401)	+= ics932s401.o
 obj-$(CONFIG_LKDTM)		+= lkdtm.o
 obj-$(CONFIG_TIFM_CORE)		+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)		+= tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)		+= phantom.o
+obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
+obj-$(CONFIG_CS5535_MFGPT)	+= cs5535-mfgpt.o
 obj-$(CONFIG_HP_ILO)		+= hpilo.o
+obj-$(CONFIG_APDS9802ALS)	+= apds9802als.o
 obj-$(CONFIG_ISL29003)		+= isl29003.o
+obj-$(CONFIG_ISL29020)		+= isl29020.o
+obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
+obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
+obj-$(CONFIG_DS1682)		+= ds1682.o
+obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
 obj-$(CONFIG_C2PORT)		+= c2port/
+obj-$(CONFIG_IWMC3200TOP)	+= iwmc3200top/
+obj-$(CONFIG_HMC6352)		+= hmc6352.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
+obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
+obj-$(CONFIG_ARM_CHARLCD)	+= arm-charlcd.o
+obj-$(CONFIG_PCH_PHUB)		+= pch_phub.o
+obj-y				+= ti-st/
+obj-$(CONFIG_AB8500_PWM)	+= ab8500-pwm.o
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
new file mode 100644
index 000000000000..54e3d05b63cc
--- /dev/null
+++ b/drivers/misc/ab8500-pwm.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * Author: Arun R Murthy <arun.murthy@stericsson.com>
5 * License terms: GNU General Public License (GPL) version 2
6 */
7#include <linux/err.h>
8#include <linux/platform_device.h>
9#include <linux/slab.h>
10#include <linux/pwm.h>
11#include <linux/mfd/ab8500.h>
12#include <linux/mfd/abx500.h>
13
14/*
15 * PWM Out generators
16 * Bank: 0x10
17 */
18#define AB8500_PWM_OUT_CTRL1_REG 0x60
19#define AB8500_PWM_OUT_CTRL2_REG 0x61
20#define AB8500_PWM_OUT_CTRL7_REG 0x66
21
22/* backlight driver constants */
23#define ENABLE_PWM 1
24#define DISABLE_PWM 0
25
26struct pwm_device {
27 struct device *dev;
28 struct list_head node;
29 const char *label;
30 unsigned int pwm_id;
31};
32
33static LIST_HEAD(pwm_list);
34
35int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
36{
37 int ret = 0;
38 unsigned int higher_val, lower_val;
39 u8 reg;
40
41 /*
42 * get the first 8 bits that are be written to
43 * AB8500_PWM_OUT_CTRL1_REG[0:7]
44 */
45 lower_val = duty_ns & 0x00FF;
46 /*
47 * get bits [9:10] that are to be written to
48 * AB8500_PWM_OUT_CTRL2_REG[0:1]
49 */
50 higher_val = ((duty_ns & 0x0300) >> 8);
51
52 reg = AB8500_PWM_OUT_CTRL1_REG + ((pwm->pwm_id - 1) * 2);
53
54 ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
55 reg, (u8)lower_val);
56 if (ret < 0)
57 return ret;
58 ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
59 (reg + 1), (u8)higher_val);
60
61 return ret;
62}
63EXPORT_SYMBOL(pwm_config);
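/*
 * Worked example of the register split above (editorial sketch, not part
 * of the original source): with pwm_id == 1 and duty_ns == 0x2A5,
 *
 *   lower_val  = 0x2A5 & 0x00FF        = 0xA5 -> AB8500_PWM_OUT_CTRL1_REG (0x60)
 *   higher_val = (0x2A5 & 0x0300) >> 8 = 0x02 -> AB8500_PWM_OUT_CTRL2_REG (0x61)
 *
 * i.e. the 10-bit duty value is spread over two consecutive 8-bit
 * registers, and each further pwm_id moves the register pair up by two.
 */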
64
65int pwm_enable(struct pwm_device *pwm)
66{
67 int ret;
68
69 ret = abx500_mask_and_set_register_interruptible(pwm->dev,
70 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
71 1 << (pwm->pwm_id-1), ENABLE_PWM);
72 if (ret < 0)
73		dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n",
74 pwm->label, ret);
75 return ret;
76}
77EXPORT_SYMBOL(pwm_enable);
78
79void pwm_disable(struct pwm_device *pwm)
80{
81 int ret;
82
83 ret = abx500_mask_and_set_register_interruptible(pwm->dev,
84 AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
85 1 << (pwm->pwm_id-1), DISABLE_PWM);
86 if (ret < 0)
87 dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
88 pwm->label, ret);
89 return;
90}
91EXPORT_SYMBOL(pwm_disable);
92
93struct pwm_device *pwm_request(int pwm_id, const char *label)
94{
95 struct pwm_device *pwm;
96
97 list_for_each_entry(pwm, &pwm_list, node) {
98 if (pwm->pwm_id == pwm_id) {
99 pwm->label = label;
100 pwm->pwm_id = pwm_id;
101 return pwm;
102 }
103 }
104
105 return ERR_PTR(-ENOENT);
106}
107EXPORT_SYMBOL(pwm_request);
108
109void pwm_free(struct pwm_device *pwm)
110{
111 pwm_disable(pwm);
112}
113EXPORT_SYMBOL(pwm_free);
114
115static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
116{
117 struct pwm_device *pwm;
118 /*
119	 * Nothing needs to be set up in probe; we only need the struct
120	 * device here for the later ab8500 register reads and writes
121 */
122 pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
123 if (pwm == NULL) {
124 dev_err(&pdev->dev, "failed to allocate memory\n");
125 return -ENOMEM;
126 }
127 pwm->dev = &pdev->dev;
128 pwm->pwm_id = pdev->id;
129 list_add_tail(&pwm->node, &pwm_list);
130 platform_set_drvdata(pdev, pwm);
131 dev_dbg(pwm->dev, "pwm probe successful\n");
132 return 0;
133}
134
135static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
136{
137 struct pwm_device *pwm = platform_get_drvdata(pdev);
138 list_del(&pwm->node);
139 dev_dbg(&pdev->dev, "pwm driver removed\n");
140 kfree(pwm);
141 return 0;
142}
143
144static struct platform_driver ab8500_pwm_driver = {
145 .driver = {
146 .name = "ab8500-pwm",
147 .owner = THIS_MODULE,
148 },
149 .probe = ab8500_pwm_probe,
150 .remove = __devexit_p(ab8500_pwm_remove),
151};
152
153static int __init ab8500_pwm_init(void)
154{
155 return platform_driver_register(&ab8500_pwm_driver);
156}
157
158static void __exit ab8500_pwm_exit(void)
159{
160 platform_driver_unregister(&ab8500_pwm_driver);
161}
162
163subsys_initcall(ab8500_pwm_init);
164module_exit(ab8500_pwm_exit);
165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
167MODULE_ALIAS("AB8500 PWM driver");
168MODULE_LICENSE("GPL v2");
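
A hedged sketch of how a consumer (for instance a backlight driver, as the Kconfig text above suggests) might use the pwm_* API exported by this file. The PWM id (1) and the duty value are illustrative assumptions; note that pwm_config() in this driver ignores its period argument and uses only the low 10 bits of the duty value.

#include <linux/err.h>
#include <linux/pwm.h>

static struct pwm_device *bl_pwm;

static int example_backlight_on(unsigned int intensity)
{
	int ret;

	bl_pwm = pwm_request(1, "example-backlight");	/* id 1 is an assumption */
	if (IS_ERR(bl_pwm))
		return PTR_ERR(bl_pwm);

	/* only the low 10 bits of the duty value are used by this driver */
	ret = pwm_config(bl_pwm, intensity & 0x3FF, 0);
	if (ret < 0)
		return ret;

	return pwm_enable(bl_pwm);
}

static void example_backlight_off(void)
{
	pwm_free(bl_pwm);	/* pwm_free() also disables the output */
}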
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 000000000000..4ff73c215746
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,136 @@
1/*
2 * Driver for the Analog Devices digital potentiometers (I2C bus)
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/i2c.h>
10#include <linux/module.h>
11
12#include "ad525x_dpot.h"
13
14/* ------------------------------------------------------------------------- */
15/* I2C bus functions */
16static int write_d8(void *client, u8 val)
17{
18 return i2c_smbus_write_byte(client, val);
19}
20
21static int write_r8d8(void *client, u8 reg, u8 val)
22{
23 return i2c_smbus_write_byte_data(client, reg, val);
24}
25
26static int write_r8d16(void *client, u8 reg, u16 val)
27{
28 return i2c_smbus_write_word_data(client, reg, val);
29}
30
31static int read_d8(void *client)
32{
33 return i2c_smbus_read_byte(client);
34}
35
36static int read_r8d8(void *client, u8 reg)
37{
38 return i2c_smbus_read_byte_data(client, reg);
39}
40
41static int read_r8d16(void *client, u8 reg)
42{
43 return i2c_smbus_read_word_data(client, reg);
44}
45
46static const struct ad_dpot_bus_ops bops = {
47 .read_d8 = read_d8,
48 .read_r8d8 = read_r8d8,
49 .read_r8d16 = read_r8d16,
50 .write_d8 = write_d8,
51 .write_r8d8 = write_r8d8,
52 .write_r8d16 = write_r8d16,
53};
54
55static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
56 const struct i2c_device_id *id)
57{
58 struct ad_dpot_bus_data bdata = {
59 .client = client,
60 .bops = &bops,
61 };
62
63 struct ad_dpot_id dpot_id = {
64 .name = (char *) &id->name,
65 .devid = id->driver_data,
66 };
67
68 if (!i2c_check_functionality(client->adapter,
69 I2C_FUNC_SMBUS_WORD_DATA)) {
70 dev_err(&client->dev, "SMBUS Word Data not Supported\n");
71 return -EIO;
72 }
73
74 return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
75}
76
77static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
78{
79 return ad_dpot_remove(&client->dev);
80}
81
82static const struct i2c_device_id ad_dpot_id[] = {
83 {"ad5258", AD5258_ID},
84 {"ad5259", AD5259_ID},
85 {"ad5251", AD5251_ID},
86 {"ad5252", AD5252_ID},
87 {"ad5253", AD5253_ID},
88 {"ad5254", AD5254_ID},
89 {"ad5255", AD5255_ID},
90 {"ad5241", AD5241_ID},
91 {"ad5242", AD5242_ID},
92 {"ad5243", AD5243_ID},
93 {"ad5245", AD5245_ID},
94 {"ad5246", AD5246_ID},
95 {"ad5247", AD5247_ID},
96 {"ad5248", AD5248_ID},
97 {"ad5280", AD5280_ID},
98 {"ad5282", AD5282_ID},
99 {"adn2860", ADN2860_ID},
100 {"ad5273", AD5273_ID},
101 {"ad5171", AD5171_ID},
102 {"ad5170", AD5170_ID},
103 {"ad5172", AD5172_ID},
104 {"ad5173", AD5173_ID},
105 {"ad5272", AD5272_ID},
106 {"ad5274", AD5274_ID},
107 {}
108};
109MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
110
111static struct i2c_driver ad_dpot_i2c_driver = {
112 .driver = {
113 .name = "ad_dpot",
114 .owner = THIS_MODULE,
115 },
116 .probe = ad_dpot_i2c_probe,
117 .remove = __devexit_p(ad_dpot_i2c_remove),
118 .id_table = ad_dpot_id,
119};
120
121static int __init ad_dpot_i2c_init(void)
122{
123 return i2c_add_driver(&ad_dpot_i2c_driver);
124}
125module_init(ad_dpot_i2c_init);
126
127static void __exit ad_dpot_i2c_exit(void)
128{
129 i2c_del_driver(&ad_dpot_i2c_driver);
130}
131module_exit(ad_dpot_i2c_exit);
132
133MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
134MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
135MODULE_LICENSE("GPL");
136MODULE_ALIAS("i2c:ad_dpot");
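
A hedged board-code sketch for instantiating one of the I2C parts listed in the id table above. The bus number (0) and slave address (0x18) are assumptions for this sketch; the real address depends on the chip's address-pin strapping, and i2c_register_board_info() must run from board init code before the adapter is registered.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>

static struct i2c_board_info example_dpot_i2c_info[] __initdata = {
	{
		I2C_BOARD_INFO("ad5258", 0x18),	/* address is an assumption */
	},
};

static int __init example_board_add_dpot(void)
{
	return i2c_register_board_info(0, example_dpot_i2c_info,
				       ARRAY_SIZE(example_dpot_i2c_info));
}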
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 000000000000..7f9a55afe05d
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,174 @@
1/*
2 * Driver for the Analog Devices digital potentiometers (SPI bus)
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/spi/spi.h>
10#include <linux/module.h>
11
12#include "ad525x_dpot.h"
13
14static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
15 {.name = "ad5160", .devid = AD5160_ID},
16 {.name = "ad5161", .devid = AD5161_ID},
17 {.name = "ad5162", .devid = AD5162_ID},
18 {.name = "ad5165", .devid = AD5165_ID},
19 {.name = "ad5200", .devid = AD5200_ID},
20 {.name = "ad5201", .devid = AD5201_ID},
21 {.name = "ad5203", .devid = AD5203_ID},
22 {.name = "ad5204", .devid = AD5204_ID},
23 {.name = "ad5206", .devid = AD5206_ID},
24 {.name = "ad5207", .devid = AD5207_ID},
25 {.name = "ad5231", .devid = AD5231_ID},
26 {.name = "ad5232", .devid = AD5232_ID},
27 {.name = "ad5233", .devid = AD5233_ID},
28 {.name = "ad5235", .devid = AD5235_ID},
29 {.name = "ad5260", .devid = AD5260_ID},
30 {.name = "ad5262", .devid = AD5262_ID},
31 {.name = "ad5263", .devid = AD5263_ID},
32 {.name = "ad5290", .devid = AD5290_ID},
33 {.name = "ad5291", .devid = AD5291_ID},
34 {.name = "ad5292", .devid = AD5292_ID},
35 {.name = "ad5293", .devid = AD5293_ID},
36 {.name = "ad7376", .devid = AD7376_ID},
37 {.name = "ad8400", .devid = AD8400_ID},
38 {.name = "ad8402", .devid = AD8402_ID},
39 {.name = "ad8403", .devid = AD8403_ID},
40 {.name = "adn2850", .devid = ADN2850_ID},
41 {.name = "ad5270", .devid = AD5270_ID},
42 {.name = "ad5271", .devid = AD5271_ID},
43 {}
44};
45
46/* ------------------------------------------------------------------------- */
47
48/* SPI bus functions */
49static int write8(void *client, u8 val)
50{
51 u8 data = val;
52 return spi_write(client, &data, 1);
53}
54
55static int write16(void *client, u8 reg, u8 val)
56{
57 u8 data[2] = {reg, val};
58 return spi_write(client, data, 2);
59}
60
61static int write24(void *client, u8 reg, u16 val)
62{
63 u8 data[3] = {reg, val >> 8, val};
64 return spi_write(client, data, 3);
65}
66
67static int read8(void *client)
68{
69 int ret;
70 u8 data;
71 ret = spi_read(client, &data, 1);
72 if (ret < 0)
73 return ret;
74
75 return data;
76}
77
78static int read16(void *client, u8 reg)
79{
80 int ret;
81 u8 buf_rx[2];
82
83 write16(client, reg, 0);
84 ret = spi_read(client, buf_rx, 2);
85 if (ret < 0)
86 return ret;
87
88 return (buf_rx[0] << 8) | buf_rx[1];
89}
90
91static int read24(void *client, u8 reg)
92{
93 int ret;
94 u8 buf_rx[3];
95
96 write24(client, reg, 0);
97 ret = spi_read(client, buf_rx, 3);
98 if (ret < 0)
99 return ret;
100
101 return (buf_rx[1] << 8) | buf_rx[2];
102}
103
104static const struct ad_dpot_bus_ops bops = {
105 .read_d8 = read8,
106 .read_r8d8 = read16,
107 .read_r8d16 = read24,
108 .write_d8 = write8,
109 .write_r8d8 = write16,
110 .write_r8d16 = write24,
111};
112
113static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
114 char *name)
115{
116 while (id->name && id->name[0]) {
117 if (strcmp(name, id->name) == 0)
118 return id;
119 id++;
120 }
121 return NULL;
122}
123
124static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
125{
126 char *name = spi->dev.platform_data;
127 const struct ad_dpot_id *dpot_id;
128
129 struct ad_dpot_bus_data bdata = {
130 .client = spi,
131 .bops = &bops,
132 };
133
134 dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
135
136 if (dpot_id == NULL) {
137 dev_err(&spi->dev, "%s not in supported device list", name);
138 return -ENODEV;
139 }
140
141 return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
142}
143
144static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
145{
146 return ad_dpot_remove(&spi->dev);
147}
148
149static struct spi_driver ad_dpot_spi_driver = {
150 .driver = {
151 .name = "ad_dpot",
152 .bus = &spi_bus_type,
153 .owner = THIS_MODULE,
154 },
155 .probe = ad_dpot_spi_probe,
156 .remove = __devexit_p(ad_dpot_spi_remove),
157};
158
159static int __init ad_dpot_spi_init(void)
160{
161 return spi_register_driver(&ad_dpot_spi_driver);
162}
163module_init(ad_dpot_spi_init);
164
165static void __exit ad_dpot_spi_exit(void)
166{
167 spi_unregister_driver(&ad_dpot_spi_driver);
168}
169module_exit(ad_dpot_spi_exit);
170
171MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
172MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
173MODULE_LICENSE("GPL");
174MODULE_ALIAS("spi:ad_dpot");
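
The probe above identifies the chip through the name string handed in via spi->dev.platform_data and matched against ad_dpot_spi_devlist, so board code registers the generic "ad_dpot" modalias together with the part name. A hedged sketch follows; the bus number, chip select and clock rate are assumptions.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info example_dpot_spi_info[] __initdata = {
	{
		.modalias	= "ad_dpot",
		.platform_data	= "ad5204",	/* part name matched by the probe */
		.max_speed_hz	= 1000000,	/* assumption */
		.bus_num	= 1,		/* assumption */
		.chip_select	= 0,		/* assumption */
	},
};

static int __init example_board_add_spi_dpot(void)
{
	return spi_register_board_info(example_dpot_spi_info,
				       ARRAY_SIZE(example_dpot_spi_info));
}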
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
new file mode 100644
index 000000000000..7cb911028d09
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.c
@@ -0,0 +1,773 @@
1/*
2 * ad525x_dpot: Driver for the Analog Devices digital potentiometers
3 * Copyright (c) 2009-2010 Analog Devices, Inc.
4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
5 *
6 * DEVID #Wipers #Positions Resistor Options (kOhm)
7 * AD5258 1 64 1, 10, 50, 100
8 * AD5259 1 256 5, 10, 50, 100
9 * AD5251 2 64 1, 10, 50, 100
10 * AD5252 2 256 1, 10, 50, 100
11 * AD5255 3 512 25, 250
12 * AD5253 4 64 1, 10, 50, 100
13 * AD5254 4 256 1, 10, 50, 100
14 * AD5160 1 256 5, 10, 50, 100
15 * AD5161 1 256 5, 10, 50, 100
16 * AD5162 2 256 2.5, 10, 50, 100
17 * AD5165 1 256 100
18 * AD5200 1 256 10, 50
19 * AD5201 1 33 10, 50
20 * AD5203 4 64 10, 100
21 * AD5204 4 256 10, 50, 100
22 * AD5206 6 256 10, 50, 100
23 * AD5207 2 256 10, 50, 100
24 * AD5231 1 1024 10, 50, 100
25 * AD5232 2 256 10, 50, 100
26 * AD5233 4 64 10, 50, 100
27 * AD5235 2 1024 25, 250
28 * AD5260 1 256 20, 50, 200
29 * AD5262 2 256 20, 50, 200
30 * AD5263 4 256 20, 50, 200
31 * AD5290 1 256 10, 50, 100
32 * AD5291 1 256 20, 50, 100 (20-TP)
33 * AD5292 1 1024 20, 50, 100 (20-TP)
34 * AD5293 1 1024 20, 50, 100
35 * AD7376 1 128 10, 50, 100, 1M
36 * AD8400 1 256 1, 10, 50, 100
37 * AD8402 2 256 1, 10, 50, 100
38 * AD8403 4 256 1, 10, 50, 100
39 * ADN2850 3 512 25, 250
40 * AD5241 1 256 10, 100, 1M
41 * AD5246 1 128 5, 10, 50, 100
42 * AD5247 1 128 5, 10, 50, 100
43 * AD5245 1 256 5, 10, 50, 100
44 * AD5243 2 256 2.5, 10, 50, 100
45 * AD5248 2 256 2.5, 10, 50, 100
46 * AD5242 2 256 20, 50, 200
47 * AD5280 1 256 20, 50, 200
48 * AD5282 2 256 20, 50, 200
49 * ADN2860 3 512 25, 250
50 * AD5273 1 64 1, 10, 50, 100 (OTP)
51 * AD5171 1 64 5, 10, 50, 100 (OTP)
52 * AD5170 1 256 2.5, 10, 50, 100 (OTP)
53 * AD5172 2 256 2.5, 10, 50, 100 (OTP)
54 * AD5173 2 256 2.5, 10, 50, 100 (OTP)
55 * AD5270 1 1024 20, 50, 100 (50-TP)
56 * AD5271 1 256 20, 50, 100 (50-TP)
57 * AD5272 1 1024 20, 50, 100 (50-TP)
58 * AD5274 1 256 20, 50, 100 (50-TP)
59 *
60 * See Documentation/misc-devices/ad525x_dpot.txt for more info.
61 *
62 * derived from ad5258.c
63 * Copyright (c) 2009 Cyber Switching, Inc.
64 * Author: Chris Verges <chrisv@cyberswitching.com>
65 *
66 * derived from ad5252.c
67 * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
68 *
69 * Licensed under the GPL-2 or later.
70 */
71
72#include <linux/module.h>
73#include <linux/device.h>
74#include <linux/kernel.h>
75#include <linux/init.h>
76#include <linux/delay.h>
77#include <linux/slab.h>
78
79#define DRIVER_VERSION "0.2"
80
81#include "ad525x_dpot.h"
82
83/*
84 * Client data (each client gets its own)
85 */
86
87struct dpot_data {
88 struct ad_dpot_bus_data bdata;
89 struct mutex update_lock;
90 unsigned rdac_mask;
91 unsigned max_pos;
92 unsigned long devid;
93 unsigned uid;
94 unsigned feat;
95 unsigned wipers;
96 u16 rdac_cache[MAX_RDACS];
97 DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
98};
99
100static inline int dpot_read_d8(struct dpot_data *dpot)
101{
102 return dpot->bdata.bops->read_d8(dpot->bdata.client);
103}
104
105static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
106{
107 return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
108}
109
110static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
111{
112 return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
113}
114
115static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
116{
117 return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
118}
119
120static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
121{
122 return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
123}
124
125static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
126{
127 return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
128}
129
130static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
131{
132 unsigned ctrl = 0;
133 int value;
134
135 if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
136
137 if (dpot->feat & F_RDACS_WONLY)
138 return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
139 if (dpot->uid == DPOT_UID(AD5291_ID) ||
140 dpot->uid == DPOT_UID(AD5292_ID) ||
141 dpot->uid == DPOT_UID(AD5293_ID)) {
142
143 value = dpot_read_r8d8(dpot,
144 DPOT_AD5291_READ_RDAC << 2);
145
146 if (dpot->uid == DPOT_UID(AD5291_ID))
147 value = value >> 2;
148
149 return value;
150 } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
151 dpot->uid == DPOT_UID(AD5271_ID)) {
152
153 value = dpot_read_r8d8(dpot,
154 DPOT_AD5270_1_2_4_READ_RDAC << 2);
155
156 if (value < 0)
157 return value;
158
159 if (dpot->uid == DPOT_UID(AD5271_ID))
160 value = value >> 2;
161
162 return value;
163 }
164
165 ctrl = DPOT_SPI_READ_RDAC;
166 } else if (reg & DPOT_ADDR_EEPROM) {
167 ctrl = DPOT_SPI_READ_EEPROM;
168 }
169
170 if (dpot->feat & F_SPI_16BIT)
171 return dpot_read_r8d8(dpot, ctrl);
172 else if (dpot->feat & F_SPI_24BIT)
173 return dpot_read_r8d16(dpot, ctrl);
174
175 return -EFAULT;
176}
177
178static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
179{
180 int value;
181 unsigned ctrl = 0;
182 switch (dpot->uid) {
183 case DPOT_UID(AD5246_ID):
184 case DPOT_UID(AD5247_ID):
185 return dpot_read_d8(dpot);
186 case DPOT_UID(AD5245_ID):
187 case DPOT_UID(AD5241_ID):
188 case DPOT_UID(AD5242_ID):
189 case DPOT_UID(AD5243_ID):
190 case DPOT_UID(AD5248_ID):
191 case DPOT_UID(AD5280_ID):
192 case DPOT_UID(AD5282_ID):
193 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
194 0 : DPOT_AD5282_RDAC_AB;
195 return dpot_read_r8d8(dpot, ctrl);
196 case DPOT_UID(AD5170_ID):
197 case DPOT_UID(AD5171_ID):
198 case DPOT_UID(AD5273_ID):
199 return dpot_read_d8(dpot);
200 case DPOT_UID(AD5172_ID):
201 case DPOT_UID(AD5173_ID):
202 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
203 0 : DPOT_AD5172_3_A0;
204 return dpot_read_r8d8(dpot, ctrl);
205 case DPOT_UID(AD5272_ID):
206 case DPOT_UID(AD5274_ID):
207 dpot_write_r8d8(dpot,
208 (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
209
210 value = dpot_read_r8d16(dpot,
211 DPOT_AD5270_1_2_4_RDAC << 2);
212
213 if (value < 0)
214 return value;
215 /*
216 * AD5272/AD5274 returns high byte first, however
217			 * underlying SMBus expects low byte first.
218 */
219 value = swab16(value);
220
221 if (dpot->uid == DPOT_UID(AD5271_ID))
222 value = value >> 2;
223 return value;
224 default:
225 if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
226 return dpot_read_r8d16(dpot, (reg & 0xF8) |
227 ((reg & 0x7) << 1));
228 else
229 return dpot_read_r8d8(dpot, reg);
230 }
231}
232
233static s32 dpot_read(struct dpot_data *dpot, u8 reg)
234{
235 if (dpot->feat & F_SPI)
236 return dpot_read_spi(dpot, reg);
237 else
238 return dpot_read_i2c(dpot, reg);
239}
240
241static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
242{
243 unsigned val = 0;
244
245 if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
246 if (dpot->feat & F_RDACS_WONLY)
247 dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
248
249 if (dpot->feat & F_AD_APPDATA) {
250 if (dpot->feat & F_SPI_8BIT) {
251 val = ((reg & DPOT_RDAC_MASK) <<
252 DPOT_MAX_POS(dpot->devid)) |
253 value;
254 return dpot_write_d8(dpot, val);
255 } else if (dpot->feat & F_SPI_16BIT) {
256 val = ((reg & DPOT_RDAC_MASK) <<
257 DPOT_MAX_POS(dpot->devid)) |
258 value;
259 return dpot_write_r8d8(dpot, val >> 8,
260 val & 0xFF);
261 } else
262 BUG();
263 } else {
264 if (dpot->uid == DPOT_UID(AD5291_ID) ||
265 dpot->uid == DPOT_UID(AD5292_ID) ||
266 dpot->uid == DPOT_UID(AD5293_ID)) {
267
268 dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
269 DPOT_AD5291_UNLOCK_CMD);
270
271 if (dpot->uid == DPOT_UID(AD5291_ID))
272 value = value << 2;
273
274 return dpot_write_r8d8(dpot,
275 (DPOT_AD5291_RDAC << 2) |
276 (value >> 8), value & 0xFF);
277 } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
278 dpot->uid == DPOT_UID(AD5271_ID)) {
279 dpot_write_r8d8(dpot,
280 DPOT_AD5270_1_2_4_CTRLREG << 2,
281 DPOT_AD5270_1_2_4_UNLOCK_CMD);
282
283 if (dpot->uid == DPOT_UID(AD5271_ID))
284 value = value << 2;
285
286 return dpot_write_r8d8(dpot,
287 (DPOT_AD5270_1_2_4_RDAC << 2) |
288 (value >> 8), value & 0xFF);
289 }
290 val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
291 }
292 } else if (reg & DPOT_ADDR_EEPROM) {
293 val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
294 } else if (reg & DPOT_ADDR_CMD) {
295 switch (reg) {
296 case DPOT_DEC_ALL_6DB:
297 val = DPOT_SPI_DEC_ALL_6DB;
298 break;
299 case DPOT_INC_ALL_6DB:
300 val = DPOT_SPI_INC_ALL_6DB;
301 break;
302 case DPOT_DEC_ALL:
303 val = DPOT_SPI_DEC_ALL;
304 break;
305 case DPOT_INC_ALL:
306 val = DPOT_SPI_INC_ALL;
307 break;
308 }
309 } else if (reg & DPOT_ADDR_OTP) {
310 if (dpot->uid == DPOT_UID(AD5291_ID) ||
311 dpot->uid == DPOT_UID(AD5292_ID)) {
312 return dpot_write_r8d8(dpot,
313 DPOT_AD5291_STORE_XTPM << 2, 0);
314 } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
315 dpot->uid == DPOT_UID(AD5271_ID)) {
316 return dpot_write_r8d8(dpot,
317 DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
318 }
319 } else
320 BUG();
321
322 if (dpot->feat & F_SPI_16BIT)
323 return dpot_write_r8d8(dpot, val, value);
324 else if (dpot->feat & F_SPI_24BIT)
325 return dpot_write_r8d16(dpot, val, value);
326
327 return -EFAULT;
328}
329
330static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
331{
332 /* Only write the instruction byte for certain commands */
333 unsigned tmp = 0, ctrl = 0;
334
335 switch (dpot->uid) {
336 case DPOT_UID(AD5246_ID):
337 case DPOT_UID(AD5247_ID):
338 return dpot_write_d8(dpot, value);
339 break;
340
341 case DPOT_UID(AD5245_ID):
342 case DPOT_UID(AD5241_ID):
343 case DPOT_UID(AD5242_ID):
344 case DPOT_UID(AD5243_ID):
345 case DPOT_UID(AD5248_ID):
346 case DPOT_UID(AD5280_ID):
347 case DPOT_UID(AD5282_ID):
348 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
349 0 : DPOT_AD5282_RDAC_AB;
350 return dpot_write_r8d8(dpot, ctrl, value);
351 break;
352 case DPOT_UID(AD5171_ID):
353 case DPOT_UID(AD5273_ID):
354 if (reg & DPOT_ADDR_OTP) {
355 tmp = dpot_read_d8(dpot);
356 if (tmp >> 6) /* Ready to Program? */
357 return -EFAULT;
358 ctrl = DPOT_AD5273_FUSE;
359 }
360 return dpot_write_r8d8(dpot, ctrl, value);
361 break;
362 case DPOT_UID(AD5172_ID):
363 case DPOT_UID(AD5173_ID):
364 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
365 0 : DPOT_AD5172_3_A0;
366 if (reg & DPOT_ADDR_OTP) {
367 tmp = dpot_read_r8d16(dpot, ctrl);
368 if (tmp >> 14) /* Ready to Program? */
369 return -EFAULT;
370 ctrl |= DPOT_AD5170_2_3_FUSE;
371 }
372 return dpot_write_r8d8(dpot, ctrl, value);
373 break;
374 case DPOT_UID(AD5170_ID):
375 if (reg & DPOT_ADDR_OTP) {
376 tmp = dpot_read_r8d16(dpot, tmp);
377 if (tmp >> 14) /* Ready to Program? */
378 return -EFAULT;
379 ctrl = DPOT_AD5170_2_3_FUSE;
380 }
381 return dpot_write_r8d8(dpot, ctrl, value);
382 break;
383 case DPOT_UID(AD5272_ID):
384 case DPOT_UID(AD5274_ID):
385 dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
386 DPOT_AD5270_1_2_4_UNLOCK_CMD);
387
388 if (reg & DPOT_ADDR_OTP)
389 return dpot_write_r8d8(dpot,
390 DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
391
392 if (dpot->uid == DPOT_UID(AD5274_ID))
393 value = value << 2;
394
395 return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
396 (value >> 8), value & 0xFF);
397 break;
398 default:
399 if (reg & DPOT_ADDR_CMD)
400 return dpot_write_d8(dpot, reg);
401
402 if (dpot->max_pos > 256)
403 return dpot_write_r8d16(dpot, (reg & 0xF8) |
404 ((reg & 0x7) << 1), value);
405 else
406 /* All other registers require instruction + data bytes */
407 return dpot_write_r8d8(dpot, reg, value);
408 }
409}
410
411static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
412{
413 if (dpot->feat & F_SPI)
414 return dpot_write_spi(dpot, reg, value);
415 else
416 return dpot_write_i2c(dpot, reg, value);
417}
418
419/* sysfs functions */
420
421static ssize_t sysfs_show_reg(struct device *dev,
422 struct device_attribute *attr,
423 char *buf, u32 reg)
424{
425 struct dpot_data *data = dev_get_drvdata(dev);
426 s32 value;
427
428 if (reg & DPOT_ADDR_OTP_EN)
429 return sprintf(buf, "%s\n",
430 test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
431 "enabled" : "disabled");
432
433
434 mutex_lock(&data->update_lock);
435 value = dpot_read(data, reg);
436 mutex_unlock(&data->update_lock);
437
438 if (value < 0)
439 return -EINVAL;
440 /*
441 * Let someone else deal with converting this ...
442 * the tolerance is a two-byte value where the MSB
443 * is a sign + integer value, and the LSB is a
444 * decimal value. See page 18 of the AD5258
445 * datasheet (Rev. A) for more details.
446 */
447
448 if (reg & DPOT_REG_TOL)
449 return sprintf(buf, "0x%04x\n", value & 0xFFFF);
450 else
451 return sprintf(buf, "%u\n", value & data->rdac_mask);
452}
453
454static ssize_t sysfs_set_reg(struct device *dev,
455 struct device_attribute *attr,
456 const char *buf, size_t count, u32 reg)
457{
458 struct dpot_data *data = dev_get_drvdata(dev);
459 unsigned long value;
460 int err;
461
462 if (reg & DPOT_ADDR_OTP_EN) {
463 if (!strncmp(buf, "enabled", sizeof("enabled")))
464 set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
465 else
466 clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
467
468 return count;
469 }
470
471 if ((reg & DPOT_ADDR_OTP) &&
472 !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
473 return -EPERM;
474
475 err = strict_strtoul(buf, 10, &value);
476 if (err)
477 return err;
478
479 if (value > data->rdac_mask)
480 value = data->rdac_mask;
481
482 mutex_lock(&data->update_lock);
483 dpot_write(data, reg, value);
484 if (reg & DPOT_ADDR_EEPROM)
485 msleep(26); /* Sleep while the EEPROM updates */
486 else if (reg & DPOT_ADDR_OTP)
487 msleep(400); /* Sleep while the OTP updates */
488 mutex_unlock(&data->update_lock);
489
490 return count;
491}
492
493static ssize_t sysfs_do_cmd(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t count, u32 reg)
496{
497 struct dpot_data *data = dev_get_drvdata(dev);
498
499 mutex_lock(&data->update_lock);
500 dpot_write(data, reg, 0);
501 mutex_unlock(&data->update_lock);
502
503 return count;
504}
505
506/* ------------------------------------------------------------------------- */
507
508#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
509show_##_name(struct device *dev, \
510 struct device_attribute *attr, char *buf) \
511{ \
512 return sysfs_show_reg(dev, attr, buf, _reg); \
513}
514
515#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
516set_##_name(struct device *dev, \
517 struct device_attribute *attr, \
518 const char *buf, size_t count) \
519{ \
520 return sysfs_set_reg(dev, attr, buf, count, _reg); \
521}
522
523#define DPOT_DEVICE_SHOW_SET(name, reg) \
524DPOT_DEVICE_SHOW(name, reg) \
525DPOT_DEVICE_SET(name, reg) \
526static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
527
528#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
529DPOT_DEVICE_SHOW(name, reg) \
530static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
531
532DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
533DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
534DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
535DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
536DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
537
538DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
539DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
540DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
541DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
542DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
543
544DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
545DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
546DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
547DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
548DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
549
550DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
551DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
552DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
553DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
554DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
555
556DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
557DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
558DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
559DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
560DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
561
562DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
563DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
564DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
565DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
566DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
567
568static const struct attribute *dpot_attrib_wipers[] = {
569 &dev_attr_rdac0.attr,
570 &dev_attr_rdac1.attr,
571 &dev_attr_rdac2.attr,
572 &dev_attr_rdac3.attr,
573 &dev_attr_rdac4.attr,
574 &dev_attr_rdac5.attr,
575 NULL
576};
577
578static const struct attribute *dpot_attrib_eeprom[] = {
579 &dev_attr_eeprom0.attr,
580 &dev_attr_eeprom1.attr,
581 &dev_attr_eeprom2.attr,
582 &dev_attr_eeprom3.attr,
583 &dev_attr_eeprom4.attr,
584 &dev_attr_eeprom5.attr,
585 NULL
586};
587
588static const struct attribute *dpot_attrib_otp[] = {
589 &dev_attr_otp0.attr,
590 &dev_attr_otp1.attr,
591 &dev_attr_otp2.attr,
592 &dev_attr_otp3.attr,
593 &dev_attr_otp4.attr,
594 &dev_attr_otp5.attr,
595 NULL
596};
597
598static const struct attribute *dpot_attrib_otp_en[] = {
599 &dev_attr_otp0en.attr,
600 &dev_attr_otp1en.attr,
601 &dev_attr_otp2en.attr,
602 &dev_attr_otp3en.attr,
603 &dev_attr_otp4en.attr,
604 &dev_attr_otp5en.attr,
605 NULL
606};
607
608static const struct attribute *dpot_attrib_tolerance[] = {
609 &dev_attr_tolerance0.attr,
610 &dev_attr_tolerance1.attr,
611 &dev_attr_tolerance2.attr,
612 &dev_attr_tolerance3.attr,
613 &dev_attr_tolerance4.attr,
614 &dev_attr_tolerance5.attr,
615 NULL
616};
617
618/* ------------------------------------------------------------------------- */
619
620#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
621set_##_name(struct device *dev, \
622 struct device_attribute *attr, \
623 const char *buf, size_t count) \
624{ \
625 return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
626} \
627static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
628
629DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
630DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
631DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
632DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
633
634static struct attribute *ad525x_attributes_commands[] = {
635 &dev_attr_inc_all.attr,
636 &dev_attr_dec_all.attr,
637 &dev_attr_inc_all_6db.attr,
638 &dev_attr_dec_all_6db.attr,
639 NULL
640};
641
642static const struct attribute_group ad525x_group_commands = {
643 .attrs = ad525x_attributes_commands,
644};
645
646__devinit int ad_dpot_add_files(struct device *dev,
647 unsigned features, unsigned rdac)
648{
649 int err = sysfs_create_file(&dev->kobj,
650 dpot_attrib_wipers[rdac]);
651 if (features & F_CMD_EEP)
652 err |= sysfs_create_file(&dev->kobj,
653 dpot_attrib_eeprom[rdac]);
654 if (features & F_CMD_TOL)
655 err |= sysfs_create_file(&dev->kobj,
656 dpot_attrib_tolerance[rdac]);
657 if (features & F_CMD_OTP) {
658 err |= sysfs_create_file(&dev->kobj,
659 dpot_attrib_otp_en[rdac]);
660 err |= sysfs_create_file(&dev->kobj,
661 dpot_attrib_otp[rdac]);
662 }
663
664 if (err)
665 dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
666 rdac);
667
668 return err;
669}
670
671inline void ad_dpot_remove_files(struct device *dev,
672 unsigned features, unsigned rdac)
673{
674 sysfs_remove_file(&dev->kobj,
675 dpot_attrib_wipers[rdac]);
676 if (features & F_CMD_EEP)
677 sysfs_remove_file(&dev->kobj,
678 dpot_attrib_eeprom[rdac]);
679 if (features & F_CMD_TOL)
680 sysfs_remove_file(&dev->kobj,
681 dpot_attrib_tolerance[rdac]);
682 if (features & F_CMD_OTP) {
683 sysfs_remove_file(&dev->kobj,
684 dpot_attrib_otp_en[rdac]);
685 sysfs_remove_file(&dev->kobj,
686 dpot_attrib_otp[rdac]);
687 }
688}
689
690__devinit int ad_dpot_probe(struct device *dev,
691 struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
692{
693
694 struct dpot_data *data;
695 int i, err = 0;
696
697 data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
698 if (!data) {
699 err = -ENOMEM;
700 goto exit;
701 }
702
703 dev_set_drvdata(dev, data);
704 mutex_init(&data->update_lock);
705
706 data->bdata = *bdata;
707 data->devid = id->devid;
708
709 data->max_pos = 1 << DPOT_MAX_POS(data->devid);
710 data->rdac_mask = data->max_pos - 1;
711 data->feat = DPOT_FEAT(data->devid);
712 data->uid = DPOT_UID(data->devid);
713 data->wipers = DPOT_WIPERS(data->devid);
714
715 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
716 if (data->wipers & (1 << i)) {
717 err = ad_dpot_add_files(dev, data->feat, i);
718 if (err)
719 goto exit_remove_files;
720 /* power-up midscale */
721 if (data->feat & F_RDACS_WONLY)
722 data->rdac_cache[i] = data->max_pos / 2;
723 }
724
725 if (data->feat & F_CMD_INC)
726 err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
727
728 if (err) {
729 dev_err(dev, "failed to register sysfs hooks\n");
730 goto exit_free;
731 }
732
733 dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
734 id->name, data->max_pos);
735
736 return 0;
737
738exit_remove_files:
739 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
740 if (data->wipers & (1 << i))
741 ad_dpot_remove_files(dev, data->feat, i);
742
743exit_free:
744 kfree(data);
745 dev_set_drvdata(dev, NULL);
746exit:
747 dev_err(dev, "failed to create client for %s ID 0x%lX\n",
748 id->name, id->devid);
749 return err;
750}
751EXPORT_SYMBOL(ad_dpot_probe);
752
753__devexit int ad_dpot_remove(struct device *dev)
754{
755 struct dpot_data *data = dev_get_drvdata(dev);
756 int i;
757
758 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
759 if (data->wipers & (1 << i))
760 ad_dpot_remove_files(dev, data->feat, i);
761
762 kfree(data);
763
764 return 0;
765}
766EXPORT_SYMBOL(ad_dpot_remove);
767
768
769MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
770 "Michael Hennerich <hennerich@blackfin.uclinux.org>");
771MODULE_DESCRIPTION("Digital potentiometer driver");
772MODULE_LICENSE("GPL");
773MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 000000000000..a662f5987b68
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,219 @@
1/*
2 * Driver for the Analog Devices digital potentiometers
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef _AD_DPOT_H_
10#define _AD_DPOT_H_
11
12#include <linux/types.h>
13
14#define DPOT_CONF(features, wipers, max_pos, uid) \
15 (((features) << 18) | (((wipers) & 0xFF) << 10) | \
16 ((max_pos & 0xF) << 6) | (uid & 0x3F))
17
18#define DPOT_UID(conf) (conf & 0x3F)
19#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF)
20#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF)
21#define DPOT_FEAT(conf) (conf >> 18)
22
23#define BRDAC0 (1 << 0)
24#define BRDAC1 (1 << 1)
25#define BRDAC2 (1 << 2)
26#define BRDAC3 (1 << 3)
27#define BRDAC4 (1 << 4)
28#define BRDAC5 (1 << 5)
29#define MAX_RDACS 6
30
31#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */
32#define F_CMD_EEP (1 << 1) /* Features EEPROM */
33#define F_CMD_OTP (1 << 2) /* Features OTP */
34#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */
35#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */
36#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */
37#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */
38#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */
39#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */
40#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */
41
42#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
43#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP)
44#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
45
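/*
 * Worked example (editorial sketch) of the DPOT_CONF() packing above,
 * using the AD5258 entry from the enum that follows:
 *
 *   AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0)
 *             = ((F_RDACS_RW | F_CMD_EEP | F_CMD_TOL) << 18) |
 *               (BRDAC0 << 10) | (6 << 6) | 0
 *             = (0x1A << 18) | 0x400 | 0x180 = 0x680580
 *
 * so DPOT_MAX_POS(AD5258_ID) == 6, i.e. 1 << 6 = 64 wiper positions,
 * matching the AD5258 row in the table at the top of ad525x_dpot.c.
 */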
46enum dpot_devid {
47 AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
48 AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
49 AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
50 BRDAC1 | BRDAC3, 6, 2),
51 AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
52 BRDAC1 | BRDAC3, 8, 3),
53 AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
54 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
55 AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
56 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
57 AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
58 BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
59 AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
60 BRDAC0, 8, 7), /* SPI */
61 AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
62 BRDAC0, 8, 8),
63 AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
64 BRDAC0 | BRDAC1, 8, 9),
65 AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
66 BRDAC0, 8, 10),
67 AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
68 BRDAC0, 8, 11),
69 AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
70 BRDAC0, 5, 12),
71 AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
72 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
73 AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
74 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
75 AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
76 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
77 8, 15),
78 AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
79 BRDAC0 | BRDAC1, 8, 16),
80 AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
81 BRDAC0, 10, 17),
82 AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
83 BRDAC0 | BRDAC1, 8, 18),
84 AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
85 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
86 AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
87 BRDAC0 | BRDAC1, 10, 20),
88 AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
89 BRDAC0, 8, 21),
90 AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
91 BRDAC0 | BRDAC1, 8, 22),
92 AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
93 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
94 AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
95 BRDAC0, 8, 24),
96 AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
97 BRDAC0, 8, 25),
98 AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
99 BRDAC0, 10, 26),
100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
102 BRDAC0, 7, 28),
103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
104 BRDAC0, 8, 29),
105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
106 BRDAC0 | BRDAC1, 8, 30),
107 AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
108 BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
109 ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
110 BRDAC0 | BRDAC1, 10, 32),
111 AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
112 AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
113 AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
114 AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
115 AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
116 AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
117 AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
118 AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
119 AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
120 ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
121 BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
122 AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
123 AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
124 AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
125 AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
126 AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
127 AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
128 BRDAC0, 10, 48),
129 AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
130 BRDAC0, 8, 49),
131 AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50),
132 AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51),
133};
134
135#define DPOT_RDAC0 0
136#define DPOT_RDAC1 1
137#define DPOT_RDAC2 2
138#define DPOT_RDAC3 3
139#define DPOT_RDAC4 4
140#define DPOT_RDAC5 5
141
142#define DPOT_RDAC_MASK 0x1F
143
144#define DPOT_REG_TOL 0x18
145#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0)
146#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1)
147#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2)
148#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3)
149#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4)
150#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5)
151
152/* RDAC-to-EEPROM Interface Commands */
153#define DPOT_ADDR_RDAC (0x0 << 5)
154#define DPOT_ADDR_EEPROM (0x1 << 5)
155#define DPOT_ADDR_OTP (0x1 << 6)
156#define DPOT_ADDR_CMD (0x1 << 7)
157#define DPOT_ADDR_OTP_EN (0x1 << 9)
158
159#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3))
160#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3))
161#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3))
162#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3))
163
164#define DPOT_SPI_RDAC 0xB0
165#define DPOT_SPI_EEPROM 0x30
166#define DPOT_SPI_READ_RDAC 0xA0
167#define DPOT_SPI_READ_EEPROM 0x90
168#define DPOT_SPI_DEC_ALL_6DB 0x50
169#define DPOT_SPI_INC_ALL_6DB 0xD0
170#define DPOT_SPI_DEC_ALL 0x70
171#define DPOT_SPI_INC_ALL 0xF0
172
173/* AD5291/2/3 use special commands */
174#define DPOT_AD5291_RDAC 0x01
175#define DPOT_AD5291_READ_RDAC 0x02
176#define DPOT_AD5291_STORE_XTPM 0x03
177#define DPOT_AD5291_CTRLREG 0x06
178#define DPOT_AD5291_UNLOCK_CMD 0x03
179
180/* AD5270/1/2/4 use special commands */
181#define DPOT_AD5270_1_2_4_RDAC 0x01
182#define DPOT_AD5270_1_2_4_READ_RDAC 0x02
183#define DPOT_AD5270_1_2_4_STORE_XTPM 0x03
184#define DPOT_AD5270_1_2_4_CTRLREG 0x07
185#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03
186
187#define DPOT_AD5282_RDAC_AB 0x80
188
189#define DPOT_AD5273_FUSE 0x80
190#define DPOT_AD5170_2_3_FUSE 0x20
191#define DPOT_AD5170_2_3_OW 0x08
192#define DPOT_AD5172_3_A0 0x08
193#define DPOT_AD5170_2FUSE 0x80
194
195struct dpot_data;
196
197struct ad_dpot_bus_ops {
198 int (*read_d8) (void *client);
199 int (*read_r8d8) (void *client, u8 reg);
200 int (*read_r8d16) (void *client, u8 reg);
201 int (*write_d8) (void *client, u8 val);
202 int (*write_r8d8) (void *client, u8 reg, u8 val);
203 int (*write_r8d16) (void *client, u8 reg, u16 val);
204};
205
206struct ad_dpot_bus_data {
207 void *client;
208 const struct ad_dpot_bus_ops *bops;
209};
210
211struct ad_dpot_id {
212 char *name;
213 unsigned long devid;
214};
215
216int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
217int ad_dpot_remove(struct device *dev);
218
219#endif
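
The two prototypes above are what the bus glue modules call. As a rough sketch (not a copy of the real ad525x_dpot-i2c.c, whose details may differ), an I2C front end only needs to wrap the usual SMBus helpers in a struct ad_dpot_bus_ops and hand them, together with an ad_dpot_id, to ad_dpot_probe(); the i2c_driver registration boilerplate is omitted here.

#include <linux/i2c.h>
#include <linux/module.h>
#include "ad525x_dpot.h"

/* Thin wrappers so the bus-agnostic core can issue register accesses */
static int dpot_i2c_read_r8d8(void *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

static int dpot_i2c_write_r8d8(void *client, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(client, reg, val);
}

static const struct ad_dpot_bus_ops bops = {
	.read_r8d8  = dpot_i2c_read_r8d8,
	.write_r8d8 = dpot_i2c_write_r8d8,
	/* .read_d8, .read_r8d16, .write_d8, .write_r8d16 likewise if needed */
};

static int sketch_i2c_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	struct ad_dpot_bus_data bdata = {
		.client = client,
		.bops	= &bops,
	};
	struct ad_dpot_id dpot_id = {
		.name	= (char *)id->name,
		.devid	= id->driver_data,	/* e.g. AD5258_ID */
	};

	return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
}

static int sketch_i2c_remove(struct i2c_client *client)
{
	return ad_dpot_remove(&client->dev);
}
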
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
new file mode 100644
index 000000000000..644d4cd071cc
--- /dev/null
+++ b/drivers/misc/apds9802als.c
@@ -0,0 +1,346 @@
1/*
2 * apds9802als.c - apds9802 ALS Driver
3 *
4 * Copyright (C) 2009 Intel Corp
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/i2c.h>
28#include <linux/err.h>
29#include <linux/delay.h>
30#include <linux/mutex.h>
31#include <linux/sysfs.h>
32#include <linux/pm_runtime.h>
33
34#define ALS_MIN_RANGE_VAL 1
35#define ALS_MAX_RANGE_VAL 2
36#define POWER_STA_ENABLE 1
37#define POWER_STA_DISABLE 0
38
39#define DRIVER_NAME "apds9802als"
40
41struct als_data {
42 struct mutex mutex;
43};
44
45static ssize_t als_sensing_range_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct i2c_client *client = to_i2c_client(dev);
49 int val;
50
51 val = i2c_smbus_read_byte_data(client, 0x81);
52 if (val < 0)
53 return val;
54 if (val & 1)
55 return sprintf(buf, "4095\n");
56 else
57 return sprintf(buf, "65535\n");
58}
59
60static int als_wait_for_data_ready(struct device *dev)
61{
62 struct i2c_client *client = to_i2c_client(dev);
63 int ret;
64 int retry = 10;
65
66 do {
67 msleep(30);
68 ret = i2c_smbus_read_byte_data(client, 0x86);
69 } while (!(ret & 0x80) && retry--);
70
 71	if (retry < 0) {
72 dev_warn(dev, "timeout waiting for data ready\n");
73 return -ETIMEDOUT;
74 }
75
76 return 0;
77}
78
79static ssize_t als_lux0_input_data_show(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct i2c_client *client = to_i2c_client(dev);
83 struct als_data *data = i2c_get_clientdata(client);
84 int ret_val;
85 int temp;
86
87 /* Protect against parallel reads */
88 pm_runtime_get_sync(dev);
89 mutex_lock(&data->mutex);
90
91 /* clear EOC interrupt status */
92 i2c_smbus_write_byte(client, 0x40);
93 /* start measurement */
94 temp = i2c_smbus_read_byte_data(client, 0x81);
95 i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
96
97 ret_val = als_wait_for_data_ready(dev);
98 if (ret_val < 0)
99 goto failed;
100
101 temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
102 if (temp < 0) {
103 ret_val = temp;
104 goto failed;
105 }
106 ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
107 if (ret_val < 0)
108 goto failed;
109
110 mutex_unlock(&data->mutex);
111 pm_runtime_put_sync(dev);
112
113 temp = (ret_val << 8) | temp;
114 return sprintf(buf, "%d\n", temp);
115failed:
116 mutex_unlock(&data->mutex);
117 pm_runtime_put_sync(dev);
118 return ret_val;
119}
120
121static ssize_t als_sensing_range_store(struct device *dev,
122 struct device_attribute *attr, const char *buf, size_t count)
123{
124 struct i2c_client *client = to_i2c_client(dev);
125 struct als_data *data = i2c_get_clientdata(client);
126 int ret_val;
127 unsigned long val;
128
129 if (strict_strtoul(buf, 10, &val))
130 return -EINVAL;
131
132 if (val < 4096)
133 val = 1;
134 else if (val < 65536)
135 val = 2;
136 else
137 return -ERANGE;
138
139 pm_runtime_get_sync(dev);
140
141	/* Make sure nobody else reads/modifies/writes 0x81 while we
142	 * are active */
143 mutex_lock(&data->mutex);
144
145 ret_val = i2c_smbus_read_byte_data(client, 0x81);
146 if (ret_val < 0)
147 goto fail;
148
149 /* Reset the bits before setting them */
150 ret_val = ret_val & 0xFA;
151
152 if (val == 1) /* Setting detection range up to 4k LUX */
153 ret_val = (ret_val | 0x01);
154 else /* Setting detection range up to 64k LUX*/
155 ret_val = (ret_val | 0x00);
156
157 ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
158
159 if (ret_val >= 0) {
160 /* All OK */
161 mutex_unlock(&data->mutex);
162 pm_runtime_put_sync(dev);
163 return count;
164 }
165fail:
166 mutex_unlock(&data->mutex);
167 pm_runtime_put_sync(dev);
168 return ret_val;
169}
170
171static int als_set_power_state(struct i2c_client *client, bool on_off)
172{
173 int ret_val;
174 struct als_data *data = i2c_get_clientdata(client);
175
176 mutex_lock(&data->mutex);
177 ret_val = i2c_smbus_read_byte_data(client, 0x80);
178 if (ret_val < 0)
179 goto fail;
180 if (on_off)
181 ret_val = ret_val | 0x01;
182 else
183 ret_val = ret_val & 0xFE;
184 ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
185fail:
186 mutex_unlock(&data->mutex);
187 return ret_val;
188}
189
190static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
191 als_sensing_range_show, als_sensing_range_store);
192static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
193
194static struct attribute *mid_att_als[] = {
195 &dev_attr_lux0_sensor_range.attr,
196 &dev_attr_lux0_input.attr,
197 NULL
198};
199
200static struct attribute_group m_als_gr = {
201 .name = "apds9802als",
202 .attrs = mid_att_als
203};
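
For reference, the attributes in the "apds9802als" group above are the whole user-space interface of this driver. A minimal reader might look like the sketch below; the sysfs path is a placeholder, since the real i2c bus number and slave address depend on the board.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* placeholder path: substitute the actual <bus>-<addr> directory */
	const char *path =
		"/sys/bus/i2c/devices/5-0029/apds9802als/lux0_input";
	FILE *f = fopen(path, "r");
	int counts;

	if (!f) {
		perror("lux0_input");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &counts) == 1)
		printf("ambient light: %d counts\n", counts);
	fclose(f);
	return 0;
}
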
204
205static int als_set_default_config(struct i2c_client *client)
206{
207 int ret_val;
208 /* Write the command and then switch on */
209 ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
210 if (ret_val < 0) {
211 dev_err(&client->dev, "failed default switch on write\n");
212 return ret_val;
213 }
214	/* detection range: 1~64K Lux, manual measurement */
215 ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
216 if (ret_val < 0)
217 dev_err(&client->dev, "failed default LUX on write\n");
218
219 /* We always get 0 for the 1st measurement after system power on,
220 * so make sure it is finished before user asks for data.
221 */
222 als_wait_for_data_ready(&client->dev);
223
224 return ret_val;
225}
226
227static int apds9802als_probe(struct i2c_client *client,
228 const struct i2c_device_id *id)
229{
230 int res;
231 struct als_data *data;
232
233 data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
234 if (data == NULL) {
235 dev_err(&client->dev, "Memory allocation failed\n");
236 return -ENOMEM;
237 }
238 i2c_set_clientdata(client, data);
239 res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
240 if (res) {
241 dev_err(&client->dev, "device create file failed\n");
242 goto als_error1;
243 }
244 dev_info(&client->dev, "ALS chip found\n");
245 als_set_default_config(client);
246 mutex_init(&data->mutex);
247
248 pm_runtime_enable(&client->dev);
249 pm_runtime_get(&client->dev);
250 pm_runtime_put(&client->dev);
251
252 return res;
253als_error1:
254 kfree(data);
255 return res;
256}
257
258static int apds9802als_remove(struct i2c_client *client)
259{
260 struct als_data *data = i2c_get_clientdata(client);
261
262 als_set_power_state(client, false);
263 sysfs_remove_group(&client->dev.kobj, &m_als_gr);
264 kfree(data);
265 return 0;
266}
267
268#ifdef CONFIG_PM
269static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
270{
271 als_set_power_state(client, false);
272 return 0;
273}
274
275static int apds9802als_resume(struct i2c_client *client)
276{
277 als_set_default_config(client);
278
279 pm_runtime_get(&client->dev);
280 pm_runtime_put(&client->dev);
281 return 0;
282}
283
284static int apds9802als_runtime_suspend(struct device *dev)
285{
286 struct i2c_client *client = to_i2c_client(dev);
287
288 als_set_power_state(client, false);
289 return 0;
290}
291
292static int apds9802als_runtime_resume(struct device *dev)
293{
294 struct i2c_client *client = to_i2c_client(dev);
295
296 als_set_power_state(client, true);
297 return 0;
298}
299
300static const struct dev_pm_ops apds9802als_pm_ops = {
301 .runtime_suspend = apds9802als_runtime_suspend,
302 .runtime_resume = apds9802als_runtime_resume,
303};
304
305#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
306
307#else /* CONFIG_PM */
308#define apds9802als_suspend NULL
309#define apds9802als_resume NULL
310#define APDS9802ALS_PM_OPS NULL
311#endif /* CONFIG_PM */
312
313static struct i2c_device_id apds9802als_id[] = {
314 { DRIVER_NAME, 0 },
315 { }
316};
317
318MODULE_DEVICE_TABLE(i2c, apds9802als_id);
319
320static struct i2c_driver apds9802als_driver = {
321 .driver = {
322 .name = DRIVER_NAME,
323 .pm = APDS9802ALS_PM_OPS,
324 },
325 .probe = apds9802als_probe,
326 .remove = apds9802als_remove,
327 .suspend = apds9802als_suspend,
328 .resume = apds9802als_resume,
329 .id_table = apds9802als_id,
330};
331
332static int __init sensor_apds9802als_init(void)
333{
334 return i2c_add_driver(&apds9802als_driver);
335}
336
337static void __exit sensor_apds9802als_exit(void)
338{
339 i2c_del_driver(&apds9802als_driver);
340}
341module_init(sensor_apds9802als_init);
342module_exit(sensor_apds9802als_exit);
343
344MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com>");
345MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
346MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
new file mode 100644
index 000000000000..200311fea369
--- /dev/null
+++ b/drivers/misc/apds990x.c
@@ -0,0 +1,1295 @@
1/*
2 * This file is part of the APDS990x sensor driver.
3 * Chip is combined proximity and ambient light sensor.
4 *
5 * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
6 *
7 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/i2c.h>
28#include <linux/interrupt.h>
29#include <linux/mutex.h>
30#include <linux/regulator/consumer.h>
31#include <linux/pm_runtime.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/slab.h>
35#include <linux/i2c/apds990x.h>
36
37/* Register map */
38#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
39#define APDS990X_ATIME 0x01 /* ALS ADC time */
40#define APDS990X_PTIME 0x02 /* Proximity ADC time */
41#define APDS990X_WTIME 0x03 /* Wait time */
42#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */
43#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */
44#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */
45#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */
46#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */
47#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */
48#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */
49#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */
50#define APDS990X_PERS 0x0c /* Interrupt persistence filters */
51#define APDS990X_CONFIG 0x0d /* Configuration */
52#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
53#define APDS990X_CONTROL 0x0f /* Gain control register */
54#define APDS990X_REV 0x11 /* Revision Number */
55#define APDS990X_ID 0x12 /* Device ID */
56#define APDS990X_STATUS 0x13 /* Device status */
57#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */
58#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */
59#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
60#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
61#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */
62#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */
63
64/* Control */
65#define APDS990X_MAX_AGAIN 3
66
67/* Enable register */
68#define APDS990X_EN_PIEN (0x1 << 5)
69#define APDS990X_EN_AIEN (0x1 << 4)
70#define APDS990X_EN_WEN (0x1 << 3)
71#define APDS990X_EN_PEN (0x1 << 2)
72#define APDS990X_EN_AEN (0x1 << 1)
73#define APDS990X_EN_PON (0x1 << 0)
74#define APDS990X_EN_DISABLE_ALL 0
75
76/* Status register */
77#define APDS990X_ST_PINT (0x1 << 5)
78#define APDS990X_ST_AINT (0x1 << 4)
79
80/* I2C access types */
81#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
82#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */
83#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */
84#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */
85
86#define APDS990x_ADDR_SHIFT 0
87#define APDS990x_CMD 0x80
88
89/* Interrupt ack commands */
90#define APDS990X_INT_ACK_ALS 0x6
91#define APDS990X_INT_ACK_PS 0x5
92#define APDS990X_INT_ACK_BOTH 0x7
93
94/* ptime */
 95#define APDS990X_PTIME_DEFAULT	0xff /* Recommended conversion time 2.7 ms */
96
97/* wtime */
98#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
99
100#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */
101
102/* Persistence */
103#define APDS990X_APERS_SHIFT 0
104#define APDS990X_PPERS_SHIFT 4
105
106/* Supported ID:s */
107#define APDS990X_ID_0 0x0
108#define APDS990X_ID_4 0x4
109#define APDS990X_ID_29 0x29
110
111/* pgain and pdiode settings */
112#define APDS_PGAIN_1X 0x0
113#define APDS_PDIODE_IR 0x2
114
115#define APDS990X_LUX_OUTPUT_SCALE 10
116
117/* Reverse chip factors for threshold calculation */
118struct reverse_factors {
119 u32 afactor;
120 int cf1;
121 int irf1;
122 int cf2;
123 int irf2;
124};
125
126struct apds990x_chip {
127 struct apds990x_platform_data *pdata;
128 struct i2c_client *client;
129 struct mutex mutex; /* avoid parallel access */
130 struct regulator_bulk_data regs[2];
131 wait_queue_head_t wait;
132
133 int prox_en;
134 bool prox_continuous_mode;
135 bool lux_wait_fresh_res;
136
137 /* Chip parameters */
138 struct apds990x_chip_factors cf;
139 struct reverse_factors rcf;
140 u16 atime; /* als integration time */
141 u16 arate; /* als reporting rate */
142 u16 a_max_result; /* Max possible ADC value with current atime */
143 u8 again_meas; /* Gain used in last measurement */
144 u8 again_next; /* Next calculated gain */
145 u8 pgain;
146 u8 pdiode;
147 u8 pdrive;
148 u8 lux_persistence;
149 u8 prox_persistence;
150
151 u32 lux_raw;
152 u32 lux;
153 u16 lux_clear;
154 u16 lux_ir;
155 u16 lux_calib;
156 u32 lux_thres_hi;
157 u32 lux_thres_lo;
158
159 u32 prox_thres;
160 u16 prox_data;
161 u16 prox_calib;
162
163 char chipname[10];
164 u8 revision;
165};
166
167#define APDS_CALIB_SCALER 8192
168#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
169#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
170
171#define APDS_PROX_DEF_THRES 600
172#define APDS_PROX_HYSTERESIS 50
173#define APDS_LUX_DEF_THRES_HI 101
174#define APDS_LUX_DEF_THRES_LO 100
175#define APDS_DEFAULT_PROX_PERS 1
176
177#define APDS_TIMEOUT 2000
178#define APDS_STARTUP_DELAY 25000 /* us */
179#define APDS_RANGE 65535
180#define APDS_PROX_RANGE 1023
181#define APDS_LUX_GAIN_LO_LIMIT 100
182#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25
183
184#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */
185#define TIME_STEP_SCALER 32
186
187#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */
188#define APDS_LUX_DEFAULT_RATE 200
189
190static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
191static const u8 ir_currents[]	= {100, 50, 25, 12}; /* IR LED currents in mA */
192
193/* The two tables below must match, i.e. a 10Hz rate means persistence value 1 */
194static const u16 arates_hz[] = {10, 5, 2, 1};
195static const u8 apersis[] = {1, 2, 4, 5};
196
197/* Regulators */
198static const char reg_vcc[] = "Vdd";
199static const char reg_vled[] = "Vled";
200
201static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
202{
203 struct i2c_client *client = chip->client;
204 s32 ret;
205
206 reg &= ~APDS990x_CMD_TYPE_MASK;
207 reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
208
209 ret = i2c_smbus_read_byte_data(client, reg);
210 *data = ret;
211 return (int)ret;
212}
213
214static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
215{
216 struct i2c_client *client = chip->client;
217 s32 ret;
218
219 reg &= ~APDS990x_CMD_TYPE_MASK;
220 reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
221
222 ret = i2c_smbus_read_word_data(client, reg);
223 *data = ret;
224 return (int)ret;
225}
226
227static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
228{
229 struct i2c_client *client = chip->client;
230 s32 ret;
231
232 reg &= ~APDS990x_CMD_TYPE_MASK;
233 reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
234
235 ret = i2c_smbus_write_byte_data(client, reg, data);
236 return (int)ret;
237}
238
239static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
240{
241 struct i2c_client *client = chip->client;
242 s32 ret;
243
244 reg &= ~APDS990x_CMD_TYPE_MASK;
245 reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
246
247 ret = i2c_smbus_write_word_data(client, reg, data);
248 return (int)ret;
249}
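
Each helper above builds the SMBus command byte by OR-ing the register address with APDS990x_CMD and a transaction type. Worked out for two registers used elsewhere in this file (an illustration only; the constants are restated so it compiles standalone):

#include <assert.h>

#define APDS990x_CMD		0x80
#define APDS990x_CMD_TYPE_RB	(0x00 << 5)	/* repeated byte */
#define APDS990x_CMD_TYPE_INC	(0x01 << 5)	/* auto increment */
#define APDS990X_ENABLE		0x00
#define APDS990X_CDATAL		0x14

int main(void)
{
	/* byte access to ENABLE: 0x00 | 0x80 | 0x00 == 0x80 */
	assert((APDS990X_ENABLE | APDS990x_CMD | APDS990x_CMD_TYPE_RB) == 0x80);
	/* word access to CDATAL/CDATAH: 0x14 | 0x80 | 0x20 == 0xb4 */
	assert((APDS990X_CDATAL | APDS990x_CMD | APDS990x_CMD_TYPE_INC) == 0xb4);
	return 0;
}
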
250
251static int apds990x_mode_on(struct apds990x_chip *chip)
252{
253 /* ALS is mandatory, proximity optional */
254 u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
255 APDS990X_EN_WEN;
256
257 if (chip->prox_en)
258 reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
259
260 return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
261}
262
263static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
264{
265 u32 thres;
266 u32 cpl;
267 u32 ir;
268
269 if (lux == 0)
270 return 0;
271 else if (lux == APDS_RANGE)
272 return APDS_RANGE;
273
274 /*
275 * Reported LUX value is a combination of the IR and CLEAR channel
276 * values. However, interrupt threshold is only for clear channel.
277 * This function approximates needed HW threshold value for a given
278	 * LUX value in the current lighting conditions.
279	 * The IR level relative to visible light varies heavily depending on
280	 * the source of the light.
281 *
282 * Calculate threshold value for the next measurement period.
283 * Math: threshold = lux * cpl where
284 * cpl = atime * again / (glass_attenuation * device_factor)
285 * (count-per-lux)
286 *
287 * First remove calibration. Division by four is to avoid overflow
288 */
289 lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
290
291 /* Multiplication by 64 is to increase accuracy */
292 cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
293 APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
294
295 thres = lux * cpl / 64;
296 /*
297 * Convert IR light from the latest result to match with
298 * new gain step. This helps to adapt with the current
299 * source of light.
300 */
301 ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
302 (u32)again[chip->again_meas];
303
304 /*
305 * Compensate count with IR light impact
306 * IAC1 > IAC2 (see apds990x_get_lux for formulas)
307 */
308 if (chip->lux_clear * APDS_PARAM_SCALE >=
309 chip->rcf.afactor * chip->lux_ir)
310 thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
311 APDS_PARAM_SCALE;
312 else
313 thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
314 APDS_PARAM_SCALE;
315
316 if (thres >= chip->a_max_result)
317 thres = chip->a_max_result - 1;
318 return thres;
319}
320
321static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
322{
323 u8 reg_value;
324
325 chip->atime = time_ms;
326 /* Formula is specified in the data sheet */
327 reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
328 /* Calculate max ADC value for given integration time */
329 chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
330 return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
331}
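
Plugging in the driver's default integration time makes the formula concrete. With time_ms = 50 (APDS_LUX_AVERAGING_TIME) and one 2.7 ms tick approximated as 87/32 ms, a worked example (not driver code):

#include <assert.h>

#define TIMESTEP		87	/* 2.7 ms is about 87 / 32 */
#define TIME_STEP_SCALER	32
#define APDS990X_TIME_TO_ADC	1024

int main(void)
{
	unsigned int time_ms = 50;	/* APDS_LUX_AVERAGING_TIME */
	unsigned int reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
	unsigned int a_max_result = (256 - reg_value) * APDS990X_TIME_TO_ADC;

	assert(reg_value == 238);	/* 50 * 32 / 87 = 18 ticks, so 256 - 18 */
	assert(a_max_result == 18432);	/* 18 * 1024 possible ADC counts */
	return 0;
}
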
332
333/* Called always with mutex locked */
334static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
335{
336 int ret, lo, hi;
337
338 /* If the chip is not in use, don't try to access it */
339 if (pm_runtime_suspended(&chip->client->dev))
340 return 0;
341
342 if (data < chip->prox_thres) {
343 lo = 0;
344 hi = chip->prox_thres;
345 } else {
346 lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
347 if (chip->prox_continuous_mode)
348 hi = chip->prox_thres;
349 else
350 hi = APDS_RANGE;
351 }
352
353 ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
354 ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
355 return ret;
356}
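
With the default threshold of 600 and a hysteresis of 50, the window programmed above works out as follows in trigger mode. A hypothetical walk-through that mirrors the decision logic, not driver code:

#include <assert.h>

#define APDS_PROX_DEF_THRES	600
#define APDS_PROX_HYSTERESIS	50
#define APDS_RANGE		65535

static void window(int data, int thres, int continuous, int *lo, int *hi)
{
	/* same decision logic as apds990x_refresh_pthres() above */
	if (data < thres) {
		*lo = 0;
		*hi = thres;
	} else {
		*lo = thres - APDS_PROX_HYSTERESIS;
		*hi = continuous ? thres : APDS_RANGE;
	}
}

int main(void)
{
	int lo, hi;

	/* nothing near: interrupt only once the reading rises above 600 */
	window(120, APDS_PROX_DEF_THRES, 0, &lo, &hi);
	assert(lo == 0 && hi == 600);

	/* object detected: interrupt again only when it drops below 550 */
	window(800, APDS_PROX_DEF_THRES, 0, &lo, &hi);
	assert(lo == 550 && hi == 65535);
	return 0;
}
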
357
358/* Called always with mutex locked */
359static int apds990x_refresh_athres(struct apds990x_chip *chip)
360{
361 int ret;
362 /* If the chip is not in use, don't try to access it */
363 if (pm_runtime_suspended(&chip->client->dev))
364 return 0;
365
366 ret = apds990x_write_word(chip, APDS990X_AILTL,
367 apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
368 ret |= apds990x_write_word(chip, APDS990X_AIHTL,
369 apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
370
371 return ret;
372}
373
374/* Called always with mutex locked */
375static void apds990x_force_a_refresh(struct apds990x_chip *chip)
376{
377 /* This will force ALS interrupt after the next measurement. */
378 apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
379 apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
380}
381
382/* Called always with mutex locked */
383static void apds990x_force_p_refresh(struct apds990x_chip *chip)
384{
385 /* This will force proximity interrupt after the next measurement. */
386 apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
387 apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
388}
389
390/* Called always with mutex locked */
391static int apds990x_calc_again(struct apds990x_chip *chip)
392{
393 int curr_again = chip->again_meas;
394 int next_again = chip->again_meas;
395 int ret = 0;
396
397 /* Calculate suitable als gain */
398 if (chip->lux_clear == chip->a_max_result)
399 next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
400 else if (chip->lux_clear > chip->a_max_result / 2)
401 next_again--;
402 else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
403 next_again += 2; /* Too dark. Increase gain by 2 steps */
404 else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
405 next_again++;
406
407 /* Limit gain to available range */
408 if (next_again < 0)
409 next_again = 0;
410 else if (next_again > APDS990X_MAX_AGAIN)
411 next_again = APDS990X_MAX_AGAIN;
412
413	/* Check whether we can trust the measured result */
414 if (chip->lux_clear == chip->a_max_result)
415 /* Result can be totally garbage due to saturation */
416 ret = -ERANGE;
417 else if (next_again != curr_again &&
418 chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
419 /*
420 * Gain is changed and measurement result is very small.
421 * Result can be totally garbage due to underflow
422 */
423 ret = -ERANGE;
424
425 chip->again_next = next_again;
426 apds990x_write_byte(chip, APDS990X_CONTROL,
427 (chip->pdrive << 6) |
428 (chip->pdiode << 4) |
429 (chip->pgain << 2) |
430 (chip->again_next << 0));
431
432 /*
433 * Error means bad result -> re-measurement is needed. The forced
434 * refresh uses fastest possible persistence setting to get result
435 * as soon as possible.
436 */
437 if (ret < 0)
438 apds990x_force_a_refresh(chip);
439 else
440 apds990x_refresh_athres(chip);
441
442 return ret;
443}
444
445/* Called always with mutex locked */
446static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
447{
448 int iac, iac1, iac2; /* IR adjusted counts */
449 u32 lpc; /* Lux per count */
450
451 /* Formulas:
452 * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
453 * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
454 */
455 iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
456 iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
457
458 iac = max(iac1, iac2);
459 iac = max(iac, 0);
460
461 lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
462 (u32)(again[chip->again_meas] * (u32)chip->atime);
463
464 return (iac * lpc) / APDS_PARAM_SCALE;
465}
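
A worked example of the lux math above, using the uncovered-sensor defaults that apds990x_probe() sets further down (cf1 4096, irf1 9134, cf2 2867, irf2 5816, ga 1966, df 52) and APDS_PARAM_SCALE = 4096, which the "1.00 * APDS_PARAM_SCALE" comments there imply. The ADC readings are hypothetical.

#include <assert.h>

#define APDS_PARAM_SCALE		4096
#define APDS990X_LUX_OUTPUT_SCALE	10

int main(void)
{
	/* uncovered-sensor defaults from apds990x_probe() further down */
	int cf1 = 4096, irf1 = 9134, cf2 = 2867, irf2 = 5816, ga = 1966, df = 52;
	int clear = 1000, ir = 200;	/* hypothetical ADC readings */
	int gain = 8, atime = 50;	/* again[1] and APDS_LUX_AVERAGING_TIME */
	int iac1, iac2, iac, lpc, lux;

	iac1 = (cf1 * clear - irf1 * ir) / APDS_PARAM_SCALE;	/* 554 */
	iac2 = (cf2 * clear - irf2 * ir) / APDS_PARAM_SCALE;	/* 415 */
	iac = iac1 > iac2 ? iac1 : iac2;

	lpc = APDS990X_LUX_OUTPUT_SCALE * (df * ga) / (gain * atime); /* 2555 */
	lux = iac * lpc / APDS_PARAM_SCALE;

	assert(lux == 345);	/* shown by lux0_input as "34.5" (neutral calib) */
	return 0;
}
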
466
467static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
468{
469 struct i2c_client *client = chip->client;
470 s32 ret;
471 u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
472
473 switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
474 case APDS990X_ST_AINT:
475 reg |= APDS990X_INT_ACK_ALS;
476 break;
477 case APDS990X_ST_PINT:
478 reg |= APDS990X_INT_ACK_PS;
479 break;
480 default:
481 reg |= APDS990X_INT_ACK_BOTH;
482 break;
483 }
484
485 ret = i2c_smbus_read_byte_data(client, reg);
486 return (int)ret;
487}
488
489static irqreturn_t apds990x_irq(int irq, void *data)
490{
491 struct apds990x_chip *chip = data;
492 u8 status;
493
494 apds990x_read_byte(chip, APDS990X_STATUS, &status);
495 apds990x_ack_int(chip, status);
496
497 mutex_lock(&chip->mutex);
498 if (!pm_runtime_suspended(&chip->client->dev)) {
499 if (status & APDS990X_ST_AINT) {
500 apds990x_read_word(chip, APDS990X_CDATAL,
501 &chip->lux_clear);
502 apds990x_read_word(chip, APDS990X_IRDATAL,
503 &chip->lux_ir);
504 /* Store used gain for calculations */
505 chip->again_meas = chip->again_next;
506
507 chip->lux_raw = apds990x_get_lux(chip,
508 chip->lux_clear,
509 chip->lux_ir);
510
511 if (apds990x_calc_again(chip) == 0) {
512 /* Result is valid */
513 chip->lux = chip->lux_raw;
514 chip->lux_wait_fresh_res = false;
515 wake_up(&chip->wait);
516 sysfs_notify(&chip->client->dev.kobj,
517 NULL, "lux0_input");
518 }
519 }
520
521 if ((status & APDS990X_ST_PINT) && chip->prox_en) {
522 u16 clr_ch;
523
524 apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
525 /*
526 * If ALS channel is saturated at min gain,
527			 * proximity gives false positive values.
528 * Just ignore them.
529 */
530 if (chip->again_meas == 0 &&
531 clr_ch == chip->a_max_result)
532 chip->prox_data = 0;
533 else
534 apds990x_read_word(chip,
535 APDS990X_PDATAL,
536 &chip->prox_data);
537
538 apds990x_refresh_pthres(chip, chip->prox_data);
539 if (chip->prox_data < chip->prox_thres)
540 chip->prox_data = 0;
541 else if (!chip->prox_continuous_mode)
542 chip->prox_data = APDS_PROX_RANGE;
543 sysfs_notify(&chip->client->dev.kobj,
544 NULL, "prox0_raw");
545 }
546 }
547 mutex_unlock(&chip->mutex);
548 return IRQ_HANDLED;
549}
550
551static int apds990x_configure(struct apds990x_chip *chip)
552{
553 /* It is recommended to use disabled mode during these operations */
554 apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
555
556	/* conversion and wait times for the different state machine states */
557 apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
558 apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
559 apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
560
561 apds990x_write_byte(chip, APDS990X_CONFIG, 0);
562
563 /* Persistence levels */
564 apds990x_write_byte(chip, APDS990X_PERS,
565 (chip->lux_persistence << APDS990X_APERS_SHIFT) |
566 (chip->prox_persistence << APDS990X_PPERS_SHIFT));
567
568 apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
569
570 /* Start with relatively small gain */
571 chip->again_meas = 1;
572 chip->again_next = 1;
573 apds990x_write_byte(chip, APDS990X_CONTROL,
574 (chip->pdrive << 6) |
575 (chip->pdiode << 4) |
576 (chip->pgain << 2) |
577 (chip->again_next << 0));
578 return 0;
579}
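
The final CONTROL write above packs four fields into one byte. With the defaults used here (pdiode = IR, pgain = 1x, initial gain index 1) and, hypothetically, pdrive = 0 (100 mA per the ir_currents table), the register value works out to 0x21:

#include <assert.h>

int main(void)
{
	unsigned int pdrive = 0;	/* hypothetical: 100 mA LED current */
	unsigned int pdiode = 0x2;	/* APDS_PDIODE_IR */
	unsigned int pgain = 0x0;	/* APDS_PGAIN_1X */
	unsigned int again_next = 1;	/* 8x ALS gain step */

	assert(((pdrive << 6) | (pdiode << 4) | (pgain << 2) | again_next)
	       == 0x21);
	return 0;
}
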
580
581static int apds990x_detect(struct apds990x_chip *chip)
582{
583 struct i2c_client *client = chip->client;
584 int ret;
585 u8 id;
586
587 ret = apds990x_read_byte(chip, APDS990X_ID, &id);
588 if (ret < 0) {
589 dev_err(&client->dev, "ID read failed\n");
590 return ret;
591 }
592
593 ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
594 if (ret < 0) {
595 dev_err(&client->dev, "REV read failed\n");
596 return ret;
597 }
598
599 switch (id) {
600 case APDS990X_ID_0:
601 case APDS990X_ID_4:
602 case APDS990X_ID_29:
603 snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
604 break;
605 default:
606 ret = -ENODEV;
607 break;
608 }
609 return ret;
610}
611
612static int apds990x_chip_on(struct apds990x_chip *chip)
613{
614 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
615 chip->regs);
616 if (err < 0)
617 return err;
618
619 usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
620
621	/* Refresh all configs in case the regulators were off */
622 chip->prox_data = 0;
623 apds990x_configure(chip);
624 apds990x_mode_on(chip);
625 return 0;
626}
627
628static int apds990x_chip_off(struct apds990x_chip *chip)
629{
630 apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
631 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
632 return 0;
633}
634
635static ssize_t apds990x_lux_show(struct device *dev,
636 struct device_attribute *attr, char *buf)
637{
638 struct apds990x_chip *chip = dev_get_drvdata(dev);
639 ssize_t ret;
640 u32 result;
641 long timeout;
642
643 if (pm_runtime_suspended(dev))
644 return -EIO;
645
646 timeout = wait_event_interruptible_timeout(chip->wait,
647 !chip->lux_wait_fresh_res,
648 msecs_to_jiffies(APDS_TIMEOUT));
649 if (!timeout)
650 return -EIO;
651
652 mutex_lock(&chip->mutex);
653 result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
654 if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
655 result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
656
657 ret = sprintf(buf, "%d.%d\n",
658 result / APDS990X_LUX_OUTPUT_SCALE,
659 result % APDS990X_LUX_OUTPUT_SCALE);
660 mutex_unlock(&chip->mutex);
661 return ret;
662}
663
664static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
665
666static ssize_t apds990x_lux_range_show(struct device *dev,
667 struct device_attribute *attr, char *buf)
668{
669 return sprintf(buf, "%u\n", APDS_RANGE);
670}
671
672static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
673
674static ssize_t apds990x_lux_calib_format_show(struct device *dev,
675 struct device_attribute *attr, char *buf)
676{
677 return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
678}
679
680static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
681 apds990x_lux_calib_format_show, NULL);
682
683static ssize_t apds990x_lux_calib_show(struct device *dev,
684 struct device_attribute *attr, char *buf)
685{
686 struct apds990x_chip *chip = dev_get_drvdata(dev);
687
688 return sprintf(buf, "%u\n", chip->lux_calib);
689}
690
691static ssize_t apds990x_lux_calib_store(struct device *dev,
692 struct device_attribute *attr,
693 const char *buf, size_t len)
694{
695 struct apds990x_chip *chip = dev_get_drvdata(dev);
696 unsigned long value;
697
698 if (strict_strtoul(buf, 0, &value))
699 return -EINVAL;
700
701	if (value > APDS_RANGE)
702 return -EINVAL;
703
704 chip->lux_calib = value;
705
706 return len;
707}
708
709static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
710 apds990x_lux_calib_store);
711
712static ssize_t apds990x_rate_avail(struct device *dev,
713 struct device_attribute *attr, char *buf)
714{
715 int i;
716 int pos = 0;
717 for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
718 pos += sprintf(buf + pos, "%d ", arates_hz[i]);
719 sprintf(buf + pos - 1, "\n");
720 return pos;
721}
722
723static ssize_t apds990x_rate_show(struct device *dev,
724 struct device_attribute *attr, char *buf)
725{
726 struct apds990x_chip *chip = dev_get_drvdata(dev);
727 return sprintf(buf, "%d\n", chip->arate);
728}
729
730static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
731{
732 int i;
733
734 for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
735 if (rate >= arates_hz[i])
736 break;
737
738 if (i == ARRAY_SIZE(arates_hz))
739 return -EINVAL;
740
741 /* Pick up corresponding persistence value */
742 chip->lux_persistence = apersis[i];
743 chip->arate = arates_hz[i];
744
745 /* If the chip is not in use, don't try to access it */
746 if (pm_runtime_suspended(&chip->client->dev))
747 return 0;
748
749 /* Persistence levels */
750 return apds990x_write_byte(chip, APDS990X_PERS,
751 (chip->lux_persistence << APDS990X_APERS_SHIFT) |
752 (chip->prox_persistence << APDS990X_PPERS_SHIFT));
753}
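
The table walk above picks the first rate not larger than the request and the persistence value at the same index. For example, asking for 7 Hz lands on the 5 Hz entry with persistence 2, and anything below 1 Hz is rejected. A standalone restatement of the lookup, not driver code:

#include <assert.h>
#include <stddef.h>

static const unsigned short arates_hz[] = {10, 5, 2, 1};
static const unsigned char apersis[] = {1, 2, 4, 5};

static int pick(int rate, int *hz, int *pers)
{
	size_t i;

	for (i = 0; i < sizeof(arates_hz) / sizeof(arates_hz[0]); i++)
		if (rate >= arates_hz[i])
			break;
	if (i == sizeof(arates_hz) / sizeof(arates_hz[0]))
		return -1;
	*hz = arates_hz[i];
	*pers = apersis[i];
	return 0;
}

int main(void)
{
	int hz, pers;

	assert(pick(7, &hz, &pers) == 0 && hz == 5 && pers == 2);
	assert(pick(200, &hz, &pers) == 0 && hz == 10 && pers == 1);
	assert(pick(0, &hz, &pers) == -1);	/* below 1 Hz: -EINVAL above */
	return 0;
}
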
754
755static ssize_t apds990x_rate_store(struct device *dev,
756 struct device_attribute *attr,
757 const char *buf, size_t len)
758{
759 struct apds990x_chip *chip = dev_get_drvdata(dev);
760 unsigned long value;
761 int ret;
762
763 if (strict_strtoul(buf, 0, &value))
764 return -EINVAL;
765
766 mutex_lock(&chip->mutex);
767 ret = apds990x_set_arate(chip, value);
768 mutex_unlock(&chip->mutex);
769
770 if (ret < 0)
771 return ret;
772 return len;
773}
774
775static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
776
777static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
778 apds990x_rate_store);
779
780static ssize_t apds990x_prox_show(struct device *dev,
781 struct device_attribute *attr, char *buf)
782{
783 ssize_t ret;
784 struct apds990x_chip *chip = dev_get_drvdata(dev);
785 if (pm_runtime_suspended(dev) || !chip->prox_en)
786 return -EIO;
787
788 mutex_lock(&chip->mutex);
789 ret = sprintf(buf, "%d\n", chip->prox_data);
790 mutex_unlock(&chip->mutex);
791 return ret;
792}
793
794static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
795
796static ssize_t apds990x_prox_range_show(struct device *dev,
797 struct device_attribute *attr, char *buf)
798{
799 return sprintf(buf, "%u\n", APDS_PROX_RANGE);
800}
801
802static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
803
804static ssize_t apds990x_prox_enable_show(struct device *dev,
805 struct device_attribute *attr, char *buf)
806{
807 struct apds990x_chip *chip = dev_get_drvdata(dev);
808 return sprintf(buf, "%d\n", chip->prox_en);
809}
810
811static ssize_t apds990x_prox_enable_store(struct device *dev,
812 struct device_attribute *attr,
813 const char *buf, size_t len)
814{
815 struct apds990x_chip *chip = dev_get_drvdata(dev);
816 unsigned long value;
817
818 if (strict_strtoul(buf, 0, &value))
819 return -EINVAL;
820
821 mutex_lock(&chip->mutex);
822
823 if (!chip->prox_en)
824 chip->prox_data = 0;
825
826 if (value)
827 chip->prox_en++;
828 else if (chip->prox_en > 0)
829 chip->prox_en--;
830
831 if (!pm_runtime_suspended(dev))
832 apds990x_mode_on(chip);
833 mutex_unlock(&chip->mutex);
834 return len;
835}
836
837static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
838 apds990x_prox_enable_store);
839
840static const char reporting_modes[][9] = {"trigger", "periodic"};
841
842static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
843 struct device_attribute *attr, char *buf)
844{
845 struct apds990x_chip *chip = dev_get_drvdata(dev);
846 return sprintf(buf, "%s\n",
847 reporting_modes[!!chip->prox_continuous_mode]);
848}
849
850static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
851 struct device_attribute *attr,
852 const char *buf, size_t len)
853{
854 struct apds990x_chip *chip = dev_get_drvdata(dev);
855
856 if (sysfs_streq(buf, reporting_modes[0]))
857 chip->prox_continuous_mode = 0;
858 else if (sysfs_streq(buf, reporting_modes[1]))
859 chip->prox_continuous_mode = 1;
860 else
861 return -EINVAL;
862 return len;
863}
864
865static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
866 apds990x_prox_reporting_mode_show,
867 apds990x_prox_reporting_mode_store);
868
869static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
870 struct device_attribute *attr, char *buf)
871{
872 return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
873}
874
875static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
876 apds990x_prox_reporting_avail_show, NULL);
877
878
879static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
880 struct device_attribute *attr, char *buf)
881{
882 struct apds990x_chip *chip = dev_get_drvdata(dev);
883 return sprintf(buf, "%d\n", chip->lux_thres_hi);
884}
885
886static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
887 struct device_attribute *attr, char *buf)
888{
889 struct apds990x_chip *chip = dev_get_drvdata(dev);
890 return sprintf(buf, "%d\n", chip->lux_thres_lo);
891}
892
893static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
894 const char *buf)
895{
896 int ret = 0;
897 unsigned long thresh;
898
899 if (strict_strtoul(buf, 0, &thresh))
900 return -EINVAL;
901
902 if (thresh > APDS_RANGE)
903 return -EINVAL;
904
905 mutex_lock(&chip->mutex);
906 *target = thresh;
907 /*
908 * Don't update values in HW if we are still waiting for
909 * first interrupt to come after device handle open call.
910 */
911 if (!chip->lux_wait_fresh_res)
912 apds990x_refresh_athres(chip);
913 mutex_unlock(&chip->mutex);
914 return ret;
915
916}
917
918static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
919 struct device_attribute *attr,
920 const char *buf, size_t len)
921{
922 struct apds990x_chip *chip = dev_get_drvdata(dev);
923 int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
924 if (ret < 0)
925 return ret;
926 return len;
927}
928
929static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
930 struct device_attribute *attr,
931 const char *buf, size_t len)
932{
933 struct apds990x_chip *chip = dev_get_drvdata(dev);
934 int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
935 if (ret < 0)
936 return ret;
937 return len;
938}
939
940static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
941 apds990x_lux_thresh_above_show,
942 apds990x_lux_thresh_above_store);
943
944static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
945 apds990x_lux_thresh_below_show,
946 apds990x_lux_thresh_below_store);
947
948static ssize_t apds990x_prox_threshold_show(struct device *dev,
949 struct device_attribute *attr, char *buf)
950{
951 struct apds990x_chip *chip = dev_get_drvdata(dev);
952 return sprintf(buf, "%d\n", chip->prox_thres);
953}
954
955static ssize_t apds990x_prox_threshold_store(struct device *dev,
956 struct device_attribute *attr,
957 const char *buf, size_t len)
958{
959 struct apds990x_chip *chip = dev_get_drvdata(dev);
960 unsigned long value;
961
962 if (strict_strtoul(buf, 0, &value))
963 return -EINVAL;
964
965 if ((value > APDS_RANGE) || (value == 0) ||
966 (value < APDS_PROX_HYSTERESIS))
967 return -EINVAL;
968
969 mutex_lock(&chip->mutex);
970 chip->prox_thres = value;
971
972 apds990x_force_p_refresh(chip);
973 mutex_unlock(&chip->mutex);
974 return len;
975}
976
977static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
978 apds990x_prox_threshold_show,
979 apds990x_prox_threshold_store);
980
981static ssize_t apds990x_power_state_show(struct device *dev,
982 struct device_attribute *attr, char *buf)
983{
984 return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
986}
987
988static ssize_t apds990x_power_state_store(struct device *dev,
989 struct device_attribute *attr,
990 const char *buf, size_t len)
991{
992 struct apds990x_chip *chip = dev_get_drvdata(dev);
993 unsigned long value;
994
995 if (strict_strtoul(buf, 0, &value))
996 return -EINVAL;
997 if (value) {
998 pm_runtime_get_sync(dev);
999 mutex_lock(&chip->mutex);
1000 chip->lux_wait_fresh_res = true;
1001 apds990x_force_a_refresh(chip);
1002 apds990x_force_p_refresh(chip);
1003 mutex_unlock(&chip->mutex);
1004 } else {
1005 if (!pm_runtime_suspended(dev))
1006 pm_runtime_put(dev);
1007 }
1008 return len;
1009}
1010
1011static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
1012 apds990x_power_state_show,
1013 apds990x_power_state_store);
1014
1015static ssize_t apds990x_chip_id_show(struct device *dev,
1016 struct device_attribute *attr, char *buf)
1017{
1018 struct apds990x_chip *chip = dev_get_drvdata(dev);
1019 return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
1020}
1021
1022static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
1023
1024static struct attribute *sysfs_attrs_ctrl[] = {
1025 &dev_attr_lux0_calibscale.attr,
1026 &dev_attr_lux0_calibscale_default.attr,
1027 &dev_attr_lux0_input.attr,
1028 &dev_attr_lux0_sensor_range.attr,
1029 &dev_attr_lux0_rate.attr,
1030 &dev_attr_lux0_rate_avail.attr,
1031 &dev_attr_lux0_thresh_above_value.attr,
1032 &dev_attr_lux0_thresh_below_value.attr,
1033 &dev_attr_prox0_raw_en.attr,
1034 &dev_attr_prox0_raw.attr,
1035 &dev_attr_prox0_sensor_range.attr,
1036 &dev_attr_prox0_thresh_above_value.attr,
1037 &dev_attr_prox0_reporting_mode.attr,
1038 &dev_attr_prox0_reporting_mode_avail.attr,
1039 &dev_attr_chip_id.attr,
1040 &dev_attr_power_state.attr,
1041 NULL
1042};
1043
1044static struct attribute_group apds990x_attribute_group[] = {
1045 {.attrs = sysfs_attrs_ctrl },
1046};
1047
1048static int __devinit apds990x_probe(struct i2c_client *client,
1049 const struct i2c_device_id *id)
1050{
1051 struct apds990x_chip *chip;
1052 int err;
1053
1054 chip = kzalloc(sizeof *chip, GFP_KERNEL);
1055 if (!chip)
1056 return -ENOMEM;
1057
1058 i2c_set_clientdata(client, chip);
1059 chip->client = client;
1060
1061 init_waitqueue_head(&chip->wait);
1062 mutex_init(&chip->mutex);
1063 chip->pdata = client->dev.platform_data;
1064
1065 if (chip->pdata == NULL) {
1066 dev_err(&client->dev, "platform data is mandatory\n");
1067 err = -EINVAL;
1068 goto fail1;
1069 }
1070
1071 if (chip->pdata->cf.ga == 0) {
1072 /* set uncovered sensor default parameters */
1073 chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
1074 chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
1075 chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
1076 chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
1077 chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
1078 chip->cf.df = 52;
1079 } else {
1080 chip->cf = chip->pdata->cf;
1081 }
1082
1083 /* precalculate inverse chip factors for threshold control */
1084 chip->rcf.afactor =
1085 (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
1086 (chip->cf.cf1 - chip->cf.cf2);
1087 chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
1088 chip->cf.cf1;
1089 chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
1090 chip->cf.cf1;
1091 chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
1092 chip->cf.cf2;
1093 chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
1094 chip->cf.cf2;
1095
1096 /* Set something to start with */
1097 chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
1098 chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
1099 chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
1100
1101 chip->prox_thres = APDS_PROX_DEF_THRES;
1102 chip->pdrive = chip->pdata->pdrive;
1103 chip->pdiode = APDS_PDIODE_IR;
1104 chip->pgain = APDS_PGAIN_1X;
1105 chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
1106 chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
1107 chip->prox_continuous_mode = false;
1108
1109 chip->regs[0].supply = reg_vcc;
1110 chip->regs[1].supply = reg_vled;
1111
1112 err = regulator_bulk_get(&client->dev,
1113 ARRAY_SIZE(chip->regs), chip->regs);
1114 if (err < 0) {
1115 dev_err(&client->dev, "Cannot get regulators\n");
1116 goto fail1;
1117 }
1118
1119 err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
1120 if (err < 0) {
1121 dev_err(&client->dev, "Cannot enable regulators\n");
1122 goto fail2;
1123 }
1124
1125 usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
1126
1127 err = apds990x_detect(chip);
1128 if (err < 0) {
1129 dev_err(&client->dev, "APDS990X not found\n");
1130 goto fail3;
1131 }
1132
1133 pm_runtime_set_active(&client->dev);
1134
1135 apds990x_configure(chip);
1136 apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
1137 apds990x_mode_on(chip);
1138
1139 pm_runtime_enable(&client->dev);
1140
1141 if (chip->pdata->setup_resources) {
1142 err = chip->pdata->setup_resources();
1143 if (err) {
1144 err = -EINVAL;
1145 goto fail3;
1146 }
1147 }
1148
1149 err = sysfs_create_group(&chip->client->dev.kobj,
1150 apds990x_attribute_group);
1151 if (err < 0) {
1152 dev_err(&chip->client->dev, "Sysfs registration failed\n");
1153 goto fail4;
1154 }
1155
1156 err = request_threaded_irq(client->irq, NULL,
1157 apds990x_irq,
1158 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
1159 IRQF_ONESHOT,
1160 "apds990x", chip);
1161 if (err) {
1162 dev_err(&client->dev, "could not get IRQ %d\n",
1163 client->irq);
1164 goto fail5;
1165 }
1166 return err;
1167fail5:
1168 sysfs_remove_group(&chip->client->dev.kobj,
1169 &apds990x_attribute_group[0]);
1170fail4:
1171 if (chip->pdata && chip->pdata->release_resources)
1172 chip->pdata->release_resources();
1173fail3:
1174 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
1175fail2:
1176 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1177fail1:
1178 kfree(chip);
1179 return err;
1180}
1181
1182static int __devexit apds990x_remove(struct i2c_client *client)
1183{
1184 struct apds990x_chip *chip = i2c_get_clientdata(client);
1185
1186 free_irq(client->irq, chip);
1187 sysfs_remove_group(&chip->client->dev.kobj,
1188 apds990x_attribute_group);
1189
1190 if (chip->pdata && chip->pdata->release_resources)
1191 chip->pdata->release_resources();
1192
1193 if (!pm_runtime_suspended(&client->dev))
1194 apds990x_chip_off(chip);
1195
1196 pm_runtime_disable(&client->dev);
1197 pm_runtime_set_suspended(&client->dev);
1198
1199 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1200
1201 kfree(chip);
1202 return 0;
1203}
1204
1205#ifdef CONFIG_PM
1206static int apds990x_suspend(struct device *dev)
1207{
1208 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1209 struct apds990x_chip *chip = i2c_get_clientdata(client);
1210
1211 apds990x_chip_off(chip);
1212 return 0;
1213}
1214
1215static int apds990x_resume(struct device *dev)
1216{
1217 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1218 struct apds990x_chip *chip = i2c_get_clientdata(client);
1219
1220 /*
1221	 * If we were enabled at suspend time, everything is expected to
1222	 * work nicely and smoothly, so calling apds990x_chip_on() is enough.
1223 */
1224 apds990x_chip_on(chip);
1225
1226 return 0;
1227}
1228#else
1229#define apds990x_suspend NULL
1230#define apds990x_resume NULL
1231#define apds990x_shutdown NULL
1232#endif
1233
1234#ifdef CONFIG_PM_RUNTIME
1235static int apds990x_runtime_suspend(struct device *dev)
1236{
1237 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1238 struct apds990x_chip *chip = i2c_get_clientdata(client);
1239
1240 apds990x_chip_off(chip);
1241 return 0;
1242}
1243
1244static int apds990x_runtime_resume(struct device *dev)
1245{
1246 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1247 struct apds990x_chip *chip = i2c_get_clientdata(client);
1248
1249 apds990x_chip_on(chip);
1250 return 0;
1251}
1252
1253#endif
1254
1255static const struct i2c_device_id apds990x_id[] = {
1256 {"apds990x", 0 },
1257 {}
1258};
1259
1260MODULE_DEVICE_TABLE(i2c, apds990x_id);
1261
1262static const struct dev_pm_ops apds990x_pm_ops = {
1263 SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
1264 SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
1265 apds990x_runtime_resume,
1266 NULL)
1267};
1268
1269static struct i2c_driver apds990x_driver = {
1270 .driver = {
1271 .name = "apds990x",
1272 .owner = THIS_MODULE,
1273 .pm = &apds990x_pm_ops,
1274 },
1275 .probe = apds990x_probe,
1276 .remove = __devexit_p(apds990x_remove),
1277 .id_table = apds990x_id,
1278};
1279
1280static int __init apds990x_init(void)
1281{
1282 return i2c_add_driver(&apds990x_driver);
1283}
1284
1285static void __exit apds990x_exit(void)
1286{
1287 i2c_del_driver(&apds990x_driver);
1288}
1289
1290MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
1291MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
1292MODULE_LICENSE("GPL v2");
1293
1294module_init(apds990x_init);
1295module_exit(apds990x_exit);
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
new file mode 100644
index 000000000000..9e3879ef58f2
--- /dev/null
+++ b/drivers/misc/arm-charlcd.c
@@ -0,0 +1,396 @@
1/*
2 * Driver for the on-board character LCD found on some ARM reference boards
3 * This is basically a Hitachi HD44780 LCD with a custom IP block to drive it.
4 * http://en.wikipedia.org/wiki/HD44780_Character_LCD
5 * Currently it will just display the text "ARM Linux" and the Linux version.
6 *
7 * License terms: GNU General Public License (GPL) version 2
8 * Author: Linus Walleij <triad@df.lth.se>
9 */
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/interrupt.h>
13#include <linux/platform_device.h>
14#include <linux/completion.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/slab.h>
18#include <linux/workqueue.h>
19#include <generated/utsrelease.h>
20
21#define DRIVERNAME "arm-charlcd"
22#define CHARLCD_TIMEOUT (msecs_to_jiffies(1000))
23
24/* Offsets to registers */
25#define CHAR_COM 0x00U
26#define CHAR_DAT 0x04U
27#define CHAR_RD 0x08U
28#define CHAR_RAW 0x0CU
29#define CHAR_MASK 0x10U
30#define CHAR_STAT 0x14U
31
32#define CHAR_RAW_CLEAR 0x00000000U
33#define CHAR_RAW_VALID 0x00000100U
34
35/* Hitachi HD44780 display commands */
36#define HD_CLEAR 0x01U
37#define HD_HOME 0x02U
38#define HD_ENTRYMODE 0x04U
39#define HD_ENTRYMODE_INCREMENT 0x02U
40#define HD_ENTRYMODE_SHIFT 0x01U
41#define HD_DISPCTRL 0x08U
42#define HD_DISPCTRL_ON 0x04U
43#define HD_DISPCTRL_CURSOR_ON 0x02U
44#define HD_DISPCTRL_CURSOR_BLINK 0x01U
45#define HD_CRSR_SHIFT 0x10U
46#define HD_CRSR_SHIFT_DISPLAY 0x08U
47#define HD_CRSR_SHIFT_DISPLAY_RIGHT 0x04U
48#define HD_FUNCSET 0x20U
49#define HD_FUNCSET_8BIT 0x10U
50#define HD_FUNCSET_2_LINES 0x08U
51#define HD_FUNCSET_FONT_5X10 0x04U
52#define HD_SET_CGRAM 0x40U
53#define HD_SET_DDRAM 0x80U
54#define HD_BUSY_FLAG 0x80U
55
56/**
57 * @dev: a pointer back to containing device
58 * @phybase: the offset to the controller in physical memory
59 * @physize: the size of the physical page
60 * @virtbase: the offset to the controller in virtual memory
61 * @irq: reserved interrupt number
62 * @complete: completion structure for the last LCD command
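 * @init_work: delayed work that runs the slow initial display setup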
63 */
64struct charlcd {
65 struct device *dev;
66 u32 phybase;
67 u32 physize;
68 void __iomem *virtbase;
69 int irq;
70 struct completion complete;
71 struct delayed_work init_work;
72};
73
74static irqreturn_t charlcd_interrupt(int irq, void *data)
75{
76 struct charlcd *lcd = data;
77 u8 status;
78
79 status = readl(lcd->virtbase + CHAR_STAT) & 0x01;
80 /* Clear IRQ */
81 writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
82 if (status)
83 complete(&lcd->complete);
84 else
85 dev_info(lcd->dev, "Spurious IRQ (%02x)\n", status);
86 return IRQ_HANDLED;
87}
88
89
90static void charlcd_wait_complete_irq(struct charlcd *lcd)
91{
92 int ret;
93
94 ret = wait_for_completion_interruptible_timeout(&lcd->complete,
95 CHARLCD_TIMEOUT);
96 /* Disable IRQ after completion */
97 writel(0x00, lcd->virtbase + CHAR_MASK);
98
99 if (ret < 0) {
100 dev_err(lcd->dev,
101 "wait_for_completion_interruptible_timeout() "
102 "returned %d waiting for ready\n", ret);
103 return;
104 }
105
106 if (ret == 0) {
107 dev_err(lcd->dev, "charlcd controller timed out "
108 "waiting for ready\n");
109 return;
110 }
111}
112
113static u8 charlcd_4bit_read_char(struct charlcd *lcd)
114{
115 u8 data;
116 u32 val;
117 int i;
118
119 /* If we can, use an IRQ to wait for the data, else poll */
120 if (lcd->irq >= 0)
121 charlcd_wait_complete_irq(lcd);
122 else {
123 i = 0;
124 val = 0;
125 while (!(val & CHAR_RAW_VALID) && i < 10) {
126 udelay(100);
127 val = readl(lcd->virtbase + CHAR_RAW);
128 i++;
129 }
130
131 writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
132 }
133 msleep(1);
134
135 /* Read the 4 high bits of the data */
136 data = readl(lcd->virtbase + CHAR_RD) & 0xf0;
137
138 /*
139 * The second read for the low bits does not trigger an IRQ
140 * so in this case we have to poll for the 4 lower bits
141 */
142 i = 0;
143 val = 0;
144 while (!(val & CHAR_RAW_VALID) && i < 10) {
145 udelay(100);
146 val = readl(lcd->virtbase + CHAR_RAW);
147 i++;
148 }
149 writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
150 msleep(1);
151
152 /* Read the 4 low bits of the data */
153 data |= (readl(lcd->virtbase + CHAR_RD) >> 4) & 0x0f;
154
155 return data;
156}
157
158static bool charlcd_4bit_read_bf(struct charlcd *lcd)
159{
160 if (lcd->irq >= 0) {
161 /*
162 * If we'll use IRQs to wait for the busyflag, clear any
163 * pending flag and enable IRQ
164 */
165 writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
166 init_completion(&lcd->complete);
167 writel(0x01, lcd->virtbase + CHAR_MASK);
168 }
169 readl(lcd->virtbase + CHAR_COM);
170 return charlcd_4bit_read_char(lcd) & HD_BUSY_FLAG ? true : false;
171}
172
173static void charlcd_4bit_wait_busy(struct charlcd *lcd)
174{
175 int retries = 50;
176
177 udelay(100);
178 while (charlcd_4bit_read_bf(lcd) && retries)
179 retries--;
180 if (!retries)
181 dev_err(lcd->dev, "timeout waiting for busyflag\n");
182}
183
184static void charlcd_4bit_command(struct charlcd *lcd, u8 cmd)
185{
186 u32 cmdlo = (cmd << 4) & 0xf0;
187 u32 cmdhi = (cmd & 0xf0);
188
189 writel(cmdhi, lcd->virtbase + CHAR_COM);
190 udelay(10);
191 writel(cmdlo, lcd->virtbase + CHAR_COM);
192 charlcd_4bit_wait_busy(lcd);
193}
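/*
 * Worked example (illustrative, derived from the masks above): sending
 * HD_CLEAR (0x01) through charlcd_4bit_command() first writes the high
 * nibble 0x00 to CHAR_COM and then the low nibble shifted up, 0x10, so the
 * 8-bit HD44780 command crosses the 4-bit interface in two writes.
 */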
194
195static void charlcd_4bit_char(struct charlcd *lcd, u8 ch)
196{
197 u32 chlo = (ch << 4) & 0xf0;
198 u32 chhi = (ch & 0xf0);
199
200 writel(chhi, lcd->virtbase + CHAR_DAT);
201 udelay(10);
202 writel(chlo, lcd->virtbase + CHAR_DAT);
203 charlcd_4bit_wait_busy(lcd);
204}
205
206static void charlcd_4bit_print(struct charlcd *lcd, int line, const char *str)
207{
208 u8 offset;
209 int i;
210
211 /*
212 * We support lines 0 and 1:
213 * line 0 maps to DDRAM addresses 0x00..0x27,
214 * line 1 maps to DDRAM addresses 0x28..0x4f.
215 */
216 if (line == 0)
217 offset = 0;
218 else if (line == 1)
219 offset = 0x28;
220 else
221 return;
222
223 /* Set offset */
224 charlcd_4bit_command(lcd, HD_SET_DDRAM | offset);
225
226 /* Send string */
227 for (i = 0; i < strlen(str) && i < 0x28; i++)
228 charlcd_4bit_char(lcd, str[i]);
229}
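/*
 * Usage sketch (illustrative): charlcd_4bit_print(lcd, 1, "hello") issues
 * HD_SET_DDRAM | 0x28 (0xa8) to move to the second line and then sends the
 * five characters one at a time via charlcd_4bit_char().
 */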
230
231static void charlcd_4bit_init(struct charlcd *lcd)
232{
233 /* These commands cannot be checked with the busy flag */
234 writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
235 msleep(5);
236 writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
237 udelay(100);
238 writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
239 udelay(100);
240 /* Go to 4bit mode */
241 writel(HD_FUNCSET, lcd->virtbase + CHAR_COM);
242 udelay(100);
243 /*
244 * 4bit mode, 2 lines, 5x8 font, after this the number of lines
245 * and the font cannot be changed until the next initialization sequence
246 */
247 charlcd_4bit_command(lcd, HD_FUNCSET | HD_FUNCSET_2_LINES);
248 charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
249 charlcd_4bit_command(lcd, HD_ENTRYMODE | HD_ENTRYMODE_INCREMENT);
250 charlcd_4bit_command(lcd, HD_CLEAR);
251 charlcd_4bit_command(lcd, HD_HOME);
252 /* Put something useful in the display */
253 charlcd_4bit_print(lcd, 0, "ARM Linux");
254 charlcd_4bit_print(lcd, 1, UTS_RELEASE);
255}
256
257static void charlcd_init_work(struct work_struct *work)
258{
259 struct charlcd *lcd =
260 container_of(work, struct charlcd, init_work.work);
261
262 charlcd_4bit_init(lcd);
263}
264
265static int __init charlcd_probe(struct platform_device *pdev)
266{
267 int ret;
268 struct charlcd *lcd;
269 struct resource *res;
270
271 lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL);
272 if (!lcd)
273 return -ENOMEM;
274
275 lcd->dev = &pdev->dev;
276
277 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
278 if (!res) {
279 ret = -ENOENT;
280 goto out_no_resource;
281 }
282 lcd->phybase = res->start;
283 lcd->physize = resource_size(res);
284
285 if (request_mem_region(lcd->phybase, lcd->physize,
286 DRIVERNAME) == NULL) {
287 ret = -EBUSY;
288 goto out_no_memregion;
289 }
290
291 lcd->virtbase = ioremap(lcd->phybase, lcd->physize);
292 if (!lcd->virtbase) {
293 ret = -ENOMEM;
294 goto out_no_remap;
295 }
296
297 lcd->irq = platform_get_irq(pdev, 0);
298 /* If no IRQ is supplied, we'll survive without it */
299 if (lcd->irq >= 0) {
300 if (request_irq(lcd->irq, charlcd_interrupt, IRQF_DISABLED,
301 DRIVERNAME, lcd)) {
302 ret = -EIO;
303 goto out_no_irq;
304 }
305 }
306
307 platform_set_drvdata(pdev, lcd);
308
309 /*
310 * Initialize the display in a delayed work, because
311 * it is VERY slow and would slow down the boot of the system.
312 */
313 INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
314 schedule_delayed_work(&lcd->init_work, 0);
315
316 dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
317 lcd->phybase);
318
319 return 0;
320
321out_no_irq:
322 iounmap(lcd->virtbase);
323out_no_remap:
324 platform_set_drvdata(pdev, NULL);
325out_no_memregion:
326 release_mem_region(lcd->phybase, lcd->physize);
327out_no_resource:
328 kfree(lcd);
329 return ret;
330}
331
332static int __exit charlcd_remove(struct platform_device *pdev)
333{
334 struct charlcd *lcd = platform_get_drvdata(pdev);
335
336 if (lcd) {
337 free_irq(lcd->irq, lcd);
338 iounmap(lcd->virtbase);
339 release_mem_region(lcd->phybase, lcd->physize);
340 platform_set_drvdata(pdev, NULL);
341 kfree(lcd);
342 }
343
344 return 0;
345}
346
347static int charlcd_suspend(struct device *dev)
348{
349 struct platform_device *pdev = to_platform_device(dev);
350 struct charlcd *lcd = platform_get_drvdata(pdev);
351
352 /* Power the display off */
353 charlcd_4bit_command(lcd, HD_DISPCTRL);
354 return 0;
355}
356
357static int charlcd_resume(struct device *dev)
358{
359 struct platform_device *pdev = to_platform_device(dev);
360 struct charlcd *lcd = platform_get_drvdata(pdev);
361
362 /* Turn the display back on */
363 charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
364 return 0;
365}
366
367static const struct dev_pm_ops charlcd_pm_ops = {
368 .suspend = charlcd_suspend,
369 .resume = charlcd_resume,
370};
371
372static struct platform_driver charlcd_driver = {
373 .driver = {
374 .name = DRIVERNAME,
375 .owner = THIS_MODULE,
376 .pm = &charlcd_pm_ops,
377 },
378 .remove = __exit_p(charlcd_remove),
379};
380
381static int __init charlcd_init(void)
382{
383 return platform_driver_probe(&charlcd_driver, charlcd_probe);
384}
385
386static void __exit charlcd_exit(void)
387{
388 platform_driver_unregister(&charlcd_driver);
389}
390
391module_init(charlcd_init);
392module_exit(charlcd_exit);
393
394MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
395MODULE_DESCRIPTION("ARM Character LCD Driver");
396MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 558bf3f2c276..4afffe610f99 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -15,6 +15,7 @@
15 15 #include <linux/io.h>
16 16 #include <linux/spinlock.h>
17 17 #include <linux/atmel-ssc.h>
   18 #include <linux/slab.h>
18 19
19 20 /* Serialize access to ssc_list and user count */
20 21 static DEFINE_SPINLOCK(user_lock);
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 6aa5294dfec4..0f3fb4f03bdf 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -1,6 +1,7 @@
1 1 #include <linux/module.h>
2 2 #include <linux/clk.h>
3 3 #include <linux/err.h>
  4 #include <linux/slab.h>
4 5 #include <linux/io.h>
5 6 #include <linux/interrupt.h>
6 7 #include <linux/platform_device.h>
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 05dc8a31f280..3891124001f2 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -6,6 +6,7 @@
6 6 #include <linux/ioport.h>
7 7 #include <linux/kernel.h>
8 8 #include <linux/platform_device.h>
  9 #include <linux/slab.h>
9 10
10 11 /* Number of bytes to reserve for the iomem resource */
11 12 #define ATMEL_TC_IOMEM_SIZE 256
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
new file mode 100644
index 000000000000..d79a972f2c79
--- /dev/null
+++ b/drivers/misc/bh1770glc.c
@@ -0,0 +1,1417 @@
1/*
2 * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
 3 * The chip is a combined proximity and ambient light sensor.
4 *
5 * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
6 *
7 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/i2c.h>
28#include <linux/interrupt.h>
29#include <linux/mutex.h>
30#include <linux/i2c/bh1770glc.h>
31#include <linux/regulator/consumer.h>
32#include <linux/pm_runtime.h>
33#include <linux/workqueue.h>
34#include <linux/delay.h>
35#include <linux/wait.h>
36#include <linux/slab.h>
37
38#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
39#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
40#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
41#define BH1770_I_LED3 0x83 /* LED3 current setting */
42#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
43#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
44#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
45#define BH1770_PART_ID 0x8a /* Part number and revision ID */
46#define BH1770_MANUFACT_ID 0x8b /* Manufacturer ID */
47#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
48#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
49#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
50#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
51#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
52#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
53#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
54#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
55#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
56#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
57#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
58#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
59#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
60#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
61
62/* MANUFACT_ID */
63#define BH1770_MANUFACT_ROHM 0x01
64#define BH1770_MANUFACT_OSRAM 0x03
65
66/* PART_ID */
67#define BH1770_PART 0x90
68#define BH1770_PART_MASK 0xf0
69#define BH1770_REV_MASK 0x0f
70#define BH1770_REV_SHIFT 0
71#define BH1770_REV_0 0x00
72#define BH1770_REV_1 0x01
73
74/* Operating modes for both */
75#define BH1770_STANDBY 0x00
76#define BH1770_FORCED 0x02
77#define BH1770_STANDALONE 0x03
78#define BH1770_SWRESET (0x01 << 2)
79
80#define BH1770_PS_TRIG_MEAS (1 << 0)
81#define BH1770_ALS_TRIG_MEAS (1 << 1)
82
83/* Interrupt control */
84#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
85#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
86#define BH1770_INT_ALS_ENA (1 << 1)
87#define BH1770_INT_PS_ENA (1 << 0)
88
89/* Interrupt status */
90#define BH1770_INT_LED1_DATA (1 << 0)
91#define BH1770_INT_LED1_INT (1 << 1)
92#define BH1770_INT_LED2_DATA (1 << 2)
93#define BH1770_INT_LED2_INT (1 << 3)
94#define BH1770_INT_LED3_DATA (1 << 4)
95#define BH1770_INT_LED3_INT (1 << 5)
96#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
97#define BH1770_INT_ALS_DATA (1 << 6)
98#define BH1770_INT_ALS_INT (1 << 7)
99
100/* Led channels */
101#define BH1770_LED1 0x00
102
103#define BH1770_DISABLE 0
104#define BH1770_ENABLE 1
105#define BH1770_PROX_CHANNELS 1
106
107#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
108#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
109#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
110#define BH1770_STARTUP_DELAY 50
111#define BH1770_RESET_TIME 10
112#define BH1770_TIMEOUT 2100 /* Timeout in ms (2.1 seconds) */
113
114#define BH1770_LUX_RANGE 65535
115#define BH1770_PROX_RANGE 255
116#define BH1770_COEF_SCALER 1024
117#define BH1770_CALIB_SCALER 8192
118#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
119#define BH1770_LUX_DEF_THRES 1000
120#define BH1770_PROX_DEF_THRES 70
121#define BH1770_PROX_DEF_ABS_THRES 100
122#define BH1770_DEFAULT_PERSISTENCE 10
123#define BH1770_PROX_MAX_PERSISTENCE 50
124#define BH1770_LUX_GA_SCALE 16384
125#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
126#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
127#define BH1770_LUX_CORR_SCALE 4096
128
129#define PROX_ABOVE_THRESHOLD 1
130#define PROX_BELOW_THRESHOLD 0
131
132#define PROX_IGNORE_LUX_LIMIT 500
133
134struct bh1770_chip {
135 struct bh1770_platform_data *pdata;
136 char chipname[10];
137 u8 revision;
138 struct i2c_client *client;
139 struct regulator_bulk_data regs[2];
140 struct mutex mutex; /* avoid parallel access */
141 wait_queue_head_t wait;
142
143 bool int_mode_prox;
144 bool int_mode_lux;
145 struct delayed_work prox_work;
146 u32 lux_cf; /* Chip specific factor */
147 u32 lux_ga;
148 u32 lux_calib;
149 int lux_rate_index;
150 u32 lux_corr;
151 u16 lux_data_raw;
152 u16 lux_threshold_hi;
153 u16 lux_threshold_lo;
154 u16 lux_thres_hi_onchip;
155 u16 lux_thres_lo_onchip;
156 bool lux_wait_result;
157
158 int prox_enable_count;
159 u16 prox_coef;
160 u16 prox_const;
161 int prox_rate;
162 int prox_rate_threshold;
163 u8 prox_persistence;
164 u8 prox_persistence_counter;
165 u8 prox_data;
166 u8 prox_threshold;
167 u8 prox_threshold_hw;
168 bool prox_force_update;
169 u8 prox_abs_thres;
170 u8 prox_led;
171};
172
173static const char reg_vcc[] = "Vcc";
174static const char reg_vleds[] = "Vleds";
175
176/*
177 * Supported stand alone rates in ms from chip data sheet
178 * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
179 */
180static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
181static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
182
183/* Supported IR-led currents in mA */
184static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
185
186/*
187 * Supported stand alone rates in ms from chip data sheet
188 * {100, 200, 500, 1000, 2000};
189 */
190static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
191
192/*
193 * Interrupt control functions are called with chip->mutex held,
194 * except during module probe / remove.
195 */
196static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
197 int lux)
198{
199 chip->int_mode_lux = lux;
200 /* Set interrupt modes, interrupt active low, latched */
201 return i2c_smbus_write_byte_data(chip->client,
202 BH1770_INTERRUPT,
203 (lux << 1) | chip->int_mode_prox);
204}
205
206static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
207 int ps)
208{
209 chip->int_mode_prox = ps;
210 return i2c_smbus_write_byte_data(chip->client,
211 BH1770_INTERRUPT,
212 (chip->int_mode_lux << 1) | (ps << 0));
213}
214
215/* chip->mutex is always kept here */
216static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
217{
218 /* sysfs may call this when the chip is powered off */
219 if (pm_runtime_suspended(&chip->client->dev))
220 return 0;
221
222 /* Proper proximity response needs fastest lux rate (100ms) */
223 if (chip->prox_enable_count)
224 rate_index = 0;
225
226 return i2c_smbus_write_byte_data(chip->client,
227 BH1770_ALS_MEAS_RATE,
228 rate_index);
229}
230
231static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
232{
233 int rate;
234
235 rate = (mode == PROX_ABOVE_THRESHOLD) ?
236 chip->prox_rate_threshold : chip->prox_rate;
237
238 return i2c_smbus_write_byte_data(chip->client,
239 BH1770_PS_MEAS_RATE,
240 rate);
241}
242
243/* InfraredLED is controlled by the chip during proximity scanning */
244static inline int bh1770_led_cfg(struct bh1770_chip *chip)
245{
246 /* LED cfg, current for leds 1 and 2 */
247 return i2c_smbus_write_byte_data(chip->client,
248 BH1770_I_LED,
249 (BH1770_LED1 << 6) |
250 (BH1770_LED_5mA << 3) |
251 chip->prox_led);
252}
253
254/*
255 * The following two functions convert raw PS values from the HW to
256 * normalized values. The purpose is to compensate for differences between
257 * sensor versions and variants so that the result means about the same
258 * across versions.
259 */
260static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
261{
262 u16 adjusted;
263 adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
264 BH1770_COEF_SCALER);
265 if (adjusted > BH1770_PROX_RANGE)
266 adjusted = BH1770_PROX_RANGE;
267 return adjusted;
268}
269
270static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
271{
272 u16 raw;
273
274 raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
275 if (raw > chip->prox_const)
276 raw = raw - chip->prox_const;
277 else
278 raw = 0;
279 return raw;
280}
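/*
 * Worked example (illustrative, using the SFH7770 values set in
 * bh1770_detect() below, prox_coef = 819 and prox_const = 40): a raw
 * reading of 100 becomes ((100 + 40) * 819) / 1024 = 111 after adjustment,
 * and the reverse helper maps 111 back to (111 * 1024) / 819 - 40 = 98,
 * i.e. roughly the original value minus integer truncation.
 */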
281
282/*
283 * The lux conversion helpers below convert raw lux values from the HW to
284 * normalized values, compensating for differences between sensor versions
285 * and variants so that results mean about the same across versions.
286 * chip->mutex is held when this is called.
287 */
288static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
289{
290 u8 tmp = 0;
291
292 /* sysfs may call this when the chip is powered off */
293 if (pm_runtime_suspended(&chip->client->dev))
294 return 0;
295
296 tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
297 chip->prox_threshold_hw = tmp;
298
299 return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
300 tmp);
301}
302
303static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
304{
305 u32 lux;
306 lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
307 return min(lux, (u32)BH1770_LUX_RANGE);
308}
309
310static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
311 u16 adjusted)
312{
313 return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
314}
315
316/* chip->mutex is kept when this is called */
317static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
318 u16 threshold_hi, u16 threshold_lo)
319{
320 u8 data[4];
321 int ret;
322
323 /* sysfs may call this when the chip is powered off */
324 if (pm_runtime_suspended(&chip->client->dev))
325 return 0;
326
327 /*
328 * Compensate threshold values with the correction factors if not
329 * set to minimum or maximum.
330 * Min & max values disable interrupts.
331 */
332 if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
333 threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
334
335 if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
336 threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
337
338 if (chip->lux_thres_hi_onchip == threshold_hi &&
339 chip->lux_thres_lo_onchip == threshold_lo)
340 return 0;
341
342 chip->lux_thres_hi_onchip = threshold_hi;
343 chip->lux_thres_lo_onchip = threshold_lo;
344
345 data[0] = threshold_hi;
346 data[1] = threshold_hi >> 8;
347 data[2] = threshold_lo;
348 data[3] = threshold_lo >> 8;
349
350 ret = i2c_smbus_write_i2c_block_data(chip->client,
351 BH1770_ALS_TH_UP_0,
352 ARRAY_SIZE(data),
353 data);
354 return ret;
355}
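/*
 * For illustration: with a neutral correction factor, a high threshold of
 * 1000 (0x03e8) is stored as data[0] = 0xe8 and data[1] = 0x03, i.e. the
 * 16-bit thresholds are written to the chip least significant byte first,
 * starting at BH1770_ALS_TH_UP_0.
 */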
356
357static int bh1770_lux_get_result(struct bh1770_chip *chip)
358{
359 u16 data;
360 int ret;
361
362 ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
363 if (ret < 0)
364 return ret;
365
366 data = ret & 0xff;
367 ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
368 if (ret < 0)
369 return ret;
370
371 chip->lux_data_raw = data | ((ret & 0xff) << 8);
372
373 return 0;
374}
375
376/* Calculate correction value which contains chip and device specific parts */
377static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
378{
379 u32 tmp;
380 /* Impact of glass attenuation correction */
381 tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
382 /* Impact of chip factor correction */
383 tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
384 /* Impact of Device specific calibration correction */
385 tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
386 return tmp;
387}
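/*
 * Worked example (illustrative): assuming a fully neutral setup where
 * lux_ga == BH1770_LUX_GA_SCALE, lux_cf == BH1770_NEUTRAL_CF and
 * lux_calib == BH1770_LUX_NEUTRAL_CALIB_VALUE, every step above reduces to
 * BH1770_LUX_CORR_SCALE (4096), so bh1770_lux_raw_to_adjusted() passes raw
 * readings through unchanged.
 */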
388
389static int bh1770_lux_read_result(struct bh1770_chip *chip)
390{
391 bh1770_lux_get_result(chip);
392 return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
393}
394
395/*
396 * Chip on / off functions are called while keeping mutex except probe
397 * or remove phase
398 */
399static int bh1770_chip_on(struct bh1770_chip *chip)
400{
401 int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
402 chip->regs);
403 if (ret < 0)
404 return ret;
405
406 usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
407
408 /* Reset the chip */
409 i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
410 BH1770_SWRESET);
411 usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
412
413 /*
414 * ALS is always started since proximity needs ALS results
415 * for reliability estimation.
416 * Let's assume dark until the first ALS measurement is ready.
417 */
418 chip->lux_data_raw = 0;
419 chip->prox_data = 0;
420 ret = i2c_smbus_write_byte_data(chip->client,
421 BH1770_ALS_CONTROL, BH1770_STANDALONE);
422
423 /* Assume reset defaults */
424 chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
425 chip->lux_thres_lo_onchip = 0;
426
427 return ret;
428}
429
430static void bh1770_chip_off(struct bh1770_chip *chip)
431{
432 i2c_smbus_write_byte_data(chip->client,
433 BH1770_INTERRUPT, BH1770_DISABLE);
434 i2c_smbus_write_byte_data(chip->client,
435 BH1770_ALS_CONTROL, BH1770_STANDBY);
436 i2c_smbus_write_byte_data(chip->client,
437 BH1770_PS_CONTROL, BH1770_STANDBY);
438 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
439}
440
441/* chip->mutex is kept when this is called */
442static int bh1770_prox_mode_control(struct bh1770_chip *chip)
443{
444 if (chip->prox_enable_count) {
445 chip->prox_force_update = true; /* Force immediate update */
446
447 bh1770_lux_rate(chip, chip->lux_rate_index);
448 bh1770_prox_set_threshold(chip);
449 bh1770_led_cfg(chip);
450 bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
451 bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
452 i2c_smbus_write_byte_data(chip->client,
453 BH1770_PS_CONTROL, BH1770_STANDALONE);
454 } else {
455 chip->prox_data = 0;
456 bh1770_lux_rate(chip, chip->lux_rate_index);
457 bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
458 i2c_smbus_write_byte_data(chip->client,
459 BH1770_PS_CONTROL, BH1770_STANDBY);
460 }
461 return 0;
462}
463
464/* chip->mutex is kept when this is called */
465static int bh1770_prox_read_result(struct bh1770_chip *chip)
466{
467 int ret;
468 bool above;
469 u8 mode;
470
471 ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
472 if (ret < 0)
473 goto out;
474
475 if (ret > chip->prox_threshold_hw)
476 above = true;
477 else
478 above = false;
479
480 /*
481 * When the ALS level goes above the limit, the proximity result may be
482 * a false positive, so ignore the result. With real proximity
483 * there is a shadow causing low ALS levels.
484 */
485 if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
486 ret = 0;
487
488 chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
489
490 /* Strong proximity level or force mode requires immediate response */
491 if (chip->prox_data >= chip->prox_abs_thres ||
492 chip->prox_force_update)
493 chip->prox_persistence_counter = chip->prox_persistence;
494
495 chip->prox_force_update = false;
496
497 /* Persistence filtering to reduce false proximity events */
498 if (likely(above)) {
499 if (chip->prox_persistence_counter < chip->prox_persistence) {
500 chip->prox_persistence_counter++;
501 ret = -ENODATA;
502 } else {
503 mode = PROX_ABOVE_THRESHOLD;
504 ret = 0;
505 }
506 } else {
507 chip->prox_persistence_counter = 0;
508 mode = PROX_BELOW_THRESHOLD;
509 chip->prox_data = 0;
510 ret = 0;
511 }
512
513 /* Set proximity detection rate based on above or below value */
514 if (ret == 0) {
515 bh1770_prox_rate(chip, mode);
516 sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
517 }
518out:
519 return ret;
520}
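/*
 * For illustration: with the default prox_persistence of 10, an
 * above-threshold reading is reported (and prox0_raw notified) only once
 * the persistence counter has climbed to 10 over consecutive readings;
 * until then -ENODATA is returned. A reading at or above prox_abs_thres,
 * or a forced update, preloads the counter so the report happens at once.
 */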
521
522static int bh1770_detect(struct bh1770_chip *chip)
523{
524 struct i2c_client *client = chip->client;
525 s32 ret;
526 u8 manu, part;
527
528 ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
529 if (ret < 0)
530 goto error;
531 manu = (u8)ret;
532
533 ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
534 if (ret < 0)
535 goto error;
536 part = (u8)ret;
537
538 chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
539 chip->prox_coef = BH1770_COEF_SCALER;
540 chip->prox_const = 0;
541 chip->lux_cf = BH1770_NEUTRAL_CF;
542
543 if ((manu == BH1770_MANUFACT_ROHM) &&
544 ((part & BH1770_PART_MASK) == BH1770_PART)) {
545 snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
546 return 0;
547 }
548
549 if ((manu == BH1770_MANUFACT_OSRAM) &&
550 ((part & BH1770_PART_MASK) == BH1770_PART)) {
551 snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
552 /* Values selected by comparing different versions */
553 chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
554 chip->prox_const = 40;
555 return 0;
556 }
557
558 ret = -ENODEV;
559error:
560 dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
561
562 return ret;
563}
564
565/*
566 * This work is re-scheduled at every proximity interrupt.
567 * If this work runs, it means that there hasn't been a
568 * proximity interrupt in time, and the situation is handled as no-proximity.
569 * It would be nice to have a low-threshold interrupt, or an interrupt
570 * when measurement and hi-threshold are both 0, but neither of those exists.
571 * This is a workaround for the missing HW feature.
572 */
573
574static void bh1770_prox_work(struct work_struct *work)
575{
576 struct bh1770_chip *chip =
577 container_of(work, struct bh1770_chip, prox_work.work);
578
579 mutex_lock(&chip->mutex);
580 bh1770_prox_read_result(chip);
581 mutex_unlock(&chip->mutex);
582}
583
584/* This is threaded irq handler */
585static irqreturn_t bh1770_irq(int irq, void *data)
586{
587 struct bh1770_chip *chip = data;
588 int status;
589 int rate = 0;
590
591 mutex_lock(&chip->mutex);
592 status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
593
594 /* Acknowledge interrupt by reading this register */
595 i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
596
597 /*
598 * Check if there is fresh data available for als.
599 * If this is the very first data, update thresholds after that.
600 */
601 if (status & BH1770_INT_ALS_DATA) {
602 bh1770_lux_get_result(chip);
603 if (unlikely(chip->lux_wait_result)) {
604 chip->lux_wait_result = false;
605 wake_up(&chip->wait);
606 bh1770_lux_update_thresholds(chip,
607 chip->lux_threshold_hi,
608 chip->lux_threshold_lo);
609 }
610 }
611
612 /* Disable interrupt logic to guarantee acknowledgement */
613 i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
614 (0 << 1) | (0 << 0));
615
616 if ((status & BH1770_INT_ALS_INT))
617 sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
618
619 if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
620 rate = prox_rates_ms[chip->prox_rate_threshold];
621 bh1770_prox_read_result(chip);
622 }
623
624 /* Re-enable interrupt logic */
625 i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
626 (chip->int_mode_lux << 1) |
627 (chip->int_mode_prox << 0));
628 mutex_unlock(&chip->mutex);
629
630 /*
631 * Can't cancel work while keeping mutex since the work uses the
632 * same mutex.
633 */
634 if (rate) {
635 /*
636 * Simulate missing no-proximity interrupt 50ms after the
637 * next expected interrupt time.
638 */
639 cancel_delayed_work_sync(&chip->prox_work);
640 schedule_delayed_work(&chip->prox_work,
641 msecs_to_jiffies(rate + 50));
642 }
643 return IRQ_HANDLED;
644}
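/*
 * For illustration: with the default prox_rate_threshold index of 6,
 * prox_rates_ms[6] is 200 ms, so the no-proximity fallback work above is
 * scheduled 250 ms after a proximity interrupt and is pushed back each
 * time a new interrupt arrives.
 */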
645
646static ssize_t bh1770_power_state_store(struct device *dev,
647 struct device_attribute *attr,
648 const char *buf, size_t count)
649{
650 struct bh1770_chip *chip = dev_get_drvdata(dev);
651 unsigned long value;
652 ssize_t ret;
653
654 if (strict_strtoul(buf, 0, &value))
655 return -EINVAL;
656
657 mutex_lock(&chip->mutex);
658 if (value) {
659 pm_runtime_get_sync(dev);
660
661 ret = bh1770_lux_rate(chip, chip->lux_rate_index);
662 if (ret < 0) {
663 pm_runtime_put(dev);
664 goto leave;
665 }
666
667 ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
668 if (ret < 0) {
669 pm_runtime_put(dev);
670 goto leave;
671 }
672
673 /* This causes interrupt after the next measurement cycle */
674 bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
675 BH1770_LUX_DEF_THRES);
676 /* Inform that we are waiting for a result from ALS */
677 chip->lux_wait_result = true;
678 bh1770_prox_mode_control(chip);
679 } else if (!pm_runtime_suspended(dev)) {
680 pm_runtime_put(dev);
681 }
682 ret = count;
683leave:
684 mutex_unlock(&chip->mutex);
685 return ret;
686}
687
688static ssize_t bh1770_power_state_show(struct device *dev,
689 struct device_attribute *attr, char *buf)
690{
691 return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
692}
693
694static ssize_t bh1770_lux_result_show(struct device *dev,
695 struct device_attribute *attr, char *buf)
696{
697 struct bh1770_chip *chip = dev_get_drvdata(dev);
698 ssize_t ret;
699 long timeout;
700
701 if (pm_runtime_suspended(dev))
702 return -EIO; /* Chip is not enabled at all */
703
704 timeout = wait_event_interruptible_timeout(chip->wait,
705 !chip->lux_wait_result,
706 msecs_to_jiffies(BH1770_TIMEOUT));
707 if (!timeout)
708 return -EIO;
709
710 mutex_lock(&chip->mutex);
711 ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
712 mutex_unlock(&chip->mutex);
713
714 return ret;
715}
716
717static ssize_t bh1770_lux_range_show(struct device *dev,
718 struct device_attribute *attr, char *buf)
719{
720 return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
721}
722
723static ssize_t bh1770_prox_enable_store(struct device *dev,
724 struct device_attribute *attr,
725 const char *buf, size_t count)
726{
727 struct bh1770_chip *chip = dev_get_drvdata(dev);
728 unsigned long value;
729
730 if (strict_strtoul(buf, 0, &value))
731 return -EINVAL;
732
733 mutex_lock(&chip->mutex);
734 /* Assume no proximity. Sensor will tell real state soon */
735 if (!chip->prox_enable_count)
736 chip->prox_data = 0;
737
738 if (value)
739 chip->prox_enable_count++;
740 else if (chip->prox_enable_count > 0)
741 chip->prox_enable_count--;
742 else
743 goto leave;
744
745 /* Run control only when chip is powered on */
746 if (!pm_runtime_suspended(dev))
747 bh1770_prox_mode_control(chip);
748leave:
749 mutex_unlock(&chip->mutex);
750 return count;
751}
752
753static ssize_t bh1770_prox_enable_show(struct device *dev,
754 struct device_attribute *attr, char *buf)
755{
756 struct bh1770_chip *chip = dev_get_drvdata(dev);
757 ssize_t len;
758
759 mutex_lock(&chip->mutex);
760 len = sprintf(buf, "%d\n", chip->prox_enable_count);
761 mutex_unlock(&chip->mutex);
762 return len;
763}
764
765static ssize_t bh1770_prox_result_show(struct device *dev,
766 struct device_attribute *attr, char *buf)
767{
768 struct bh1770_chip *chip = dev_get_drvdata(dev);
769 ssize_t ret;
770
771 mutex_lock(&chip->mutex);
772 if (chip->prox_enable_count && !pm_runtime_suspended(dev))
773 ret = sprintf(buf, "%d\n", chip->prox_data);
774 else
775 ret = -EIO;
776 mutex_unlock(&chip->mutex);
777 return ret;
778}
779
780static ssize_t bh1770_prox_range_show(struct device *dev,
781 struct device_attribute *attr, char *buf)
782{
783 return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
784}
785
786static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
787 struct device_attribute *attr, char *buf)
788{
789 int i;
790 int pos = 0;
791 for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
792 pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
793 sprintf(buf + pos - 1, "\n");
794 return pos;
795}
796
797static ssize_t bh1770_get_prox_rate_above(struct device *dev,
798 struct device_attribute *attr, char *buf)
799{
800 struct bh1770_chip *chip = dev_get_drvdata(dev);
801 return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
802}
803
804static ssize_t bh1770_get_prox_rate_below(struct device *dev,
805 struct device_attribute *attr, char *buf)
806{
807 struct bh1770_chip *chip = dev_get_drvdata(dev);
808 return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
809}
810
811static int bh1770_prox_rate_validate(int rate)
812{
813 int i;
814
815 for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
816 if (rate >= prox_rates_hz[i])
817 break;
818 return i;
819}
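/*
 * For illustration: a requested rate of 30 Hz falls through 100, 50 and 33
 * and matches 25 Hz (index 3); anything below 2 Hz bottoms out at the last
 * table entry, 2 Hz (index 7).
 */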
820
821static ssize_t bh1770_set_prox_rate_above(struct device *dev,
822 struct device_attribute *attr,
823 const char *buf, size_t count)
824{
825 struct bh1770_chip *chip = dev_get_drvdata(dev);
826 unsigned long value;
827
828 if (strict_strtoul(buf, 0, &value))
829 return -EINVAL;
830
831 mutex_lock(&chip->mutex);
832 chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
833 mutex_unlock(&chip->mutex);
834 return count;
835}
836
837static ssize_t bh1770_set_prox_rate_below(struct device *dev,
838 struct device_attribute *attr,
839 const char *buf, size_t count)
840{
841 struct bh1770_chip *chip = dev_get_drvdata(dev);
842 unsigned long value;
843
844 if (strict_strtoul(buf, 0, &value))
845 return -EINVAL;
846
847 mutex_lock(&chip->mutex);
848 chip->prox_rate = bh1770_prox_rate_validate(value);
849 mutex_unlock(&chip->mutex);
850 return count;
851}
852
853static ssize_t bh1770_get_prox_thres(struct device *dev,
854 struct device_attribute *attr, char *buf)
855{
856 struct bh1770_chip *chip = dev_get_drvdata(dev);
857 return sprintf(buf, "%d\n", chip->prox_threshold);
858}
859
860static ssize_t bh1770_set_prox_thres(struct device *dev,
861 struct device_attribute *attr,
862 const char *buf, size_t count)
863{
864 struct bh1770_chip *chip = dev_get_drvdata(dev);
865 unsigned long value;
866 int ret;
867
868 if (strict_strtoul(buf, 0, &value))
869 return -EINVAL;
870 if (value > BH1770_PROX_RANGE)
871 return -EINVAL;
872
873 mutex_lock(&chip->mutex);
874 chip->prox_threshold = value;
875 ret = bh1770_prox_set_threshold(chip);
876 mutex_unlock(&chip->mutex);
877 if (ret < 0)
878 return ret;
879 return count;
880}
881
882static ssize_t bh1770_prox_persistence_show(struct device *dev,
883 struct device_attribute *attr, char *buf)
884{
885 struct bh1770_chip *chip = dev_get_drvdata(dev);
886
887 return sprintf(buf, "%u\n", chip->prox_persistence);
888}
889
890static ssize_t bh1770_prox_persistence_store(struct device *dev,
891 struct device_attribute *attr,
892 const char *buf, size_t len)
893{
894 struct bh1770_chip *chip = dev_get_drvdata(dev);
895 unsigned long value;
896
897 if (strict_strtoul(buf, 0, &value))
898 return -EINVAL;
899
900 if (value > BH1770_PROX_MAX_PERSISTENCE)
901 return -EINVAL;
902
903 chip->prox_persistence = value;
904
905 return len;
906}
907
908static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
909 struct device_attribute *attr, char *buf)
910{
911 struct bh1770_chip *chip = dev_get_drvdata(dev);
912 return sprintf(buf, "%u\n", chip->prox_abs_thres);
913}
914
915static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
916 struct device_attribute *attr,
917 const char *buf, size_t len)
918{
919 struct bh1770_chip *chip = dev_get_drvdata(dev);
920 unsigned long value;
921
922 if (strict_strtoul(buf, 0, &value))
923 return -EINVAL;
924
925 if (value > BH1770_PROX_RANGE)
926 return -EINVAL;
927
928 chip->prox_abs_thres = value;
929
930 return len;
931}
932
933static ssize_t bh1770_chip_id_show(struct device *dev,
934 struct device_attribute *attr, char *buf)
935{
936 struct bh1770_chip *chip = dev_get_drvdata(dev);
937 return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
938}
939
940static ssize_t bh1770_lux_calib_default_show(struct device *dev,
941 struct device_attribute *attr, char *buf)
942{
943 return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
944}
945
946static ssize_t bh1770_lux_calib_show(struct device *dev,
947 struct device_attribute *attr, char *buf)
948{
949 struct bh1770_chip *chip = dev_get_drvdata(dev);
950 ssize_t len;
951
952 mutex_lock(&chip->mutex);
953 len = sprintf(buf, "%u\n", chip->lux_calib);
954 mutex_unlock(&chip->mutex);
955 return len;
956}
957
958static ssize_t bh1770_lux_calib_store(struct device *dev,
959 struct device_attribute *attr,
960 const char *buf, size_t len)
961{
962 struct bh1770_chip *chip = dev_get_drvdata(dev);
963 unsigned long value;
964 u32 old_calib;
965 u32 new_corr;
966
967 if (strict_strtoul(buf, 0, &value))
968 return -EINVAL;
969
970 mutex_lock(&chip->mutex);
971 old_calib = chip->lux_calib;
972 chip->lux_calib = value;
973 new_corr = bh1770_get_corr_value(chip);
974 if (new_corr == 0) {
975 chip->lux_calib = old_calib;
976 mutex_unlock(&chip->mutex);
977 return -EINVAL;
978 }
979 chip->lux_corr = new_corr;
980 /* Refresh thresholds on HW after changing correction value */
981 bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
982 chip->lux_threshold_lo);
983
984 mutex_unlock(&chip->mutex);
985
986 return len;
987}
988
989static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
990 struct device_attribute *attr, char *buf)
991{
992 int i;
993 int pos = 0;
994 for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
995 pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
996 sprintf(buf + pos - 1, "\n");
997 return pos;
998}
999
1000static ssize_t bh1770_get_lux_rate(struct device *dev,
1001 struct device_attribute *attr, char *buf)
1002{
1003 struct bh1770_chip *chip = dev_get_drvdata(dev);
1004 return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
1005}
1006
1007static ssize_t bh1770_set_lux_rate(struct device *dev,
1008 struct device_attribute *attr,
1009 const char *buf, size_t count)
1010{
1011 struct bh1770_chip *chip = dev_get_drvdata(dev);
1012 unsigned long rate_hz;
1013 int ret, i;
1014
1015 if (strict_strtoul(buf, 0, &rate_hz))
1016 return -EINVAL;
1017
1018 for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
1019 if (rate_hz >= lux_rates_hz[i])
1020 break;
1021
1022 mutex_lock(&chip->mutex);
1023 chip->lux_rate_index = i;
1024 ret = bh1770_lux_rate(chip, i);
1025 mutex_unlock(&chip->mutex);
1026
1027 if (ret < 0)
1028 return ret;
1029
1030 return count;
1031}
1032
1033static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
1034 struct device_attribute *attr, char *buf)
1035{
1036 struct bh1770_chip *chip = dev_get_drvdata(dev);
1037 return sprintf(buf, "%d\n", chip->lux_threshold_hi);
1038}
1039
1040static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
1041 struct device_attribute *attr, char *buf)
1042{
1043 struct bh1770_chip *chip = dev_get_drvdata(dev);
1044 return sprintf(buf, "%d\n", chip->lux_threshold_lo);
1045}
1046
1047static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
1048 const char *buf)
1049{
1050 int ret = 0;
1051 unsigned long thresh;
1052
1053 if (strict_strtoul(buf, 0, &thresh))
1054 return -EINVAL;
1055
1056 if (thresh > BH1770_LUX_RANGE)
1057 return -EINVAL;
1058
1059 mutex_lock(&chip->mutex);
1060 *target = thresh;
1061 /*
1062 * Don't update values in HW if we are still waiting for
1063 * first interrupt to come after device handle open call.
1064 */
1065 if (!chip->lux_wait_result)
1066 ret = bh1770_lux_update_thresholds(chip,
1067 chip->lux_threshold_hi,
1068 chip->lux_threshold_lo);
1069 mutex_unlock(&chip->mutex);
1070 return ret;
1071
1072}
1073
1074static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
1075 struct device_attribute *attr,
1076 const char *buf, size_t len)
1077{
1078 struct bh1770_chip *chip = dev_get_drvdata(dev);
1079 int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
1080 if (ret < 0)
1081 return ret;
1082 return len;
1083}
1084
1085static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
1086 struct device_attribute *attr,
1087 const char *buf, size_t len)
1088{
1089 struct bh1770_chip *chip = dev_get_drvdata(dev);
1090 int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
1091 if (ret < 0)
1092 return ret;
1093 return len;
1094}
1095
1096static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
1097 bh1770_prox_enable_store);
1098static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
1099 bh1770_prox_abs_thres_show,
1100 bh1770_prox_abs_thres_store);
1101static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
1102 bh1770_get_prox_thres,
1103 bh1770_set_prox_thres);
1104static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
1105static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
1106static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
1107 bh1770_prox_persistence_show,
1108 bh1770_prox_persistence_store);
1109static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
1110 bh1770_get_prox_rate_above,
1111 bh1770_set_prox_rate_above);
1112static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
1113 bh1770_get_prox_rate_below,
1114 bh1770_set_prox_rate_below);
1115static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
1116
1117static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
1118 bh1770_lux_calib_store);
1119static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
1120 bh1770_lux_calib_default_show,
1121 NULL);
1122static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
1123static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
1124static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
1125 bh1770_set_lux_rate);
1126static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
1127static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
1128 bh1770_get_lux_thresh_above,
1129 bh1770_set_lux_thresh_above);
1130static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
1131 bh1770_get_lux_thresh_below,
1132 bh1770_set_lux_thresh_below);
1133static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
1134static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
1135 bh1770_power_state_store);
1136
1137
1138static struct attribute *sysfs_attrs[] = {
1139 &dev_attr_lux0_calibscale.attr,
1140 &dev_attr_lux0_calibscale_default.attr,
1141 &dev_attr_lux0_input.attr,
1142 &dev_attr_lux0_sensor_range.attr,
1143 &dev_attr_lux0_rate.attr,
1144 &dev_attr_lux0_rate_avail.attr,
1145 &dev_attr_lux0_thresh_above_value.attr,
1146 &dev_attr_lux0_thresh_below_value.attr,
1147 &dev_attr_prox0_raw.attr,
1148 &dev_attr_prox0_sensor_range.attr,
1149 &dev_attr_prox0_raw_en.attr,
1150 &dev_attr_prox0_thresh_above_count.attr,
1151 &dev_attr_prox0_rate_above.attr,
1152 &dev_attr_prox0_rate_below.attr,
1153 &dev_attr_prox0_rate_avail.attr,
1154 &dev_attr_prox0_thresh_above0_value.attr,
1155 &dev_attr_prox0_thresh_above1_value.attr,
1156 &dev_attr_chip_id.attr,
1157 &dev_attr_power_state.attr,
1158 NULL
1159};
1160
1161static struct attribute_group bh1770_attribute_group = {
1162 .attrs = sysfs_attrs
1163};
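/*
 * Usage sketch (illustrative; the sysfs directory depends on the I2C bus
 * number and slave address of the actual board):
 *   echo 1 > /sys/bus/i2c/devices/2-0038/power_state   # power up, start ALS
 *   cat /sys/bus/i2c/devices/2-0038/lux0_input         # waits for 1st result
 *   echo 1 > /sys/bus/i2c/devices/2-0038/prox0_raw_en  # enable proximity
 *   cat /sys/bus/i2c/devices/2-0038/prox0_raw
 */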
1164
1165static int __devinit bh1770_probe(struct i2c_client *client,
1166 const struct i2c_device_id *id)
1167{
1168 struct bh1770_chip *chip;
1169 int err;
1170
1171 chip = kzalloc(sizeof *chip, GFP_KERNEL);
1172 if (!chip)
1173 return -ENOMEM;
1174
1175 i2c_set_clientdata(client, chip);
1176 chip->client = client;
1177
1178 mutex_init(&chip->mutex);
1179 init_waitqueue_head(&chip->wait);
1180 INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
1181
1182 if (client->dev.platform_data == NULL) {
1183 dev_err(&client->dev, "platform data is mandatory\n");
1184 err = -EINVAL;
1185 goto fail1;
1186 }
1187
1188 chip->pdata = client->dev.platform_data;
1189 chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
1190 chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
1191 chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
1192 chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
1193
1194 if (chip->pdata->glass_attenuation == 0)
1195 chip->lux_ga = BH1770_NEUTRAL_GA;
1196 else
1197 chip->lux_ga = chip->pdata->glass_attenuation;
1198
1199 chip->prox_threshold = BH1770_PROX_DEF_THRES;
1200 chip->prox_led = chip->pdata->led_def_curr;
1201 chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
1202 chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
1203 chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
1204 chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
1205 chip->prox_data = 0;
1206
1207 chip->regs[0].supply = reg_vcc;
1208 chip->regs[1].supply = reg_vleds;
1209
1210 err = regulator_bulk_get(&client->dev,
1211 ARRAY_SIZE(chip->regs), chip->regs);
1212 if (err < 0) {
1213 dev_err(&client->dev, "Cannot get regulators\n");
1214 goto fail1;
1215 }
1216
1217 err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
1218 chip->regs);
1219 if (err < 0) {
1220 dev_err(&client->dev, "Cannot enable regulators\n");
1221 goto fail2;
1222 }
1223
1224 usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
1225 err = bh1770_detect(chip);
1226 if (err < 0)
1227 goto fail3;
1228
1229 /* Start chip */
1230 bh1770_chip_on(chip);
1231 pm_runtime_set_active(&client->dev);
1232 pm_runtime_enable(&client->dev);
1233
1234 chip->lux_corr = bh1770_get_corr_value(chip);
1235 if (chip->lux_corr == 0) {
1236 dev_err(&client->dev, "Improper correction values\n");
1237 err = -EINVAL;
1238 goto fail3;
1239 }
1240
1241 if (chip->pdata->setup_resources) {
1242 err = chip->pdata->setup_resources();
1243 if (err) {
1244 err = -EINVAL;
1245 goto fail3;
1246 }
1247 }
1248
1249 err = sysfs_create_group(&chip->client->dev.kobj,
1250 &bh1770_attribute_group);
1251 if (err < 0) {
1252 dev_err(&chip->client->dev, "Sysfs registration failed\n");
1253 goto fail4;
1254 }
1255
1256 /*
1257 * The chip needs a level triggered interrupt to work. However,
1258 * level triggering doesn't always work correctly with power
1259 * management, so select both edge and level triggering.
1260 */
1261 err = request_threaded_irq(client->irq, NULL,
1262 bh1770_irq,
1263 IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
1264 IRQF_TRIGGER_LOW,
1265 "bh1770", chip);
1266 if (err) {
1267 dev_err(&client->dev, "could not get IRQ %d\n",
1268 client->irq);
1269 goto fail5;
1270 }
1271 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
1272 return err;
1273fail5:
1274 sysfs_remove_group(&chip->client->dev.kobj,
1275 &bh1770_attribute_group);
1276fail4:
1277 if (chip->pdata->release_resources)
1278 chip->pdata->release_resources();
1279fail3:
1280 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
1281fail2:
1282 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1283fail1:
1284 kfree(chip);
1285 return err;
1286}
1287
1288static int __devexit bh1770_remove(struct i2c_client *client)
1289{
1290 struct bh1770_chip *chip = i2c_get_clientdata(client);
1291
1292 free_irq(client->irq, chip);
1293
1294 sysfs_remove_group(&chip->client->dev.kobj,
1295 &bh1770_attribute_group);
1296
1297 if (chip->pdata->release_resources)
1298 chip->pdata->release_resources();
1299
1300 cancel_delayed_work_sync(&chip->prox_work);
1301
1302 if (!pm_runtime_suspended(&client->dev))
1303 bh1770_chip_off(chip);
1304
1305 pm_runtime_disable(&client->dev);
1306 pm_runtime_set_suspended(&client->dev);
1307
1308 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1309 kfree(chip);
1310 return 0;
1311}
1312
1313#ifdef CONFIG_PM
1314static int bh1770_suspend(struct device *dev)
1315{
1316 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1317 struct bh1770_chip *chip = i2c_get_clientdata(client);
1318
1319 bh1770_chip_off(chip);
1320
1321 return 0;
1322}
1323
1324static int bh1770_resume(struct device *dev)
1325{
1326 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1327 struct bh1770_chip *chip = i2c_get_clientdata(client);
1328 int ret = 0;
1329
1330 bh1770_chip_on(chip);
1331
1332 if (!pm_runtime_suspended(dev)) {
1333 /*
1334 * If we were enabled at suspend time, it is expected
1335 * everything works nice and smoothly
1336 */
1337 ret = bh1770_lux_rate(chip, chip->lux_rate_index);
1338 ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
1339
1340 /* This causes interrupt after the next measurement cycle */
1341 bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
1342 BH1770_LUX_DEF_THRES);
1343 /* Inform that we are waiting for a result from ALS */
1344 chip->lux_wait_result = true;
1345 bh1770_prox_mode_control(chip);
1346 }
1347 return ret;
1348}
1349
1350#else
1351#define bh1770_suspend NULL
1352#define bh1770_shutdown NULL
1353#define bh1770_resume NULL
1354#endif
1355
1356#ifdef CONFIG_PM_RUNTIME
1357static int bh1770_runtime_suspend(struct device *dev)
1358{
1359 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1360 struct bh1770_chip *chip = i2c_get_clientdata(client);
1361
1362 bh1770_chip_off(chip);
1363
1364 return 0;
1365}
1366
1367static int bh1770_runtime_resume(struct device *dev)
1368{
1369 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
1370 struct bh1770_chip *chip = i2c_get_clientdata(client);
1371
1372 bh1770_chip_on(chip);
1373
1374 return 0;
1375}
1376#endif
1377
1378static const struct i2c_device_id bh1770_id[] = {
1379 {"bh1770glc", 0 },
1380 {"sfh7770", 0 },
1381 {}
1382};
1383
1384MODULE_DEVICE_TABLE(i2c, bh1770_id);
1385
1386static const struct dev_pm_ops bh1770_pm_ops = {
1387 SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
1388 SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
1389};
1390
1391static struct i2c_driver bh1770_driver = {
1392 .driver = {
1393 .name = "bh1770glc",
1394 .owner = THIS_MODULE,
1395 .pm = &bh1770_pm_ops,
1396 },
1397 .probe = bh1770_probe,
1398 .remove = __devexit_p(bh1770_remove),
1399 .id_table = bh1770_id,
1400};
1401
1402static int __init bh1770_init(void)
1403{
1404 return i2c_add_driver(&bh1770_driver);
1405}
1406
1407static void __exit bh1770_exit(void)
1408{
1409 i2c_del_driver(&bh1770_driver);
1410}
1411
1412MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
1413MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
1414MODULE_LICENSE("GPL v2");
1415
1416module_init(bh1770_init);
1417module_exit(bh1770_exit);
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
new file mode 100644
index 000000000000..d5f3a3fd2319
--- /dev/null
+++ b/drivers/misc/bh1780gli.c
@@ -0,0 +1,272 @@
1/*
2 * bh1780gli.c
3 * ROHM Ambient Light Sensor Driver
4 *
5 * Copyright (C) 2010 Texas Instruments
6 * Author: Hemanth V <hemanthv@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#include <linux/i2c.h>
21#include <linux/slab.h>
22#include <linux/mutex.h>
23#include <linux/platform_device.h>
24#include <linux/delay.h>
25
26#define BH1780_REG_CONTROL 0x80
27#define BH1780_REG_PARTID 0x8A
28#define BH1780_REG_MANFID 0x8B
29#define BH1780_REG_DLOW 0x8C
30#define BH1780_REG_DHIGH 0x8D
31
32#define BH1780_REVMASK (0xf)
33#define BH1780_POWMASK (0x3)
34#define BH1780_POFF (0x0)
35#define BH1780_PON (0x3)
36
37/* power on settling time in ms */
38#define BH1780_PON_DELAY 2
39
40struct bh1780_data {
41 struct i2c_client *client;
42 int power_state;
43 /* lock for sysfs operations */
44 struct mutex lock;
45};
46
47static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
48{
49 int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
50 if (ret < 0)
51 dev_err(&ddata->client->dev,
52 "i2c_smbus_write_byte_data failed error %d\
53 Register (%s)\n", ret, msg);
54 return ret;
55}
56
57static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
58{
59 int ret = i2c_smbus_read_byte_data(ddata->client, reg);
60 if (ret < 0)
61 dev_err(&ddata->client->dev,
62 "i2c_smbus_read_byte_data failed error %d\
63 Register (%s)\n", ret, msg);
64 return ret;
65}
66
67static ssize_t bh1780_show_lux(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct platform_device *pdev = to_platform_device(dev);
71 struct bh1780_data *ddata = platform_get_drvdata(pdev);
72 int lsb, msb;
73
74 lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
75 if (lsb < 0)
76 return lsb;
77
78 msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
79 if (msb < 0)
80 return msb;
81
82 return sprintf(buf, "%d\n", (msb << 8) | lsb);
83}
84
85static ssize_t bh1780_show_power_state(struct device *dev,
86 struct device_attribute *attr,
87 char *buf)
88{
89 struct platform_device *pdev = to_platform_device(dev);
90 struct bh1780_data *ddata = platform_get_drvdata(pdev);
91 int state;
92
93 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
94 if (state < 0)
95 return state;
96
97 return sprintf(buf, "%d\n", state & BH1780_POWMASK);
98}
99
100static ssize_t bh1780_store_power_state(struct device *dev,
101 struct device_attribute *attr,
102 const char *buf, size_t count)
103{
104 struct platform_device *pdev = to_platform_device(dev);
105 struct bh1780_data *ddata = platform_get_drvdata(pdev);
106 unsigned long val;
107 int error;
108
109 error = strict_strtoul(buf, 0, &val);
110 if (error)
111 return error;
112
113 if (val < BH1780_POFF || val > BH1780_PON)
114 return -EINVAL;
115
116 mutex_lock(&ddata->lock);
117
118 error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
119 if (error < 0) {
120 mutex_unlock(&ddata->lock);
121 return error;
122 }
123
124 msleep(BH1780_PON_DELAY);
125 ddata->power_state = val;
126 mutex_unlock(&ddata->lock);
127
128 return count;
129}
130
131static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
132
133static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
134 bh1780_show_power_state, bh1780_store_power_state);
135
136static struct attribute *bh1780_attributes[] = {
137 &dev_attr_power_state.attr,
138 &dev_attr_lux.attr,
139 NULL
140};
141
142static const struct attribute_group bh1780_attr_group = {
143 .attrs = bh1780_attributes,
144};
145
146static int __devinit bh1780_probe(struct i2c_client *client,
147 const struct i2c_device_id *id)
148{
149 int ret;
150 struct bh1780_data *ddata = NULL;
151 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
152
153 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
154 ret = -EIO;
155 goto err_op_failed;
156 }
157
158 ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
159 if (ddata == NULL) {
160 ret = -ENOMEM;
161 goto err_op_failed;
162 }
163
164 ddata->client = client;
165 i2c_set_clientdata(client, ddata);
166
167 ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
168 if (ret < 0)
169 goto err_op_failed;
170
171 dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
172 (ret & BH1780_REVMASK));
173
174 mutex_init(&ddata->lock);
175
176 ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
177 if (ret)
178 goto err_op_failed;
179
180 return 0;
181
182err_op_failed:
183 kfree(ddata);
184 return ret;
185}
186
187static int __devexit bh1780_remove(struct i2c_client *client)
188{
189 struct bh1780_data *ddata;
190
191 ddata = i2c_get_clientdata(client);
192 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
193 kfree(ddata);
194
195 return 0;
196}
197
198#ifdef CONFIG_PM
199static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
200{
201 struct bh1780_data *ddata;
202 int state, ret;
203
204 ddata = i2c_get_clientdata(client);
205 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
206 if (state < 0)
207 return state;
208
209 ddata->power_state = state & BH1780_POWMASK;
210
211 ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
212 "CONTROL");
213
214 if (ret < 0)
215 return ret;
216
217 return 0;
218}
219
220static int bh1780_resume(struct i2c_client *client)
221{
222 struct bh1780_data *ddata;
223 int state, ret;
224
225 ddata = i2c_get_clientdata(client);
226 state = ddata->power_state;
227
228 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
229 "CONTROL");
230
231 if (ret < 0)
232 return ret;
233
234 return 0;
235}
236#else
237#define bh1780_suspend NULL
238#define bh1780_resume NULL
239#endif /* CONFIG_PM */
240
241static const struct i2c_device_id bh1780_id[] = {
242 { "bh1780", 0 },
243 { },
244};
245
246static struct i2c_driver bh1780_driver = {
247 .probe = bh1780_probe,
248 .remove = bh1780_remove,
249 .id_table = bh1780_id,
250 .suspend = bh1780_suspend,
251 .resume = bh1780_resume,
252 .driver = {
253 .name = "bh1780"
254 },
255};
256
257static int __init bh1780_init(void)
258{
259 return i2c_add_driver(&bh1780_driver);
260}
261
262static void __exit bh1780_exit(void)
263{
264 i2c_del_driver(&bh1780_driver);
265}
266
267module_init(bh1780_init)
268module_exit(bh1780_exit)
269
270MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
271MODULE_LICENSE("GPL");
272MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
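For quick bring-up testing of the bh1780gli driver above, its sysfs attributes can be read from userspace. The sketch below is only an illustration: the sysfs path (bus 2, slave address 0x29) is an assumption and depends on how the sensor is instantiated on a given board.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical path; adjust the bus number and address for your board */
	const char *path = "/sys/bus/i2c/devices/2-0029/lux";
	FILE *f = fopen(path, "r");
	long lux;

	if (!f) {
		perror("open lux attribute");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%ld", &lux) == 1)
		printf("ambient light: %ld lux\n", lux);
	fclose(f);
	return EXIT_SUCCESS;
}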
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
new file mode 100644
index 000000000000..63ee4c1a5315
--- /dev/null
+++ b/drivers/misc/bmp085.c
@@ -0,0 +1,482 @@
1/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
2
3 This driver supports the bmp085 digital barometric pressure
4 and temperature sensor from Bosch Sensortec. The datasheet
5 is available from their website:
6 http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf
7
8 A pressure measurement is issued by reading from pressure0_input.
9 The return value ranges from 30000 to 110000 pascal with a resolution
10 of 1 pascal (0.01 millibar), which enables measurements from 9000m above
11 to 500m below sea level.
12
13 The temperature can be read from temp0_input. Values range from
14 -400 to 850, representing the ambient temperature in degrees celsius
15 multiplied by 10. The resolution is 0.1 celsius.
16
17 Because ambient pressure is temperature dependent, a temperature
18 measurement will be executed automatically even if the user is reading
19 from pressure0_input. This happens if the last temperature measurement
20 has been executed more than one second ago.
21
22 To decrease RMS noise from pressure measurements, the bmp085 can
23 autonomously calculate the average of up to eight samples. This is
24 set up by writing to the oversampling sysfs file. Accepted values
25 are 0, 1, 2 and 3; 2^x, where x is the value written to this file,
26 specifies the number of samples used to calculate the ambient pressure.
27 RMS noise is specified as 6 pascal (without averaging) and decreases
28 to 3 pascal when using an oversampling setting of 3.
29
30 This program is free software; you can redistribute it and/or modify
31 it under the terms of the GNU General Public License as published by
32 the Free Software Foundation; either version 2 of the License, or
33 (at your option) any later version.
34
35 This program is distributed in the hope that it will be useful,
36 but WITHOUT ANY WARRANTY; without even the implied warranty of
37 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
38 GNU General Public License for more details.
39
40 You should have received a copy of the GNU General Public License
41 along with this program; if not, write to the Free Software
42 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
43*/
44
45
46#include <linux/module.h>
47#include <linux/init.h>
48#include <linux/i2c.h>
49#include <linux/slab.h>
50#include <linux/delay.h>
51
52
53#define BMP085_I2C_ADDRESS 0x77
54#define BMP085_CHIP_ID 0x55
55
56#define BMP085_CALIBRATION_DATA_START 0xAA
57#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */
58#define BMP085_CHIP_ID_REG 0xD0
59#define BMP085_VERSION_REG 0xD1
60#define BMP085_CTRL_REG 0xF4
61#define BMP085_TEMP_MEASUREMENT 0x2E
62#define BMP085_PRESSURE_MEASUREMENT 0x34
63#define BMP085_CONVERSION_REGISTER_MSB 0xF6
64#define BMP085_CONVERSION_REGISTER_LSB 0xF7
65#define BMP085_CONVERSION_REGISTER_XLSB 0xF8
66#define BMP085_TEMP_CONVERSION_TIME 5
67
68#define BMP085_CLIENT_NAME "bmp085"
69
70
71static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS,
72 I2C_CLIENT_END };
73
74struct bmp085_calibration_data {
75 s16 AC1, AC2, AC3;
76 u16 AC4, AC5, AC6;
77 s16 B1, B2;
78 s16 MB, MC, MD;
79};
80
81
82/* Each client has this additional data */
83struct bmp085_data {
84 struct i2c_client *client;
85 struct mutex lock;
86 struct bmp085_calibration_data calibration;
87 u32 raw_temperature;
88 u32 raw_pressure;
89 unsigned char oversampling_setting;
90 u32 last_temp_measurement;
91 s32 b6; /* calculated temperature correction coefficient */
92};
93
94
95static s32 bmp085_read_calibration_data(struct i2c_client *client)
96{
97 u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
98 struct bmp085_data *data = i2c_get_clientdata(client);
99 struct bmp085_calibration_data *cali = &(data->calibration);
100 s32 status = i2c_smbus_read_i2c_block_data(client,
101 BMP085_CALIBRATION_DATA_START,
102 BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16),
103 (u8 *)tmp);
104 if (status < 0)
105 return status;
106
107 if (status != BMP085_CALIBRATION_DATA_LENGTH*sizeof(u16))
108 return -EIO;
109
110 cali->AC1 = be16_to_cpu(tmp[0]);
111 cali->AC2 = be16_to_cpu(tmp[1]);
112 cali->AC3 = be16_to_cpu(tmp[2]);
113 cali->AC4 = be16_to_cpu(tmp[3]);
114 cali->AC5 = be16_to_cpu(tmp[4]);
115 cali->AC6 = be16_to_cpu(tmp[5]);
116 cali->B1 = be16_to_cpu(tmp[6]);
117 cali->B2 = be16_to_cpu(tmp[7]);
118 cali->MB = be16_to_cpu(tmp[8]);
119 cali->MC = be16_to_cpu(tmp[9]);
120 cali->MD = be16_to_cpu(tmp[10]);
121 return 0;
122}
123
124
125static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
126{
127 u16 tmp;
128 s32 status;
129
130 mutex_lock(&data->lock);
131 status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
132 BMP085_TEMP_MEASUREMENT);
133 if (status != 0) {
134 dev_err(&data->client->dev,
135 "Error while requesting temperature measurement.\n");
136 goto exit;
137 }
138 msleep(BMP085_TEMP_CONVERSION_TIME);
139
140 status = i2c_smbus_read_i2c_block_data(data->client,
141 BMP085_CONVERSION_REGISTER_MSB, sizeof(tmp), (u8 *)&tmp);
142 if (status < 0)
143 goto exit;
144 if (status != sizeof(tmp)) {
145 dev_err(&data->client->dev,
146 "Error while reading temperature measurement result\n");
147 status = -EIO;
148 goto exit;
149 }
150 data->raw_temperature = be16_to_cpu(tmp);
151 data->last_temp_measurement = jiffies;
152 status = 0; /* everything ok, return 0 */
153
154exit:
155 mutex_unlock(&data->lock);
156 return status;
157}
158
159static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
160{
161 u32 tmp = 0;
162 s32 status;
163
164 mutex_lock(&data->lock);
165 status = i2c_smbus_write_byte_data(data->client, BMP085_CTRL_REG,
166 BMP085_PRESSURE_MEASUREMENT + (data->oversampling_setting<<6));
167 if (status != 0) {
168 dev_err(&data->client->dev,
169 "Error while requesting pressure measurement.\n");
170 goto exit;
171 }
172
173 /* wait for the end of conversion */
174 msleep(2+(3 << data->oversampling_setting));
175
176 /* copy data into a u32 (4 bytes), but skip the first byte. */
177 status = i2c_smbus_read_i2c_block_data(data->client,
178 BMP085_CONVERSION_REGISTER_MSB, 3, ((u8 *)&tmp)+1);
179 if (status < 0)
180 goto exit;
181 if (status != 3) {
182 dev_err(&data->client->dev,
183 "Error while reading pressure measurement results\n");
184 status = -EIO;
185 goto exit;
186 }
187 data->raw_pressure = be32_to_cpu((tmp));
188 data->raw_pressure >>= (8-data->oversampling_setting);
189 status = 0; /* everything ok, return 0 */
190
191exit:
192 mutex_unlock(&data->lock);
193 return status;
194}
195
196
197/*
198 * This function starts the temperature measurement and returns the value
199 * in tenths of a degree celsius.
200 */
201static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
202{
203 struct bmp085_calibration_data *cali = &data->calibration;
204 long x1, x2;
205 int status;
206
207 status = bmp085_update_raw_temperature(data);
208 if (status != 0)
209 goto exit;
210
211 x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15;
212 x2 = (cali->MC << 11) / (x1 + cali->MD);
213 data->b6 = x1 + x2 - 4000;
214 /* if NULL, just update b6; used for pressure-only measurements */
215 if (temperature != NULL)
216 *temperature = (x1+x2+8) >> 4;
217
218exit:
219 return status;
220}
221
222/*
223 * This function starts the pressure measurement and returns the value
224 * in millibar. Since the pressure depends on the ambient temperature,
225 * a temperature measurement is executed if the last known value is older
226 * than one second.
227 */
228static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure)
229{
230 struct bmp085_calibration_data *cali = &data->calibration;
231 s32 x1, x2, x3, b3;
232 u32 b4, b7;
233 s32 p;
234 int status;
235
236 /* at least once per second, force an update of the ambient temperature */
237 if (data->last_temp_measurement + 1*HZ < jiffies) {
238 status = bmp085_get_temperature(data, NULL);
239 if (status != 0)
240 goto exit;
241 }
242
243 status = bmp085_update_raw_pressure(data);
244 if (status != 0)
245 goto exit;
246
247 x1 = (data->b6 * data->b6) >> 12;
248 x1 *= cali->B2;
249 x1 >>= 11;
250
251 x2 = cali->AC2 * data->b6;
252 x2 >>= 11;
253
254 x3 = x1 + x2;
255
256 b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2);
257 b3 >>= 2;
258
259 x1 = (cali->AC3 * data->b6) >> 13;
260 x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16;
261 x3 = (x1 + x2 + 2) >> 2;
262 b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15;
263
264 b7 = ((u32)data->raw_pressure - b3) *
265 (50000 >> data->oversampling_setting);
266 p = ((b7 < 0x80000000) ? ((b7 << 1) / b4) : ((b7 / b4) * 2));
267
268 x1 = p >> 8;
269 x1 *= x1;
270 x1 = (x1 * 3038) >> 16;
271 x2 = (-7357 * p) >> 16;
272 p += (x1 + x2 + 3791) >> 4;
273
274 *pressure = p;
275
276exit:
277 return status;
278}
279
280/*
281 * This function sets the chip-internal oversampling. Valid values are 0..3.
282 * The chip will use 2^oversampling samples for internal averaging.
283 * This influences the measurement time and the accuracy; larger values
284 * increase both. The datasheet gives an overview of how measurement time,
285 * accuracy and noise correlate.
286 */
287static void bmp085_set_oversampling(struct bmp085_data *data,
288 unsigned char oversampling)
289{
290 if (oversampling > 3)
291 oversampling = 3;
292 data->oversampling_setting = oversampling;
293}
294
295/*
296 * Returns the currently selected oversampling. Range: 0..3
297 */
298static unsigned char bmp085_get_oversampling(struct bmp085_data *data)
299{
300 return data->oversampling_setting;
301}
302
303/* sysfs callbacks */
304static ssize_t set_oversampling(struct device *dev,
305 struct device_attribute *attr,
306 const char *buf, size_t count)
307{
308 struct i2c_client *client = to_i2c_client(dev);
309 struct bmp085_data *data = i2c_get_clientdata(client);
310 unsigned long oversampling;
311 int success = strict_strtoul(buf, 10, &oversampling);
312 if (success == 0) {
313 bmp085_set_oversampling(data, oversampling);
314 return count;
315 }
316 return success;
317}
318
319static ssize_t show_oversampling(struct device *dev,
320 struct device_attribute *attr, char *buf)
321{
322 struct i2c_client *client = to_i2c_client(dev);
323 struct bmp085_data *data = i2c_get_clientdata(client);
324 return sprintf(buf, "%u\n", bmp085_get_oversampling(data));
325}
326static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO,
327 show_oversampling, set_oversampling);
328
329
330static ssize_t show_temperature(struct device *dev,
331 struct device_attribute *attr, char *buf)
332{
333 int temperature;
334 int status;
335 struct i2c_client *client = to_i2c_client(dev);
336 struct bmp085_data *data = i2c_get_clientdata(client);
337
338 status = bmp085_get_temperature(data, &temperature);
339 if (status != 0)
340 return status;
341 else
342 return sprintf(buf, "%d\n", temperature);
343}
344static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL);
345
346
347static ssize_t show_pressure(struct device *dev,
348 struct device_attribute *attr, char *buf)
349{
350 int pressure;
351 int status;
352 struct i2c_client *client = to_i2c_client(dev);
353 struct bmp085_data *data = i2c_get_clientdata(client);
354
355 status = bmp085_get_pressure(data, &pressure);
356 if (status != 0)
357 return status;
358 else
359 return sprintf(buf, "%d\n", pressure);
360}
361static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL);
362
363
364static struct attribute *bmp085_attributes[] = {
365 &dev_attr_temp0_input.attr,
366 &dev_attr_pressure0_input.attr,
367 &dev_attr_oversampling.attr,
368 NULL
369};
370
371static const struct attribute_group bmp085_attr_group = {
372 .attrs = bmp085_attributes,
373};
374
375static int bmp085_detect(struct i2c_client *client, struct i2c_board_info *info)
376{
377 if (client->addr != BMP085_I2C_ADDRESS)
378 return -ENODEV;
379
380 if (i2c_smbus_read_byte_data(client, BMP085_CHIP_ID_REG) != BMP085_CHIP_ID)
381 return -ENODEV;
382
383 return 0;
384}
385
386static int bmp085_init_client(struct i2c_client *client)
387{
388 unsigned char version;
389 int status;
390 struct bmp085_data *data = i2c_get_clientdata(client);
391 data->client = client;
392 status = bmp085_read_calibration_data(client);
393 if (status != 0)
394 goto exit;
395 version = i2c_smbus_read_byte_data(client, BMP085_VERSION_REG);
396 data->last_temp_measurement = 0;
397 data->oversampling_setting = 3;
398 mutex_init(&data->lock);
399 dev_info(&data->client->dev, "BMP085 ver. %d.%d found.\n",
400 (version & 0x0F), (version & 0xF0) >> 4);
401exit:
402 return status;
403}
404
405static int bmp085_probe(struct i2c_client *client,
406 const struct i2c_device_id *id)
407{
408 struct bmp085_data *data;
409 int err = 0;
410
411 data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL);
412 if (!data) {
413 err = -ENOMEM;
414 goto exit;
415 }
416
417 /* default settings after POR */
418 data->oversampling_setting = 0x00;
419
420 i2c_set_clientdata(client, data);
421
422 /* Initialize the BMP085 chip */
423 err = bmp085_init_client(client);
424 if (err != 0)
425 goto exit_free;
426
427 /* Register sysfs hooks */
428 err = sysfs_create_group(&client->dev.kobj, &bmp085_attr_group);
429 if (err)
430 goto exit_free;
431
432 dev_info(&data->client->dev, "Successfully initialized bmp085!\n");
433 goto exit;
434
435exit_free:
436 kfree(data);
437exit:
438 return err;
439}
440
441static int bmp085_remove(struct i2c_client *client)
442{
443 sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
444 kfree(i2c_get_clientdata(client));
445 return 0;
446}
447
448static const struct i2c_device_id bmp085_id[] = {
449 { "bmp085", 0 },
450 { }
451};
452
453static struct i2c_driver bmp085_driver = {
454 .driver = {
455 .owner = THIS_MODULE,
456 .name = "bmp085"
457 },
458 .id_table = bmp085_id,
459 .probe = bmp085_probe,
460 .remove = bmp085_remove,
461
462 .detect = bmp085_detect,
463 .address_list = normal_i2c
464};
465
466static int __init bmp085_init(void)
467{
468 return i2c_add_driver(&bmp085_driver);
469}
470
471static void __exit bmp085_exit(void)
472{
473 i2c_del_driver(&bmp085_driver);
474}
475
476
477MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com>");
478MODULE_DESCRIPTION("BMP085 driver");
479MODULE_LICENSE("GPL");
480
481module_init(bmp085_init);
482module_exit(bmp085_exit);
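The pressure math in bmp085_get_pressure() above relies on b6, which bmp085_get_temperature() derives with a few lines of fixed-point arithmetic. The standalone sketch below replays just that temperature step with illustrative calibration values (roughly the datasheet's worked example); treat the constants as assumptions, not values read from real hardware.

#include <stdio.h>

int main(void)
{
	/* assumed calibration data and raw ADC reading (illustrative only) */
	long ac5 = 32757, ac6 = 23153, mc = -8711, md = 2868;
	long raw_temperature = 27898;
	long x1, x2, b6, temperature;

	x1 = ((raw_temperature - ac6) * ac5) >> 15;
	x2 = (mc * 2048) / (x1 + md);	/* same as the driver's MC << 11 */
	b6 = x1 + x2 - 4000;		/* reused later by the pressure path */
	temperature = (x1 + x2 + 8) >> 4;	/* tenths of a degree celsius */

	/* with these inputs: temperature = 150, i.e. 15.0 C, b6 = -1600 */
	printf("temperature = %ld (%ld.%ld C), b6 = %ld\n",
	       temperature, temperature / 10, temperature % 10, b6);
	return 0;
}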
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index b5346b4db91a..19fc7c1cb428 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/idr.h> 21#include <linux/idr.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/slab.h>
23 24
24#include <linux/c2port.h> 25#include <linux/c2port.h>
25 26
@@ -706,7 +707,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
706 return nread; 707 return nread;
707} 708}
708 709
709static ssize_t c2port_read_flash_data(struct kobject *kobj, 710static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
710 struct bin_attribute *attr, 711 struct bin_attribute *attr,
711 char *buffer, loff_t offset, size_t count) 712 char *buffer, loff_t offset, size_t count)
712{ 713{
@@ -823,7 +824,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
823 return nwrite; 824 return nwrite;
824} 825}
825 826
826static ssize_t c2port_write_flash_data(struct kobject *kobj, 827static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
827 struct bin_attribute *attr, 828 struct bin_attribute *attr,
828 char *buffer, loff_t offset, size_t count) 829 char *buffer, loff_t offset, size_t count)
829{ 830{
@@ -912,8 +913,8 @@ struct c2port_device *c2port_device_register(char *name,
912 913
913 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, 914 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
914 "c2port%d", id); 915 "c2port%d", id);
915 if (unlikely(!c2dev->dev)) { 916 if (unlikely(IS_ERR(c2dev->dev))) {
916 ret = -ENOMEM; 917 ret = PTR_ERR(c2dev->dev);
917 goto error_device_create; 918 goto error_device_create;
918 } 919 }
919 dev_set_drvdata(c2dev->dev, c2dev); 920 dev_set_drvdata(c2dev->dev, c2dev);
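The c2port hunk above matters because device_create() signals failure with an ERR_PTR()-encoded pointer, not NULL, so the old NULL test could never fire. A small userspace mock of the pattern (the kernel helpers are re-implemented here only for illustration) shows why IS_ERR()/PTR_ERR() is the right check:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* fails the way device_create() does: by returning an encoded errno */
static void *fake_device_create(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *dev = fake_device_create(1);

	if (IS_ERR(dev))	/* a plain NULL check would miss this failure */
		printf("device_create failed: %ld\n", PTR_ERR(dev));
	return 0;
}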
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index b14eab0f2ba5..efec4139c3f6 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -9,11 +9,11 @@
9 */ 9 */
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/pci.h> 12#include <linux/pci.h>
14#include <linux/spinlock.h> 13#include <linux/spinlock.h>
15#include <linux/idr.h> 14#include <linux/idr.h>
16#include <linux/cb710.h> 15#include <linux/cb710.h>
16#include <linux/gfp.h>
17 17
18static DEFINE_IDA(cb710_ida); 18static DEFINE_IDA(cb710_ida);
19static DEFINE_SPINLOCK(cb710_ida_lock); 19static DEFINE_SPINLOCK(cb710_ida_lock);
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c
index 02358d086e03..fcb3b8e30c52 100644
--- a/drivers/misc/cb710/debug.c
+++ b/drivers/misc/cb710/debug.c
@@ -10,7 +10,6 @@
10#include <linux/cb710.h> 10#include <linux/cb710.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
14 13
15#define CB710_REG_COUNT 0x80 14#define CB710_REG_COUNT 0x80
16 15
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d38a7acdb6ec..d019746551f3 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -114,7 +114,6 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
114 if (!left) 114 if (!left)
115 return; 115 return;
116 addr += len; 116 addr += len;
117 flush_kernel_dcache_page(miter->page);
118 } while (sg_dwiter_next(miter)); 117 } while (sg_dwiter_next(miter));
119} 118}
120 119
@@ -142,9 +141,6 @@ void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t da
142 return; 141 return;
143 } else 142 } else
144 sg_dwiter_write_slow(miter, data); 143 sg_dwiter_write_slow(miter, data);
145
146 if (miter->length == miter->consumed)
147 flush_kernel_dcache_page(miter->page);
148} 144}
149EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block); 145EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
150 146
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
new file mode 100644
index 000000000000..6f6218061b0d
--- /dev/null
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -0,0 +1,382 @@
1/*
2 * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
3 *
4 * Copyright (C) 2006, Advanced Micro Devices, Inc.
5 * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
6 * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public License
10 * as published by the Free Software Foundation.
11 *
12 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
13 */
14
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/cs5535.h>
21#include <linux/slab.h>
22
23#define DRV_NAME "cs5535-mfgpt"
24#define MFGPT_BAR 2
25
26static int mfgpt_reset_timers;
27module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
28MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; "
29 "required by some broken BIOSes (ie, TinyBIOS < 0.99).");
30
31struct cs5535_mfgpt_timer {
32 struct cs5535_mfgpt_chip *chip;
33 int nr;
34};
35
36static struct cs5535_mfgpt_chip {
37 DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
38 resource_size_t base;
39
40 struct pci_dev *pdev;
41 spinlock_t lock;
42 int initialized;
43} cs5535_mfgpt_chip;
44
45int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
46 int event, int enable)
47{
48 uint32_t msr, mask, value, dummy;
49 int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
50
51 if (!timer) {
52 WARN_ON(1);
53 return -EIO;
54 }
55
56 /*
57 * The register maps for these are described in sections 6.17.1.x of
58 * the AMD Geode CS5536 Companion Device Data Book.
59 */
60 switch (event) {
61 case MFGPT_EVENT_RESET:
62 /*
63 * XXX: According to the docs, we cannot reset timers above
64 * 6; that is, resets for 7 and 8 will be ignored. Is this
65 * a problem? -dilinger
66 */
67 msr = MSR_MFGPT_NR;
68 mask = 1 << (timer->nr + 24);
69 break;
70
71 case MFGPT_EVENT_NMI:
72 msr = MSR_MFGPT_NR;
73 mask = 1 << (timer->nr + shift);
74 break;
75
76 case MFGPT_EVENT_IRQ:
77 msr = MSR_MFGPT_IRQ;
78 mask = 1 << (timer->nr + shift);
79 break;
80
81 default:
82 return -EIO;
83 }
84
85 rdmsr(msr, value, dummy);
86
87 if (enable)
88 value |= mask;
89 else
90 value &= ~mask;
91
92 wrmsr(msr, value, dummy);
93 return 0;
94}
95EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
96
97int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
98 int enable)
99{
100 uint32_t zsel, lpc, dummy;
101 int shift;
102
103 if (!timer) {
104 WARN_ON(1);
105 return -EIO;
106 }
107
108 /*
109 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
110 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
111 * 2, and we must neither use nor change it.
112 * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
113 * IRQ of the 1st. This can only happen when forcing an IRQ; calling this
114 * with *irq==0 is safe. Currently there _are_ no 2 drivers.
115 */
116 rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
117 shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
118 if (((zsel >> shift) & 0xF) == 2)
119 return -EIO;
120
121 /* Choose IRQ: if none supplied, keep IRQ already set or use default */
122 if (!*irq)
123 *irq = (zsel >> shift) & 0xF;
124 if (!*irq)
125 *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
126
127 /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
128 if (*irq < 1 || *irq == 2 || *irq > 15)
129 return -EIO;
130 rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
131 if (lpc & (1 << *irq))
132 return -EIO;
133
134 /* All chosen and checked - go for it */
135 if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
136 return -EIO;
137 if (enable) {
138 zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
139 wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
140 }
141
142 return 0;
143}
144EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
145
146struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
147{
148 struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
149 struct cs5535_mfgpt_timer *timer = NULL;
150 unsigned long flags;
151 int max;
152
153 if (!mfgpt->initialized)
154 goto done;
155
156 /* only allocate timers from the working domain if requested */
157 if (domain == MFGPT_DOMAIN_WORKING)
158 max = 6;
159 else
160 max = MFGPT_MAX_TIMERS;
161
162 if (timer_nr >= max) {
163 /* programmer error. silly programmers! */
164 WARN_ON(1);
165 goto done;
166 }
167
168 spin_lock_irqsave(&mfgpt->lock, flags);
169 if (timer_nr < 0) {
170 unsigned long t;
171
172 /* try to find any available timer */
173 t = find_first_bit(mfgpt->avail, max);
174 /* set timer_nr to -1 if no timers available */
175 timer_nr = t < max ? (int) t : -1;
176 } else {
177 /* check if the requested timer's available */
178 if (test_bit(timer_nr, mfgpt->avail))
179 timer_nr = -1;
180 }
181
182 if (timer_nr >= 0)
183 /* if timer_nr is not -1, it's an available timer */
184 __clear_bit(timer_nr, mfgpt->avail);
185 spin_unlock_irqrestore(&mfgpt->lock, flags);
186
187 if (timer_nr < 0)
188 goto done;
189
190 timer = kmalloc(sizeof(*timer), GFP_KERNEL);
191 if (!timer) {
192 /* aw hell */
193 spin_lock_irqsave(&mfgpt->lock, flags);
194 __set_bit(timer_nr, mfgpt->avail);
195 spin_unlock_irqrestore(&mfgpt->lock, flags);
196 goto done;
197 }
198 timer->chip = mfgpt;
199 timer->nr = timer_nr;
200 dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
201
202done:
203 return timer;
204}
205EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
206
207/*
208 * XXX: This frees the timer memory, but never resets the actual hardware
209 * timer. The old geode_mfgpt code did this; it would be good to figure
210 * out a way to actually release the hardware timer. See comments below.
211 */
212void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
213{
214 unsigned long flags;
215 uint16_t val;
216
217 /* timer can be made available again only if never set up */
218 val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP);
219 if (!(val & MFGPT_SETUP_SETUP)) {
220 spin_lock_irqsave(&timer->chip->lock, flags);
221 __set_bit(timer->nr, timer->chip->avail);
222 spin_unlock_irqrestore(&timer->chip->lock, flags);
223 }
224
225 kfree(timer);
226}
227EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
228
229uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
230{
231 return inw(timer->chip->base + reg + (timer->nr * 8));
232}
233EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
234
235void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
236 uint16_t value)
237{
238 outw(value, timer->chip->base + reg + (timer->nr * 8));
239}
240EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
241
242/*
243 * This is a sledgehammer that resets all MFGPT timers. This is required by
244 * some broken BIOSes which leave the system in an unstable state
245 * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain
246 * whether this secret MSR can be used to release individual timers.
247 * Jordan tells me that he and Mitch once played w/ it, but it's unclear
248 * what the results of that were (and they experienced some instability).
249 */
250static void __init reset_all_timers(void)
251{
252 uint32_t val, dummy;
253
254 /* The following undocumented bit resets the MFGPT timers */
255 val = 0xFF; dummy = 0;
256 wrmsr(MSR_MFGPT_SETUP, val, dummy);
257}
258
259/*
260 * Check whether any MFGPTs are available for the kernel to use. In most
261 * cases, firmware that uses AMD's VSA code will claim all timers during
262 * bootup; we certainly don't want to take them if they're already in use.
263 * In other cases (such as with VSAless OpenFirmware), the system firmware
264 * leaves timers available for us to use.
265 */
266static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
267{
268 struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
269 unsigned long flags;
270 int timers = 0;
271 uint16_t val;
272 int i;
273
274 /* bios workaround */
275 if (mfgpt_reset_timers)
276 reset_all_timers();
277
278 /* just to be safe, protect this section w/ lock */
279 spin_lock_irqsave(&mfgpt->lock, flags);
280 for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
281 timer.nr = i;
282 val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
283 if (!(val & MFGPT_SETUP_SETUP)) {
284 __set_bit(i, mfgpt->avail);
285 timers++;
286 }
287 }
288 spin_unlock_irqrestore(&mfgpt->lock, flags);
289
290 return timers;
291}
292
293static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
294 const struct pci_device_id *pci_id)
295{
296 int err, t;
297
298 /* There are two ways to get the MFGPT base address; one is by
299 * fetching it from MSR_LBAR_MFGPT, the other is by reading the
300 * PCI BAR info. The latter method is easier (especially across
301 * different architectures), so we'll stick with that for now. If
302 * it turns out to be unreliable in the face of crappy BIOSes, we
303 * can always go back to using MSRs.. */
304
305 err = pci_enable_device_io(pdev);
306 if (err) {
307 dev_err(&pdev->dev, "can't enable device IO\n");
308 goto done;
309 }
310
311 err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
312 if (err) {
313 dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
314 goto done;
315 }
316
317 /* set up the driver-specific struct */
318 cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
319 cs5535_mfgpt_chip.pdev = pdev;
320 spin_lock_init(&cs5535_mfgpt_chip.lock);
321
322 dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
323 (unsigned long long) cs5535_mfgpt_chip.base);
324
325 /* detect the available timers */
326 t = scan_timers(&cs5535_mfgpt_chip);
327 dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
328 cs5535_mfgpt_chip.initialized = 1;
329 return 0;
330
331done:
332 return err;
333}
334
335static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
336 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
337 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
338 { 0, },
339};
340MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
341
342/*
343 * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
344 * registration stuff. It allows only one driver to bind to each PCI
345 * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
346 * device. Instead, we manually scan for the PCI device, request a single
347 * region, and keep track of the devices that we're using.
348 */
349
350static int __init cs5535_mfgpt_scan_pci(void)
351{
352 struct pci_dev *pdev;
353 int err = -ENODEV;
354 int i;
355
356 for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
357 pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
358 cs5535_mfgpt_pci_tbl[i].device, NULL);
359 if (pdev) {
360 err = cs5535_mfgpt_probe(pdev,
361 &cs5535_mfgpt_pci_tbl[i]);
362 if (err)
363 pci_dev_put(pdev);
364
365 /* we only support a single CS5535/6 southbridge */
366 break;
367 }
368 }
369
370 return err;
371}
372
373static int __init cs5535_mfgpt_init(void)
374{
375 return cs5535_mfgpt_scan_pci();
376}
377
378module_init(cs5535_mfgpt_init);
379
380MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
381MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
382MODULE_LICENSE("GPL");
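A consumer of the MFGPT API exported above would allocate a timer and route its comparator to an IRQ roughly as sketched below. This is an illustration only: MFGPT_CMP2 and the module boilerplate are assumed from linux/cs5535.h, and a real user would also program the timer registers and request the IRQ.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cs5535.h>

static struct cs5535_mfgpt_timer *timer;

static int __init mfgpt_consumer_init(void)
{
	int irq = 0;	/* 0 lets cs5535_mfgpt_set_irq() pick/keep an IRQ */

	/* grab any free timer from the working domain (timers 0..5) */
	timer = cs5535_mfgpt_alloc_timer(-1, MFGPT_DOMAIN_WORKING);
	if (!timer)
		return -ENODEV;

	if (cs5535_mfgpt_set_irq(timer, MFGPT_CMP2, &irq, 1)) {
		cs5535_mfgpt_free_timer(timer);
		return -EIO;
	}
	pr_info("mfgpt consumer: timer routed to IRQ %d\n", irq);
	return 0;
}

static void __exit mfgpt_consumer_exit(void)
{
	cs5535_mfgpt_free_timer(timer);
}

module_init(mfgpt_consumer_init);
module_exit(mfgpt_consumer_exit);
MODULE_LICENSE("GPL");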
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
new file mode 100644
index 000000000000..a513f0aa6432
--- /dev/null
+++ b/drivers/misc/ds1682.c
@@ -0,0 +1,268 @@
1/*
2 * Dallas Semiconductor DS1682 Elapsed Time Recorder device driver
3 *
4 * Written by: Grant Likely <grant.likely@secretlab.ca>
5 *
6 * Copyright (C) 2007 Secret Lab Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 * The DS1682 elapsed timer recorder is a simple device that implements
15 * one elapsed time counter, one event counter, an alarm signal and 10
16 * bytes of general purpose EEPROM.
17 *
18 * This driver provides access to the DS1682 counters and user data via
19 * the sysfs. The following attributes are added to the device node:
20 * elapsed_time (u32): Total elapsed event time in ms resolution
21 * alarm_time (u32): When elapsed time exceeds the value in alarm_time,
22 * then the alarm pin is asserted.
23 * event_count (u16): number of times the event pin has gone low.
24 * eeprom (u8[10]): general purpose EEPROM
25 *
26 * Counter registers and user data are both read/write unless the device
27 * has been write protected. This driver does not support enabling write
28 * protection. Once write protection is turned on, it is impossible to
29 * turn it off again, so I have left the feature out of this driver to avoid
30 * accidental enabling; adding write-protect support would be trivial.
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/i2c.h>
37#include <linux/string.h>
38#include <linux/list.h>
39#include <linux/sysfs.h>
40#include <linux/ctype.h>
41#include <linux/hwmon-sysfs.h>
42
43/* Device registers */
44#define DS1682_REG_CONFIG 0x00
45#define DS1682_REG_ALARM 0x01
46#define DS1682_REG_ELAPSED 0x05
47#define DS1682_REG_EVT_CNTR 0x09
48#define DS1682_REG_EEPROM 0x0b
49#define DS1682_REG_RESET 0x1d
50#define DS1682_REG_WRITE_DISABLE 0x1e
51#define DS1682_REG_WRITE_MEM_DISABLE 0x1f
52
53#define DS1682_EEPROM_SIZE 10
54
55/*
56 * Generic counter attributes
57 */
58static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
59 char *buf)
60{
61 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
62 struct i2c_client *client = to_i2c_client(dev);
63 __le32 val = 0;
64 int rc;
65
66 dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name);
67
68 /* Read the register */
69 rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr,
70 (u8 *)&val);
71 if (rc < 0)
72 return -EIO;
73
74 /* Special case: the 32 bit regs are time values with 1/4s
75 * resolution, scale them up to milliseconds */
76 if (sattr->nr == 4)
77 return sprintf(buf, "%llu\n",
78 ((unsigned long long)le32_to_cpu(val)) * 250);
79
80 /* Format the output string and return # of bytes */
81 return sprintf(buf, "%li\n", (long)le32_to_cpu(val));
82}
83
84static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
85 const char *buf, size_t count)
86{
87 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
88 struct i2c_client *client = to_i2c_client(dev);
89 char *endp;
90 u64 val;
91 __le32 val_le;
92 int rc;
93
94 dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
95
96 /* Decode input */
97 val = simple_strtoull(buf, &endp, 0);
98 if (buf == endp) {
99 dev_dbg(dev, "input string not a number\n");
100 return -EINVAL;
101 }
102
103 /* Special case: the 32 bit regs are time values with 1/4s
104 * resolution, scale input down to quarter-seconds */
105 if (sattr->nr == 4)
106 do_div(val, 250);
107
108 /* write out the value */
109 val_le = cpu_to_le32(val);
110 rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr,
111 (u8 *)&val_le);
112 if (rc < 0) {
113 dev_err(dev, "register write failed; reg=0x%x, size=%i\n",
114 sattr->index, sattr->nr);
115 return -EIO;
116 }
117
118 return count;
119}
120
121/*
122 * Simple register attributes
123 */
124static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show,
125 ds1682_store, 4, DS1682_REG_ELAPSED);
126static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show,
127 ds1682_store, 4, DS1682_REG_ALARM);
128static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show,
129 ds1682_store, 2, DS1682_REG_EVT_CNTR);
130
131static const struct attribute_group ds1682_group = {
132 .attrs = (struct attribute *[]) {
133 &sensor_dev_attr_elapsed_time.dev_attr.attr,
134 &sensor_dev_attr_alarm_time.dev_attr.attr,
135 &sensor_dev_attr_event_count.dev_attr.attr,
136 NULL,
137 },
138};
139
140/*
141 * User data attribute
142 */
143static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
144 struct bin_attribute *attr,
145 char *buf, loff_t off, size_t count)
146{
147 struct i2c_client *client = kobj_to_i2c_client(kobj);
148 int rc;
149
150 dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
151 buf, off, count);
152
153 if (off >= DS1682_EEPROM_SIZE)
154 return 0;
155
156 if (off + count > DS1682_EEPROM_SIZE)
157 count = DS1682_EEPROM_SIZE - off;
158
159 rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
160 count, buf);
161 if (rc < 0)
162 return -EIO;
163
164 return count;
165}
166
167static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
168 struct bin_attribute *attr,
169 char *buf, loff_t off, size_t count)
170{
171 struct i2c_client *client = kobj_to_i2c_client(kobj);
172
173 dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
174 buf, off, count);
175
176 if (off >= DS1682_EEPROM_SIZE)
177 return -ENOSPC;
178
179 if (off + count > DS1682_EEPROM_SIZE)
180 count = DS1682_EEPROM_SIZE - off;
181
182 /* Write out to the device */
183 if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
184 count, buf) < 0)
185 return -EIO;
186
187 return count;
188}
189
190static struct bin_attribute ds1682_eeprom_attr = {
191 .attr = {
192 .name = "eeprom",
193 .mode = S_IRUGO | S_IWUSR,
194 },
195 .size = DS1682_EEPROM_SIZE,
196 .read = ds1682_eeprom_read,
197 .write = ds1682_eeprom_write,
198};
199
200/*
201 * Called when a ds1682 device is matched with this driver
202 */
203static int ds1682_probe(struct i2c_client *client,
204 const struct i2c_device_id *id)
205{
206 int rc;
207
208 if (!i2c_check_functionality(client->adapter,
209 I2C_FUNC_SMBUS_I2C_BLOCK)) {
210 dev_err(&client->dev, "i2c bus does not support the ds1682\n");
211 rc = -ENODEV;
212 goto exit;
213 }
214
215 rc = sysfs_create_group(&client->dev.kobj, &ds1682_group);
216 if (rc)
217 goto exit;
218
219 rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
220 if (rc)
221 goto exit_bin_attr;
222
223 return 0;
224
225 exit_bin_attr:
226 sysfs_remove_group(&client->dev.kobj, &ds1682_group);
227 exit:
228 return rc;
229}
230
231static int ds1682_remove(struct i2c_client *client)
232{
233 sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
234 sysfs_remove_group(&client->dev.kobj, &ds1682_group);
235 return 0;
236}
237
238static const struct i2c_device_id ds1682_id[] = {
239 { "ds1682", 0 },
240 { }
241};
242MODULE_DEVICE_TABLE(i2c, ds1682_id);
243
244static struct i2c_driver ds1682_driver = {
245 .driver = {
246 .name = "ds1682",
247 },
248 .probe = ds1682_probe,
249 .remove = ds1682_remove,
250 .id_table = ds1682_id,
251};
252
253static int __init ds1682_init(void)
254{
255 return i2c_add_driver(&ds1682_driver);
256}
257
258static void __exit ds1682_exit(void)
259{
260 i2c_del_driver(&ds1682_driver);
261}
262
263MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
264MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver");
265MODULE_LICENSE("GPL");
266
267module_init(ds1682_init);
268module_exit(ds1682_exit);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db39f4a52f53..ab1ad41786d1 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -20,6 +20,7 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/of.h>
23#include <linux/i2c.h> 24#include <linux/i2c.h>
24#include <linux/i2c/at24.h> 25#include <linux/i2c/at24.h>
25 26
@@ -54,7 +55,7 @@
54struct at24_data { 55struct at24_data {
55 struct at24_platform_data chip; 56 struct at24_platform_data chip;
56 struct memory_accessor macc; 57 struct memory_accessor macc;
57 bool use_smbus; 58 int use_smbus;
58 59
59 /* 60 /*
60 * Lock protects against activities from other Linux tasks, 61 * Lock protects against activities from other Linux tasks,
@@ -158,6 +159,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
158 struct i2c_msg msg[2]; 159 struct i2c_msg msg[2];
159 u8 msgbuf[2]; 160 u8 msgbuf[2];
160 struct i2c_client *client; 161 struct i2c_client *client;
162 unsigned long timeout, read_time;
161 int status, i; 163 int status, i;
162 164
163 memset(msg, 0, sizeof(msg)); 165 memset(msg, 0, sizeof(msg));
@@ -183,47 +185,85 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
183 if (count > io_limit) 185 if (count > io_limit)
184 count = io_limit; 186 count = io_limit;
185 187
186 /* Smaller eeproms can work given some SMBus extension calls */ 188 switch (at24->use_smbus) {
187 if (at24->use_smbus) { 189 case I2C_SMBUS_I2C_BLOCK_DATA:
190 /* Smaller eeproms can work given some SMBus extension calls */
188 if (count > I2C_SMBUS_BLOCK_MAX) 191 if (count > I2C_SMBUS_BLOCK_MAX)
189 count = I2C_SMBUS_BLOCK_MAX; 192 count = I2C_SMBUS_BLOCK_MAX;
190 status = i2c_smbus_read_i2c_block_data(client, offset, 193 break;
191 count, buf); 194 case I2C_SMBUS_WORD_DATA:
192 dev_dbg(&client->dev, "smbus read %zu@%d --> %d\n", 195 count = 2;
193 count, offset, status); 196 break;
194 return (status < 0) ? -EIO : status; 197 case I2C_SMBUS_BYTE_DATA:
198 count = 1;
199 break;
200 default:
201 /*
202 * When we have a better choice than SMBus calls, use a
203 * combined I2C message. Write address; then read up to
204 * io_limit data bytes. Note that read page rollover helps us
205 * here (unlike writes). msgbuf is u8 and will cast to our
206 * needs.
207 */
208 i = 0;
209 if (at24->chip.flags & AT24_FLAG_ADDR16)
210 msgbuf[i++] = offset >> 8;
211 msgbuf[i++] = offset;
212
213 msg[0].addr = client->addr;
214 msg[0].buf = msgbuf;
215 msg[0].len = i;
216
217 msg[1].addr = client->addr;
218 msg[1].flags = I2C_M_RD;
219 msg[1].buf = buf;
220 msg[1].len = count;
195 } 221 }
196 222
197 /* 223 /*
198 * When we have a better choice than SMBus calls, use a combined 224 * Reads fail if the previous write didn't complete yet. We may
199 * I2C message. Write address; then read up to io_limit data bytes. 225 * loop a few times until this one succeeds, waiting at least
200 * Note that read page rollover helps us here (unlike writes). 226 * long enough for one entire page write to work.
201 * msgbuf is u8 and will cast to our needs.
202 */ 227 */
203 i = 0; 228 timeout = jiffies + msecs_to_jiffies(write_timeout);
204 if (at24->chip.flags & AT24_FLAG_ADDR16) 229 do {
205 msgbuf[i++] = offset >> 8; 230 read_time = jiffies;
206 msgbuf[i++] = offset; 231 switch (at24->use_smbus) {
207 232 case I2C_SMBUS_I2C_BLOCK_DATA:
208 msg[0].addr = client->addr; 233 status = i2c_smbus_read_i2c_block_data(client, offset,
209 msg[0].buf = msgbuf; 234 count, buf);
210 msg[0].len = i; 235 break;
236 case I2C_SMBUS_WORD_DATA:
237 status = i2c_smbus_read_word_data(client, offset);
238 if (status >= 0) {
239 buf[0] = status & 0xff;
240 buf[1] = status >> 8;
241 status = count;
242 }
243 break;
244 case I2C_SMBUS_BYTE_DATA:
245 status = i2c_smbus_read_byte_data(client, offset);
246 if (status >= 0) {
247 buf[0] = status;
248 status = count;
249 }
250 break;
251 default:
252 status = i2c_transfer(client->adapter, msg, 2);
253 if (status == 2)
254 status = count;
255 }
256 dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
257 count, offset, status, jiffies);
211 258
212 msg[1].addr = client->addr; 259 if (status == count)
213 msg[1].flags = I2C_M_RD; 260 return count;
214 msg[1].buf = buf;
215 msg[1].len = count;
216 261
217 status = i2c_transfer(client->adapter, msg, 2); 262 /* REVISIT: at HZ=100, this is sloooow */
218 dev_dbg(&client->dev, "i2c read %zu@%d --> %d\n", 263 msleep(1);
219 count, offset, status); 264 } while (time_before(read_time, timeout));
220 265
221 if (status == 2) 266 return -ETIMEDOUT;
222 return count;
223 else if (status >= 0)
224 return -EIO;
225 else
226 return status;
227} 267}
228 268
229static ssize_t at24_read(struct at24_data *at24, 269static ssize_t at24_read(struct at24_data *at24,
@@ -260,7 +300,8 @@ static ssize_t at24_read(struct at24_data *at24,
260 return retval; 300 return retval;
261} 301}
262 302
263static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, 303static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
304 struct bin_attribute *attr,
264 char *buf, loff_t off, size_t count) 305 char *buf, loff_t off, size_t count)
265{ 306{
266 struct at24_data *at24; 307 struct at24_data *at24;
@@ -381,7 +422,8 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
381 return retval; 422 return retval;
382} 423}
383 424
384static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, 425static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
426 struct bin_attribute *attr,
385 char *buf, loff_t off, size_t count) 427 char *buf, loff_t off, size_t count)
386{ 428{
387 struct at24_data *at24; 429 struct at24_data *at24;
@@ -416,11 +458,32 @@ static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
416 458
417/*-------------------------------------------------------------------------*/ 459/*-------------------------------------------------------------------------*/
418 460
461#ifdef CONFIG_OF
462static void at24_get_ofdata(struct i2c_client *client,
463 struct at24_platform_data *chip)
464{
465 const __be32 *val;
466 struct device_node *node = client->dev.of_node;
467
468 if (node) {
469 if (of_get_property(node, "read-only", NULL))
470 chip->flags |= AT24_FLAG_READONLY;
471 val = of_get_property(node, "pagesize", NULL);
472 if (val)
473 chip->page_size = be32_to_cpup(val);
474 }
475}
476#else
477static void at24_get_ofdata(struct i2c_client *client,
478 struct at24_platform_data *chip)
479{ }
480#endif /* CONFIG_OF */
481
419static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) 482static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
420{ 483{
421 struct at24_platform_data chip; 484 struct at24_platform_data chip;
422 bool writable; 485 bool writable;
423 bool use_smbus = false; 486 int use_smbus = 0;
424 struct at24_data *at24; 487 struct at24_data *at24;
425 int err; 488 int err;
426 unsigned i, num_addresses; 489 unsigned i, num_addresses;
@@ -444,6 +507,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
444 */ 507 */
445 chip.page_size = 1; 508 chip.page_size = 1;
446 509
510 /* update chipdata if OF is present */
511 at24_get_ofdata(client, &chip);
512
447 chip.setup = NULL; 513 chip.setup = NULL;
448 chip.context = NULL; 514 chip.context = NULL;
449 } 515 }
@@ -451,6 +517,11 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
451 if (!is_power_of_2(chip.byte_len)) 517 if (!is_power_of_2(chip.byte_len))
452 dev_warn(&client->dev, 518 dev_warn(&client->dev,
453 "byte_len looks suspicious (no power of 2)!\n"); 519 "byte_len looks suspicious (no power of 2)!\n");
520 if (!chip.page_size) {
521 dev_err(&client->dev, "page_size must not be 0!\n");
522 err = -EINVAL;
523 goto err_out;
524 }
454 if (!is_power_of_2(chip.page_size)) 525 if (!is_power_of_2(chip.page_size))
455 dev_warn(&client->dev, 526 dev_warn(&client->dev,
456 "page_size looks suspicious (no power of 2)!\n"); 527 "page_size looks suspicious (no power of 2)!\n");
@@ -461,12 +532,19 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
461 err = -EPFNOSUPPORT; 532 err = -EPFNOSUPPORT;
462 goto err_out; 533 goto err_out;
463 } 534 }
464 if (!i2c_check_functionality(client->adapter, 535 if (i2c_check_functionality(client->adapter,
465 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { 536 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
537 use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
538 } else if (i2c_check_functionality(client->adapter,
539 I2C_FUNC_SMBUS_READ_WORD_DATA)) {
540 use_smbus = I2C_SMBUS_WORD_DATA;
541 } else if (i2c_check_functionality(client->adapter,
542 I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
543 use_smbus = I2C_SMBUS_BYTE_DATA;
544 } else {
466 err = -EPFNOSUPPORT; 545 err = -EPFNOSUPPORT;
467 goto err_out; 546 goto err_out;
468 } 547 }
469 use_smbus = true;
470 } 548 }
471 549
472 if (chip.flags & AT24_FLAG_TAKE8ADDR) 550 if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -491,6 +569,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
491 * Export the EEPROM bytes through sysfs, since that's convenient. 569 * Export the EEPROM bytes through sysfs, since that's convenient.
492 * By default, only root should see the data (maybe passwords etc) 570 * By default, only root should see the data (maybe passwords etc)
493 */ 571 */
572 sysfs_bin_attr_init(&at24->bin);
494 at24->bin.attr.name = "eeprom"; 573 at24->bin.attr.name = "eeprom";
495 at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR; 574 at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
496 at24->bin.read = at24_bin_read; 575 at24->bin.read = at24_bin_read;
@@ -548,14 +627,15 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
548 627
549 i2c_set_clientdata(client, at24); 628 i2c_set_clientdata(client, at24);
550 629
551 dev_info(&client->dev, "%zu byte %s EEPROM %s\n", 630 dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
552 at24->bin.size, client->name, 631 at24->bin.size, client->name,
553 writable ? "(writable)" : "(read-only)"); 632 writable ? "writable" : "read-only", at24->write_max);
554 dev_dbg(&client->dev, 633 if (use_smbus == I2C_SMBUS_WORD_DATA ||
555 "page_size %d, num_addresses %d, write_max %d%s\n", 634 use_smbus == I2C_SMBUS_BYTE_DATA) {
556 chip.page_size, num_addresses, 635 dev_notice(&client->dev, "Falling back to %s reads, "
557 at24->write_max, 636 "performance will suffer\n", use_smbus ==
558 use_smbus ? ", use_smbus" : ""); 637 I2C_SMBUS_WORD_DATA ? "word" : "byte");
638 }
559 639
560 /* export data to kernel code */ 640 /* export data to kernel code */
561 if (chip.setup) 641 if (chip.setup)
@@ -589,7 +669,6 @@ static int __devexit at24_remove(struct i2c_client *client)
589 669
590 kfree(at24->writebuf); 670 kfree(at24->writebuf);
591 kfree(at24); 671 kfree(at24);
592 i2c_set_clientdata(client, NULL);
593 return 0; 672 return 0;
594} 673}
595 674
@@ -607,6 +686,11 @@ static struct i2c_driver at24_driver = {
607 686
608static int __init at24_init(void) 687static int __init at24_init(void)
609{ 688{
689 if (!io_limit) {
690 pr_err("at24: io_limit must not be 0!\n");
691 return -EINVAL;
692 }
693
610 io_limit = rounddown_pow_of_two(io_limit); 694 io_limit = rounddown_pow_of_two(io_limit);
611 return i2c_add_driver(&at24_driver); 695 return i2c_add_driver(&at24_driver);
612} 696}
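The new page_size sanity check and the SMBus fallbacks in the at24 hunks above are driven by what board code (or, with the new OF hook, the device tree) declares for the part. A hedged board-file sketch for a 24c32 is shown below; the bus number, address and sizes are illustrative, and machine-init code would pass the table to i2c_register_board_info() before the adapter probes.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>

static struct at24_platform_data board_eeprom_pdata = {
	.byte_len	= 4096,		/* 24c32 = 32 kbit */
	.page_size	= 32,		/* must be a non-zero power of two */
	.flags		= AT24_FLAG_ADDR16,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c32", 0x50),
		.platform_data = &board_eeprom_pdata,
	},
};

/* from machine init: i2c_register_board_info(1, board_i2c_devices,
 *                                            ARRAY_SIZE(board_i2c_devices)); */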
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b34cb5f79eea..c627e4174ccd 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -126,7 +126,8 @@ at25_ee_read(
126} 126}
127 127
128static ssize_t 128static ssize_t
129at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr, 129at25_bin_read(struct file *filp, struct kobject *kobj,
130 struct bin_attribute *bin_attr,
130 char *buf, loff_t off, size_t count) 131 char *buf, loff_t off, size_t count)
131{ 132{
132 struct device *dev; 133 struct device *dev;
@@ -173,6 +174,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
173 unsigned segment; 174 unsigned segment;
174 unsigned offset = (unsigned) off; 175 unsigned offset = (unsigned) off;
175 u8 *cp = bounce + 1; 176 u8 *cp = bounce + 1;
177 int sr;
176 178
177 *cp = AT25_WREN; 179 *cp = AT25_WREN;
178 status = spi_write(at25->spi, cp, 1); 180 status = spi_write(at25->spi, cp, 1);
@@ -214,7 +216,6 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
214 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); 216 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
215 retries = 0; 217 retries = 0;
216 do { 218 do {
217 int sr;
218 219
219 sr = spi_w8r8(at25->spi, AT25_RDSR); 220 sr = spi_w8r8(at25->spi, AT25_RDSR);
220 if (sr < 0 || (sr & AT25_SR_nRDY)) { 221 if (sr < 0 || (sr & AT25_SR_nRDY)) {
@@ -228,7 +229,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
228 break; 229 break;
229 } while (retries++ < 3 || time_before_eq(jiffies, timeout)); 230 } while (retries++ < 3 || time_before_eq(jiffies, timeout));
230 231
231 if (time_after(jiffies, timeout)) { 232 if ((sr < 0) || (sr & AT25_SR_nRDY)) {
232 dev_err(&at25->spi->dev, 233 dev_err(&at25->spi->dev,
233 "write %d bytes offset %d, " 234 "write %d bytes offset %d, "
234 "timeout after %u msecs\n", 235 "timeout after %u msecs\n",
@@ -253,7 +254,8 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
253} 254}
254 255
255static ssize_t 256static ssize_t
256at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr, 257at25_bin_write(struct file *filp, struct kobject *kobj,
258 struct bin_attribute *bin_attr,
257 char *buf, loff_t off, size_t count) 259 char *buf, loff_t off, size_t count)
258{ 260{
259 struct device *dev; 261 struct device *dev;
@@ -347,6 +349,7 @@ static int at25_probe(struct spi_device *spi)
347 * that's sensitive for read and/or write, like ethernet addresses, 349 * that's sensitive for read and/or write, like ethernet addresses,
348 * security codes, board-specific manufacturing calibrations, etc. 350 * security codes, board-specific manufacturing calibrations, etc.
349 */ 351 */
352 sysfs_bin_attr_init(&at25->bin);
350 at25->bin.attr.name = "eeprom"; 353 at25->bin.attr.name = "eeprom";
351 at25->bin.attr.mode = S_IRUSR; 354 at25->bin.attr.mode = S_IRUSR;
352 at25->bin.read = at25_bin_read; 355 at25->bin.read = at25_bin_read;
@@ -417,4 +420,4 @@ module_exit(at25_exit);
417MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); 420MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
418MODULE_AUTHOR("David Brownell"); 421MODULE_AUTHOR("David Brownell");
419MODULE_LICENSE("GPL"); 422MODULE_LICENSE("GPL");
420 423MODULE_ALIAS("spi:at25");
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..45060ddc4e59 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -1,24 +1,20 @@
1/* 1/*
2 Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and 2 * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
3 Philip Edelbrock <phil@netroedge.com> 3 * Philip Edelbrock <phil@netroedge.com>
4 Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> 4 * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
5 Copyright (C) 2003 IBM Corp. 5 * Copyright (C) 2003 IBM Corp.
6 Copyright (C) 2004 Jean Delvare <khali@linux-fr.org> 6 * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
7 7 *
8 This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version. 11 * (at your option) any later version.
12 12 *
13 This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details. 16 * GNU General Public License for more details.
17 17 */
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/
22 18
23#include <linux/kernel.h> 19#include <linux/kernel.h>
24#include <linux/init.h> 20#include <linux/init.h>
@@ -32,9 +28,6 @@
32static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 28static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
33 0x55, 0x56, 0x57, I2C_CLIENT_END }; 29 0x55, 0x56, 0x57, I2C_CLIENT_END };
34 30
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(eeprom);
37
38 31
39/* Size of EEPROM in bytes */ 32/* Size of EEPROM in bytes */
40#define EEPROM_SIZE 256 33#define EEPROM_SIZE 256
@@ -88,7 +81,8 @@ exit:
88 mutex_unlock(&data->update_lock); 81 mutex_unlock(&data->update_lock);
89} 82}
90 83
91static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
84static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
85 struct bin_attribute *bin_attr,
92 char *buf, loff_t off, size_t count) 86 char *buf, loff_t off, size_t count)
93{ 87{
94 struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); 88 struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
@@ -135,8 +129,7 @@ static struct bin_attribute eeprom_attr = {
135}; 129};
136 130
137/* Return 0 if detection is successful, -ENODEV otherwise */ 131/* Return 0 if detection is successful, -ENODEV otherwise */
138static int eeprom_detect(struct i2c_client *client, int kind,
132static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
139 struct i2c_board_info *info)
140{ 133{
141 struct i2c_adapter *adapter = client->adapter; 134 struct i2c_adapter *adapter = client->adapter;
142 135
@@ -233,7 +226,7 @@ static struct i2c_driver eeprom_driver = {
233 226
234 .class = I2C_CLASS_DDC | I2C_CLASS_SPD, 227 .class = I2C_CLASS_DDC | I2C_CLASS_SPD,
235 .detect = eeprom_detect, 228 .detect = eeprom_detect,
236 .address_data = &addr_data,
229 .address_list = normal_i2c,
237}; 230};
238 231
239static int __init eeprom_init(void) 232static int __init eeprom_init(void)
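This is the standard 2.6.33-era conversion away from I2C_CLIENT_INSMOD/addr_data: class-based detection now lists its candidate addresses directly in struct i2c_driver. A minimal sketch of the resulting shape, with "mychip" as a placeholder name (not part of this patch):

static const unsigned short mychip_addrs[] = { 0x50, 0x51, I2C_CLIENT_END };

static int mychip_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
		return -ENODEV;
	strlcpy(info->type, "mychip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver mychip_driver = {
	.driver		= { .name = "mychip" },
	.class		= I2C_CLASS_HWMON,
	.detect		= mychip_detect,
	.address_list	= mychip_addrs,
	/* .probe, .remove and .id_table as usual */
};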
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c8..7b33de95c4bf 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -1,27 +1,20 @@
1/* 1/*
2 Copyright (C) 2004 - 2006 rt2x00 SourceForge Project 2 * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 * <http://rt2x00.serialmonkey.com>
4 4 *
5 This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or 7 * the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version. 8 * (at your option) any later version.
9 9 *
10 This program is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details. 13 * GNU General Public License for more details.
14 14 *
15 You should have received a copy of the GNU General Public License 15 * Module: eeprom_93cx6
16 along with this program; if not, write to the 16 * Abstract: EEPROM reader routines for 93cx6 chipsets.
17 Free Software Foundation, Inc., 17 * Supported chipsets: 93c46 & 93c66.
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: eeprom_93cx6
23 Abstract: EEPROM reader routines for 93cx6 chipsets.
24 Supported chipsets: 93c46 & 93c66.
25 */ 18 */
26 19
27#include <linux/kernel.h> 20#include <linux/kernel.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 3c0c58eed347..5653a3ce0517 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -1,30 +1,30 @@
1/* 1/*
2 max6875.c - driver for MAX6874/MAX6875 2 * max6875.c - driver for MAX6874/MAX6875
3 3 *
4 Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> 4 * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
5 5 *
6 Based on eeprom.c 6 * Based on eeprom.c
7 7 *
8 The MAX6875 has a bank of registers and two banks of EEPROM. 8 * The MAX6875 has a bank of registers and two banks of EEPROM.
9 Address ranges are defined as follows: 9 * Address ranges are defined as follows:
10 * 0x0000 - 0x0046 = configuration registers 10 * * 0x0000 - 0x0046 = configuration registers
11 * 0x8000 - 0x8046 = configuration EEPROM 11 * * 0x8000 - 0x8046 = configuration EEPROM
12 * 0x8100 - 0x82FF = user EEPROM 12 * * 0x8100 - 0x82FF = user EEPROM
13 13 *
14 This driver makes the user EEPROM available for read. 14 * This driver makes the user EEPROM available for read.
15 15 *
16 The registers & config EEPROM should be accessed via i2c-dev. 16 * The registers & config EEPROM should be accessed via i2c-dev.
17 17 *
18 The MAX6875 ignores the lowest address bit, so each chip responds to 18 * The MAX6875 ignores the lowest address bit, so each chip responds to
19 two addresses - 0x50/0x51 and 0x52/0x53. 19 * two addresses - 0x50/0x51 and 0x52/0x53.
20 20 *
21 Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read 21 * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
22 address, so this driver is destructive if loaded for the wrong EEPROM chip. 22 * address, so this driver is destructive if loaded for the wrong EEPROM chip.
23 23 *
24 This program is free software; you can redistribute it and/or modify 24 * This program is free software; you can redistribute it and/or modify
25 it under the terms of the GNU General Public License as published by 25 * it under the terms of the GNU General Public License as published by
26 the Free Software Foundation; version 2 of the License. 26 * the Free Software Foundation; version 2 of the License.
27*/ 27 */
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/init.h> 30#include <linux/init.h>
@@ -33,12 +33,6 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
37static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
38
39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(max6875);
41
42/* The MAX6875 can only read/write 16 bytes at a time */ 36/* The MAX6875 can only read/write 16 bytes at a time */
43#define SLICE_SIZE 16 37#define SLICE_SIZE 16
44#define SLICE_BITS 4 38#define SLICE_BITS 4
@@ -113,7 +107,7 @@ exit_up:
113 mutex_unlock(&data->update_lock); 107 mutex_unlock(&data->update_lock);
114} 108}
115 109
116static ssize_t max6875_read(struct kobject *kobj,
110static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
117 struct bin_attribute *bin_attr, 111 struct bin_attribute *bin_attr,
118 char *buf, loff_t off, size_t count) 112 char *buf, loff_t off, size_t count)
119{ 113{
@@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = {
146 .read = max6875_read, 140 .read = max6875_read,
147}; 141};
148 142
149/* Return 0 if detection is successful, -ENODEV otherwise */
150static int max6875_detect(struct i2c_client *client, int kind,
151 struct i2c_board_info *info)
143static int max6875_probe(struct i2c_client *client,
144 const struct i2c_device_id *id)
152{ 145{
153 struct i2c_adapter *adapter = client->adapter; 146 struct i2c_adapter *adapter = client->adapter;
147 struct max6875_data *data;
148 int err;
154 149
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA 150 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
156 | I2C_FUNC_SMBUS_READ_BYTE)) 151 | I2C_FUNC_SMBUS_READ_BYTE))
157 return -ENODEV; 152 return -ENODEV;
158 153
159 /* Only check even addresses */
154 /* Only bind to even addresses */
160 if (client->addr & 1) 155 if (client->addr & 1)
161 return -ENODEV; 156 return -ENODEV;
162 157
163 strlcpy(info->type, "max6875", I2C_NAME_SIZE);
164
165 return 0;
166}
167
168static int max6875_probe(struct i2c_client *client,
169 const struct i2c_device_id *id)
170{
171 struct max6875_data *data;
172 int err;
173
174 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) 158 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
175 return -ENOMEM; 159 return -ENOMEM;
176 160
@@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = {
222 .probe = max6875_probe, 206 .probe = max6875_probe,
223 .remove = max6875_remove, 207 .remove = max6875_remove,
224 .id_table = max6875_id, 208 .id_table = max6875_id,
225
226 .detect = max6875_detect,
227 .address_data = &addr_data,
228}; 209};
229 210
230static int __init max6875_init(void) 211static int __init max6875_init(void)
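With .detect and .address_data removed, the MAX6875 is never found by bus scanning (setting its read pointer can corrupt other EEPROMs), so the device has to be instantiated explicitly. A sketch of how board code of this era might declare it; the bus number and address are examples only:

static struct i2c_board_info max6875_board_info __initdata = {
	I2C_BOARD_INFO("max6875", 0x50),	/* must be the even address */
};

static int __init board_declare_max6875(void)
{
	/* registered before the adapter probes; i2c_new_device() is the
	 * alternative once the adapter already exists */
	return i2c_register_board_info(1, &max6875_board_info, 1);
}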
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 348443bdb23b..00e5fcac8fdf 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -27,30 +27,51 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/slab.h>
30 31
31static LIST_HEAD(container_list); 32static LIST_HEAD(container_list);
32static DEFINE_MUTEX(container_list_lock); 33static DEFINE_MUTEX(container_list_lock);
33static struct class enclosure_class; 34static struct class enclosure_class;
34 35
35/** 36/**
36 * enclosure_find - find an enclosure given a device
37 * @dev: the device to find for
37 * enclosure_find - find an enclosure given a parent device
38 * @dev: the parent to match against
39 * @start: Optional enclosure device to start from (NULL if none)
38 * 40 *
39 * Looks through the list of registered enclosures to see
40 * if it can find a match for a device. Returns NULL if no
41 * enclosure is found. Obtains a reference to the enclosure class
42 * device which must be released with device_put().
41 * Looks through the list of registered enclosures to find all those
42 * with @dev as a parent. Returns NULL if no enclosure is
43 * found. @start can be used as a starting point to obtain multiple
44 * enclosures per parent (should begin with NULL and then be set to
45 * each returned enclosure device). Obtains a reference to the
46 * enclosure class device which must be released with device_put().
47 * If @start is not NULL, a reference must be taken on it which is
48 * released before returning (this allows a loop through all
49 * enclosures to exit with only the reference on the enclosure of
50 * interest held). Note that the @dev may correspond to the actual
51 * device housing the enclosure, in which case no iteration via @start
52 * is required.
43 */ 53 */
44struct enclosure_device *enclosure_find(struct device *dev)
54struct enclosure_device *enclosure_find(struct device *dev,
55 struct enclosure_device *start)
45{ 56{
46 struct enclosure_device *edev; 57 struct enclosure_device *edev;
47 58
48 mutex_lock(&container_list_lock); 59 mutex_lock(&container_list_lock);
49 list_for_each_entry(edev, &container_list, node) {
50 if (edev->edev.parent == dev) {
51 get_device(&edev->edev);
52 mutex_unlock(&container_list_lock);
53 return edev;
60 edev = list_prepare_entry(start, &container_list, node);
61 if (start)
62 put_device(&start->edev);
63
64 list_for_each_entry_continue(edev, &container_list, node) {
65 struct device *parent = edev->edev.parent;
66 /* parent might not be immediate, so iterate up to
67 * the root of the tree if necessary */
68 while (parent) {
69 if (parent == dev) {
70 get_device(&edev->edev);
71 mutex_unlock(&container_list_lock);
72 return edev;
73 }
74 parent = parent->parent;
54 } 75 }
55 } 76 }
56 mutex_unlock(&container_list_lock); 77 mutex_unlock(&container_list_lock);
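The @start cursor turns enclosure_find() into an iterator: pass NULL first, then feed each returned enclosure back in, and the function drops the reference on the previous one internally. A caller-side sketch (not part of this patch):

	struct enclosure_device *edev = NULL;

	while ((edev = enclosure_find(dev, edev)) != NULL) {
		/* ... inspect edev ... */
		/* breaking out of the loop keeps a reference that the
		 * caller must then drop with put_device(&edev->edev) */
	}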
@@ -218,7 +239,7 @@ static void enclosure_component_release(struct device *dev)
218 put_device(dev->parent); 239 put_device(dev->parent);
219} 240}
220 241
221static struct attribute_group *enclosure_groups[];
242static const struct attribute_group *enclosure_groups[];
222 243
223/** 244/**
224 * enclosure_component_register - add a particular component to an enclosure 245 * enclosure_component_register - add a particular component to an enclosure
@@ -264,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev,
264 cdev->groups = enclosure_groups; 285 cdev->groups = enclosure_groups;
265 286
266 err = device_register(cdev); 287 err = device_register(cdev);
267 if (err)
268 ERR_PTR(err);
288 if (err) {
289 ecomp->number = -1;
290 put_device(cdev);
291 return ERR_PTR(err);
292 }
269 293
270 return ecomp; 294 return ecomp;
271} 295}
@@ -295,6 +319,9 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
295 319
296 cdev = &edev->component[component]; 320 cdev = &edev->component[component];
297 321
322 if (cdev->dev == dev)
323 return -EEXIST;
324
298 if (cdev->dev) 325 if (cdev->dev)
299 enclosure_remove_links(cdev); 326 enclosure_remove_links(cdev);
300 327
@@ -312,19 +339,25 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
312 * Returns zero on success or an error. 339 * Returns zero on success or an error.
313 * 340 *
314 */ 341 */
315int enclosure_remove_device(struct enclosure_device *edev, int component)
342int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
316{ 343{
317 struct enclosure_component *cdev; 344 struct enclosure_component *cdev;
345 int i;
318 346
319 if (!edev || component >= edev->components)
347 if (!edev || !dev)
320 return -EINVAL; 348 return -EINVAL;
321 349
322 cdev = &edev->component[component];
323
324 device_del(&cdev->cdev);
325 put_device(cdev->dev);
326 cdev->dev = NULL;
327 return device_add(&cdev->cdev);
350 for (i = 0; i < edev->components; i++) {
351 cdev = &edev->component[i];
352 if (cdev->dev == dev) {
353 enclosure_remove_links(cdev);
354 device_del(&cdev->cdev);
355 put_device(dev);
356 cdev->dev = NULL;
357 return device_add(&cdev->cdev);
358 }
359 }
360 return -ENODEV;
328} 361}
329EXPORT_SYMBOL_GPL(enclosure_remove_device); 362EXPORT_SYMBOL_GPL(enclosure_remove_device);
330 363
@@ -362,6 +395,7 @@ static const char *const enclosure_status [] = {
362 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", 395 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
363 [ENCLOSURE_STATUS_UNKNOWN] = "unknown", 396 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
364 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", 397 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
398 [ENCLOSURE_STATUS_MAX] = NULL,
365}; 399};
366 400
367static const char *const enclosure_type [] = { 401static const char *const enclosure_type [] = {
@@ -507,7 +541,7 @@ static struct attribute_group enclosure_group = {
507 .attrs = enclosure_component_attrs, 541 .attrs = enclosure_component_attrs,
508}; 542};
509 543
510static struct attribute_group *enclosure_groups[] = {
544static const struct attribute_group *enclosure_groups[] = {
511 &enclosure_group, 545 &enclosure_group,
512 NULL 546 NULL
513}; 547};
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
new file mode 100644
index 000000000000..46b3439673e9
--- /dev/null
+++ b/drivers/misc/ep93xx_pwm.c
@@ -0,0 +1,385 @@
1/*
2 * Simple PWM driver for EP93XX
3 *
4 * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com>
5 * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * EP9307 has only one channel:
13 * - PWMOUT
14 *
15 * EP9301/02/12/15 have two channels:
16 * - PWMOUT
17 * - PWMOUT1 (alternate function for EGPIO14)
18 */
19
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/clk.h>
24#include <linux/err.h>
25#include <linux/io.h>
26
27#include <mach/platform.h>
28
29#define EP93XX_PWMx_TERM_COUNT 0x00
30#define EP93XX_PWMx_DUTY_CYCLE 0x04
31#define EP93XX_PWMx_ENABLE 0x08
32#define EP93XX_PWMx_INVERT 0x0C
33
34#define EP93XX_PWM_MAX_COUNT 0xFFFF
35
36struct ep93xx_pwm {
37 void __iomem *mmio_base;
38 struct clk *clk;
39 u32 duty_percent;
40};
41
42static inline void ep93xx_pwm_writel(struct ep93xx_pwm *pwm,
43 unsigned int val, unsigned int off)
44{
45 __raw_writel(val, pwm->mmio_base + off);
46}
47
48static inline unsigned int ep93xx_pwm_readl(struct ep93xx_pwm *pwm,
49 unsigned int off)
50{
51 return __raw_readl(pwm->mmio_base + off);
52}
53
54static inline void ep93xx_pwm_write_tc(struct ep93xx_pwm *pwm, u16 value)
55{
56 ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_TERM_COUNT);
57}
58
59static inline u16 ep93xx_pwm_read_tc(struct ep93xx_pwm *pwm)
60{
61 return ep93xx_pwm_readl(pwm, EP93XX_PWMx_TERM_COUNT);
62}
63
64static inline void ep93xx_pwm_write_dc(struct ep93xx_pwm *pwm, u16 value)
65{
66 ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_DUTY_CYCLE);
67}
68
69static inline void ep93xx_pwm_enable(struct ep93xx_pwm *pwm)
70{
71 ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_ENABLE);
72}
73
74static inline void ep93xx_pwm_disable(struct ep93xx_pwm *pwm)
75{
76 ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_ENABLE);
77}
78
79static inline int ep93xx_pwm_is_enabled(struct ep93xx_pwm *pwm)
80{
81 return ep93xx_pwm_readl(pwm, EP93XX_PWMx_ENABLE) & 0x1;
82}
83
84static inline void ep93xx_pwm_invert(struct ep93xx_pwm *pwm)
85{
86 ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_INVERT);
87}
88
89static inline void ep93xx_pwm_normal(struct ep93xx_pwm *pwm)
90{
91 ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_INVERT);
92}
93
94static inline int ep93xx_pwm_is_inverted(struct ep93xx_pwm *pwm)
95{
96 return ep93xx_pwm_readl(pwm, EP93XX_PWMx_INVERT) & 0x1;
97}
98
99/*
100 * /sys/devices/platform/ep93xx-pwm.N
101 * /min_freq read-only minimum pwm output frequency
102 * /max_freq read-only maximum pwm output frequency
103 * /freq read-write pwm output frequency (0 = disable output)
104 * /duty_percent read-write pwm duty cycle percent (1..99)
105 * /invert read-write invert pwm output
106 */
107
108static ssize_t ep93xx_pwm_get_min_freq(struct device *dev,
109 struct device_attribute *attr, char *buf)
110{
111 struct platform_device *pdev = to_platform_device(dev);
112 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
113 unsigned long rate = clk_get_rate(pwm->clk);
114
115 return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1));
116}
117
118static ssize_t ep93xx_pwm_get_max_freq(struct device *dev,
119 struct device_attribute *attr, char *buf)
120{
121 struct platform_device *pdev = to_platform_device(dev);
122 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
123 unsigned long rate = clk_get_rate(pwm->clk);
124
125 return sprintf(buf, "%ld\n", rate / 2);
126}
127
128static ssize_t ep93xx_pwm_get_freq(struct device *dev,
129 struct device_attribute *attr, char *buf)
130{
131 struct platform_device *pdev = to_platform_device(dev);
132 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
133
134 if (ep93xx_pwm_is_enabled(pwm)) {
135 unsigned long rate = clk_get_rate(pwm->clk);
136 u16 term = ep93xx_pwm_read_tc(pwm);
137
138 return sprintf(buf, "%ld\n", rate / (term + 1));
139 } else {
140 return sprintf(buf, "disabled\n");
141 }
142}
143
144static ssize_t ep93xx_pwm_set_freq(struct device *dev,
145 struct device_attribute *attr, const char *buf, size_t count)
146{
147 struct platform_device *pdev = to_platform_device(dev);
148 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
149 long val;
150 int err;
151
152 err = strict_strtol(buf, 10, &val);
153 if (err)
154 return -EINVAL;
155
156 if (val == 0) {
157 ep93xx_pwm_disable(pwm);
158 } else if (val <= (clk_get_rate(pwm->clk) / 2)) {
159 u32 term, duty;
160
161 val = (clk_get_rate(pwm->clk) / val) - 1;
162 if (val > EP93XX_PWM_MAX_COUNT)
163 val = EP93XX_PWM_MAX_COUNT;
164 if (val < 1)
165 val = 1;
166
167 term = ep93xx_pwm_read_tc(pwm);
168 duty = ((val + 1) * pwm->duty_percent / 100) - 1;
169
170 /* If pwm is running, order is important */
171 if (val > term) {
172 ep93xx_pwm_write_tc(pwm, val);
173 ep93xx_pwm_write_dc(pwm, duty);
174 } else {
175 ep93xx_pwm_write_dc(pwm, duty);
176 ep93xx_pwm_write_tc(pwm, val);
177 }
178
179 if (!ep93xx_pwm_is_enabled(pwm))
180 ep93xx_pwm_enable(pwm);
181 } else {
182 return -EINVAL;
183 }
184
185 return count;
186}
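The conversion above is term_count = rate / freq - 1, clamped to the 16-bit counter, with the duty-cycle register rescaled from the stored percentage; the register write order matters so the duty value never exceeds the terminal count that is currently active. A small worked sketch of the same arithmetic (the 10 MHz clock rate is an illustrative assumption, not an EP93xx fact):

	unsigned long rate = 10000000;	/* assumed pwm_clk rate in Hz */
	unsigned long freq = 1000;	/* requested output frequency in Hz */
	u32 duty_percent = 50;
	u32 term, duty;

	term = (rate / freq) - 1;			/* 9999 */
	if (term > EP93XX_PWM_MAX_COUNT)
		term = EP93XX_PWM_MAX_COUNT;
	duty = ((term + 1) * duty_percent / 100) - 1;	/* 4999, i.e. 50% high */

	/* growing the period: write the terminal count first, then the duty;
	 * shrinking it: write the duty first, so duty <= term at all times */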
187
188static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev,
189 struct device_attribute *attr, char *buf)
190{
191 struct platform_device *pdev = to_platform_device(dev);
192 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
193
194 return sprintf(buf, "%d\n", pwm->duty_percent);
195}
196
197static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev,
198 struct device_attribute *attr, const char *buf, size_t count)
199{
200 struct platform_device *pdev = to_platform_device(dev);
201 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
202 long val;
203 int err;
204
205 err = strict_strtol(buf, 10, &val);
206 if (err)
207 return -EINVAL;
208
209 if (val > 0 && val < 100) {
210 u32 term = ep93xx_pwm_read_tc(pwm);
211 ep93xx_pwm_write_dc(pwm, ((term + 1) * val / 100) - 1);
212 pwm->duty_percent = val;
213 return count;
214 }
215
216 return -EINVAL;
217}
218
219static ssize_t ep93xx_pwm_get_invert(struct device *dev,
220 struct device_attribute *attr, char *buf)
221{
222 struct platform_device *pdev = to_platform_device(dev);
223 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
224
225 return sprintf(buf, "%d\n", ep93xx_pwm_is_inverted(pwm));
226}
227
228static ssize_t ep93xx_pwm_set_invert(struct device *dev,
229 struct device_attribute *attr, const char *buf, size_t count)
230{
231 struct platform_device *pdev = to_platform_device(dev);
232 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
233 long val;
234 int err;
235
236 err = strict_strtol(buf, 10, &val);
237 if (err)
238 return -EINVAL;
239
240 if (val == 0)
241 ep93xx_pwm_normal(pwm);
242 else if (val == 1)
243 ep93xx_pwm_invert(pwm);
244 else
245 return -EINVAL;
246
247 return count;
248}
249
250static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
251static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
252static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO,
253 ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
254static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO,
255 ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
256static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO,
257 ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
258
259static struct attribute *ep93xx_pwm_attrs[] = {
260 &dev_attr_min_freq.attr,
261 &dev_attr_max_freq.attr,
262 &dev_attr_freq.attr,
263 &dev_attr_duty_percent.attr,
264 &dev_attr_invert.attr,
265 NULL
266};
267
268static const struct attribute_group ep93xx_pwm_sysfs_files = {
269 .attrs = ep93xx_pwm_attrs,
270};
271
272static int __init ep93xx_pwm_probe(struct platform_device *pdev)
273{
274 struct ep93xx_pwm *pwm;
275 struct resource *res;
276 int err;
277
278 err = ep93xx_pwm_acquire_gpio(pdev);
279 if (err)
280 return err;
281
282 pwm = kzalloc(sizeof(struct ep93xx_pwm), GFP_KERNEL);
283 if (!pwm) {
284 err = -ENOMEM;
285 goto fail_no_mem;
286 }
287
288 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
289 if (res == NULL) {
290 err = -ENXIO;
291 goto fail_no_mem_resource;
292 }
293
294 res = request_mem_region(res->start, resource_size(res), pdev->name);
295 if (res == NULL) {
296 err = -EBUSY;
297 goto fail_no_mem_resource;
298 }
299
300 pwm->mmio_base = ioremap(res->start, resource_size(res));
301 if (pwm->mmio_base == NULL) {
302 err = -ENXIO;
303 goto fail_no_ioremap;
304 }
305
306 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
307 if (err)
308 goto fail_no_sysfs;
309
310 pwm->clk = clk_get(&pdev->dev, "pwm_clk");
311 if (IS_ERR(pwm->clk)) {
312 err = PTR_ERR(pwm->clk);
313 goto fail_no_clk;
314 }
315
316 pwm->duty_percent = 50;
317
318 platform_set_drvdata(pdev, pwm);
319
320 /* disable pwm at startup. Avoids zero value. */
321 ep93xx_pwm_disable(pwm);
322 ep93xx_pwm_write_tc(pwm, EP93XX_PWM_MAX_COUNT);
323 ep93xx_pwm_write_dc(pwm, EP93XX_PWM_MAX_COUNT / 2);
324
325 clk_enable(pwm->clk);
326
327 return 0;
328
329fail_no_clk:
330 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
331fail_no_sysfs:
332 iounmap(pwm->mmio_base);
333fail_no_ioremap:
334 release_mem_region(res->start, resource_size(res));
335fail_no_mem_resource:
336 kfree(pwm);
337fail_no_mem:
338 ep93xx_pwm_release_gpio(pdev);
339 return err;
340}
341
342static int __exit ep93xx_pwm_remove(struct platform_device *pdev)
343{
344 struct ep93xx_pwm *pwm = platform_get_drvdata(pdev);
345 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
346
347 ep93xx_pwm_disable(pwm);
348 clk_disable(pwm->clk);
349 clk_put(pwm->clk);
350 platform_set_drvdata(pdev, NULL);
351 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files);
352 iounmap(pwm->mmio_base);
353 release_mem_region(res->start, resource_size(res));
354 kfree(pwm);
355 ep93xx_pwm_release_gpio(pdev);
356
357 return 0;
358}
359
360static struct platform_driver ep93xx_pwm_driver = {
361 .driver = {
362 .name = "ep93xx-pwm",
363 .owner = THIS_MODULE,
364 },
365 .remove = __exit_p(ep93xx_pwm_remove),
366};
367
368static int __init ep93xx_pwm_init(void)
369{
370 return platform_driver_probe(&ep93xx_pwm_driver, ep93xx_pwm_probe);
371}
372
373static void __exit ep93xx_pwm_exit(void)
374{
375 platform_driver_unregister(&ep93xx_pwm_driver);
376}
377
378module_init(ep93xx_pwm_init);
379module_exit(ep93xx_pwm_exit);
380
381MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, "
382 "H Hartley Sweeten <hsweeten@visionengravers.com>");
383MODULE_DESCRIPTION("EP93xx PWM driver");
384MODULE_LICENSE("GPL");
385MODULE_ALIAS("platform:ep93xx-pwm");
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae679230..000000000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e09d3f..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
1/*
2 * Sky CPU State Driver
3 *
4 * Copyright (C) 2002 Brian Waite
5 *
6 * This driver allows use of the CPU state bits
7 * It exports the /dev/sky_cpustate and also
8 * /proc/sky_cpustate pseudo-file for status information.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/smp_lock.h>
21#include <linux/miscdevice.h>
22#include <linux/proc_fs.h>
23#include <linux/hdpu_features.h>
24#include <linux/platform_device.h>
25#include <asm/uaccess.h>
26#include <linux/seq_file.h>
27#include <asm/io.h>
28
29#define SKY_CPUSTATE_VERSION "1.1"
30
31static int hdpu_cpustate_probe(struct platform_device *pdev);
32static int hdpu_cpustate_remove(struct platform_device *pdev);
33
34static unsigned char cpustate_get_state(void);
35static int cpustate_proc_open(struct inode *inode, struct file *file);
36static int cpustate_proc_read(struct seq_file *seq, void *offset);
37
38static struct cpustate_t cpustate;
39
40static const struct file_operations proc_cpustate = {
41 .open = cpustate_proc_open,
42 .read = seq_read,
43 .llseek = seq_lseek,
44 .release = single_release,
45 .owner = THIS_MODULE,
46};
47
48static int cpustate_proc_open(struct inode *inode, struct file *file)
49{
50 return single_open(file, cpustate_proc_read, NULL);
51}
52
53static int cpustate_proc_read(struct seq_file *seq, void *offset)
54{
55 seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
56 return 0;
57}
58
59static int cpustate_get_ref(int excl)
60{
61
62 int retval = -EBUSY;
63
64 spin_lock(&cpustate.lock);
65
66 if (cpustate.excl)
67 goto out_busy;
68
69 if (excl) {
70 if (cpustate.open_count)
71 goto out_busy;
72 cpustate.excl = 1;
73 }
74
75 cpustate.open_count++;
76 retval = 0;
77
78 out_busy:
79 spin_unlock(&cpustate.lock);
80 return retval;
81}
82
83static int cpustate_free_ref(void)
84{
85
86 spin_lock(&cpustate.lock);
87
88 cpustate.excl = 0;
89 cpustate.open_count--;
90
91 spin_unlock(&cpustate.lock);
92 return 0;
93}
94
95static unsigned char cpustate_get_state(void)
96{
97
98 return cpustate.cached_val;
99}
100
101static void cpustate_set_state(unsigned char new_state)
102{
103 unsigned int state = (new_state << 21);
104
105#ifdef DEBUG_CPUSTATE
106 printk("CPUSTATE -> 0x%x\n", new_state);
107#endif
108 spin_lock(&cpustate.lock);
109 cpustate.cached_val = new_state;
110 writel((0xff << 21), cpustate.clr_addr);
111 writel(state, cpustate.set_addr);
112 spin_unlock(&cpustate.lock);
113}
114
115/*
116 * Now all the various file operations that we export.
117 */
118
119static ssize_t cpustate_read(struct file *file, char *buf,
120 size_t count, loff_t * ppos)
121{
122 unsigned char data;
123
124 if (count < 0)
125 return -EFAULT;
126 if (count == 0)
127 return 0;
128
129 data = cpustate_get_state();
130 if (copy_to_user(buf, &data, sizeof(unsigned char)))
131 return -EFAULT;
132 return sizeof(unsigned char);
133}
134
135static ssize_t cpustate_write(struct file *file, const char *buf,
136 size_t count, loff_t * ppos)
137{
138 unsigned char data;
139
140 if (count < 0)
141 return -EFAULT;
142
143 if (count == 0)
144 return 0;
145
146 if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
147 return -EFAULT;
148
149 cpustate_set_state(data);
150 return sizeof(unsigned char);
151}
152
153static int cpustate_open(struct inode *inode, struct file *file)
154{
155 int ret;
156
157 lock_kernel();
158 ret = cpustate_get_ref((file->f_flags & O_EXCL));
159 unlock_kernel();
160
161 return ret;
162}
163
164static int cpustate_release(struct inode *inode, struct file *file)
165{
166 return cpustate_free_ref();
167}
168
169static struct platform_driver hdpu_cpustate_driver = {
170 .probe = hdpu_cpustate_probe,
171 .remove = hdpu_cpustate_remove,
172 .driver = {
173 .name = HDPU_CPUSTATE_NAME,
174 .owner = THIS_MODULE,
175 },
176};
177
178/*
179 * The various file operations we support.
180 */
181static const struct file_operations cpustate_fops = {
182 .owner = THIS_MODULE,
183 .open = cpustate_open,
184 .release = cpustate_release,
185 .read = cpustate_read,
186 .write = cpustate_write,
187 .llseek = no_llseek,
188};
189
190static struct miscdevice cpustate_dev = {
191 .minor = MISC_DYNAMIC_MINOR,
192 .name = "sky_cpustate",
193 .fops = &cpustate_fops,
194};
195
196static int hdpu_cpustate_probe(struct platform_device *pdev)
197{
198 struct resource *res;
199 struct proc_dir_entry *proc_de;
200 int ret;
201
202 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
203 if (!res) {
204 printk(KERN_ERR "sky_cpustate: "
205 "Invalid memory resource.\n");
206 return -EINVAL;
207 }
208 cpustate.set_addr = (unsigned long *)res->start;
209 cpustate.clr_addr = (unsigned long *)res->end - 1;
210
211 ret = misc_register(&cpustate_dev);
212 if (ret) {
213 printk(KERN_WARNING "sky_cpustate: "
214 "Unable to register misc device.\n");
215 cpustate.set_addr = NULL;
216 cpustate.clr_addr = NULL;
217 return ret;
218 }
219
220 proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
221 if (!proc_de) {
222 printk(KERN_WARNING "sky_cpustate: "
223 "Unable to create proc entry\n");
224 }
225
226 printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
227 return 0;
228}
229
230static int hdpu_cpustate_remove(struct platform_device *pdev)
231{
232 cpustate.set_addr = NULL;
233 cpustate.clr_addr = NULL;
234
235 remove_proc_entry("sky_cpustate", NULL);
236 misc_deregister(&cpustate_dev);
237
238 return 0;
239}
240
241static int __init cpustate_init(void)
242{
243 return platform_driver_register(&hdpu_cpustate_driver);
244}
245
246static void __exit cpustate_exit(void)
247{
248 platform_driver_unregister(&hdpu_cpustate_driver);
249}
250
251module_init(cpustate_init);
252module_exit(cpustate_exit);
253
254MODULE_AUTHOR("Brian Waite");
255MODULE_LICENSE("GPL");
256MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644
index ce39fa54949b..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * Sky Nexus Register Driver
3 *
4 * Copyright (C) 2002 Brian Waite
5 *
6 * This driver allows reading the Nexus register
7 * It exports the /proc/sky_chassis_id and also
8 * /proc/sky_slot_id pseudo-file for status information.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/proc_fs.h>
20#include <linux/hdpu_features.h>
21#include <linux/platform_device.h>
22#include <linux/seq_file.h>
23#include <asm/io.h>
24
25static int hdpu_nexus_probe(struct platform_device *pdev);
26static int hdpu_nexus_remove(struct platform_device *pdev);
27static int hdpu_slot_id_open(struct inode *inode, struct file *file);
28static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
29static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
30static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
31
32static struct proc_dir_entry *hdpu_slot_id;
33static struct proc_dir_entry *hdpu_chassis_id;
34static int slot_id = -1;
35static int chassis_id = -1;
36
37static const struct file_operations proc_slot_id = {
38 .open = hdpu_slot_id_open,
39 .read = seq_read,
40 .llseek = seq_lseek,
41 .release = single_release,
42 .owner = THIS_MODULE,
43};
44
45static const struct file_operations proc_chassis_id = {
46 .open = hdpu_chassis_id_open,
47 .read = seq_read,
48 .llseek = seq_lseek,
49 .release = single_release,
50 .owner = THIS_MODULE,
51};
52
53static struct platform_driver hdpu_nexus_driver = {
54 .probe = hdpu_nexus_probe,
55 .remove = hdpu_nexus_remove,
56 .driver = {
57 .name = HDPU_NEXUS_NAME,
58 .owner = THIS_MODULE,
59 },
60};
61
62static int hdpu_slot_id_open(struct inode *inode, struct file *file)
63{
64 return single_open(file, hdpu_slot_id_read, NULL);
65}
66
67static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
68{
69 seq_printf(seq, "%d\n", slot_id);
70 return 0;
71}
72
73static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
74{
75 return single_open(file, hdpu_chassis_id_read, NULL);
76}
77
78static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
79{
80 seq_printf(seq, "%d\n", chassis_id);
81 return 0;
82}
83
84static int hdpu_nexus_probe(struct platform_device *pdev)
85{
86 struct resource *res;
87 int *nexus_id_addr;
88
89 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
90 if (!res) {
91 printk(KERN_ERR "sky_nexus: "
92 "Invalid memory resource.\n");
93 return -EINVAL;
94 }
95 nexus_id_addr = ioremap(res->start,
96 (unsigned long)(res->end - res->start));
97 if (nexus_id_addr) {
98 slot_id = (*nexus_id_addr >> 8) & 0x1f;
99 chassis_id = *nexus_id_addr & 0xff;
100 iounmap(nexus_id_addr);
101 } else {
102 printk(KERN_ERR "sky_nexus: Could not map slot id\n");
103 }
104
105 hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
106 if (!hdpu_slot_id) {
107 printk(KERN_WARNING "sky_nexus: "
108 "Unable to create proc dir entry: sky_slot_id\n");
109 }
110
111 hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
112 &proc_chassis_id);
113 if (!hdpu_chassis_id)
114 printk(KERN_WARNING "sky_nexus: "
115 "Unable to create proc dir entry: sky_chassis_id\n");
116
117 return 0;
118}
119
120static int hdpu_nexus_remove(struct platform_device *pdev)
121{
122 slot_id = -1;
123 chassis_id = -1;
124
125 remove_proc_entry("sky_slot_id", NULL);
126 remove_proc_entry("sky_chassis_id", NULL);
127
128 hdpu_slot_id = 0;
129 hdpu_chassis_id = 0;
130
131 return 0;
132}
133
134static int __init nexus_init(void)
135{
136 return platform_driver_register(&hdpu_nexus_driver);
137}
138
139static void __exit nexus_exit(void)
140{
141 platform_driver_unregister(&hdpu_nexus_driver);
142}
143
144module_init(nexus_init);
145module_exit(nexus_exit);
146
147MODULE_AUTHOR("Brian Waite");
148MODULE_LICENSE("GPL");
149MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
new file mode 100644
index 000000000000..234bfcaf2099
--- /dev/null
+++ b/drivers/misc/hmc6352.c
@@ -0,0 +1,166 @@
1/*
2 * hmc6352.c - Honeywell Compass Driver
3 *
4 * Copyright (C) 2009 Intel Corp
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/i2c.h>
28#include <linux/err.h>
29#include <linux/delay.h>
30#include <linux/sysfs.h>
31
32static DEFINE_MUTEX(compass_mutex);
33
34static int compass_command(struct i2c_client *c, u8 cmd)
35{
36 int ret = i2c_master_send(c, &cmd, 1);
37 if (ret < 0)
38 dev_warn(&c->dev, "command '%c' failed.\n", cmd);
39 return ret;
40}
41
42static int compass_store(struct device *dev, const char *buf, size_t count,
43 const char *map)
44{
45 struct i2c_client *c = to_i2c_client(dev);
46 int ret;
47 unsigned long val;
48
49 if (strict_strtoul(buf, 10, &val))
50 return -EINVAL;
51 if (val >= strlen(map))
52 return -EINVAL;
53 mutex_lock(&compass_mutex);
54 ret = compass_command(c, map[val]);
55 mutex_unlock(&compass_mutex);
56 if (ret < 0)
57 return ret;
58 return count;
59}
60
61static ssize_t compass_calibration_store(struct device *dev,
62 struct device_attribute *attr, const char *buf, size_t count)
63{
64 return compass_store(dev, buf, count, "EC");
65}
66
67static ssize_t compass_power_mode_store(struct device *dev,
68 struct device_attribute *attr, const char *buf, size_t count)
69{
70 return compass_store(dev, buf, count, "SW");
71}
72
73static ssize_t compass_heading_data_show(struct device *dev,
74 struct device_attribute *attr, char *buf)
75{
76 struct i2c_client *client = to_i2c_client(dev);
77 unsigned char i2c_data[2];
78 unsigned int ret;
79
80 mutex_lock(&compass_mutex);
81 ret = compass_command(client, 'A');
82 if (ret != 1) {
83 mutex_unlock(&compass_mutex);
84 return ret;
85 }
86 msleep(10); /* after sending the 'A' cmd, wait 7-10 msecs for the reading */
87 ret = i2c_master_recv(client, i2c_data, 2);
88 mutex_unlock(&compass_mutex);
89 if (ret != 2) {
90 dev_warn(dev, "i2c read data cmd failed\n");
91 return ret;
92 }
93 ret = (i2c_data[0] << 8) | i2c_data[1];
94 return sprintf(buf, "%d.%d\n", ret/10, ret%10);
95}
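The two bytes returned after the 'A' command form a big-endian heading in tenths of a degree; the show routine splits that into integer and fractional digits. For example (reply bytes are illustrative):

	unsigned char i2c_data[2] = { 0x05, 0x3c };		/* example reply */
	unsigned int heading = (i2c_data[0] << 8) | i2c_data[1];	/* 1340 */
	/* printed as "134.0" via heading/10 and heading%10 */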
96
97
98static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL);
99static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
100static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
101
102static struct attribute *mid_att_compass[] = {
103 &dev_attr_heading0_input.attr,
104 &dev_attr_calibration.attr,
105 &dev_attr_power_state.attr,
106 NULL
107};
108
109static const struct attribute_group m_compass_gr = {
110 .name = "hmc6352",
111 .attrs = mid_att_compass
112};
113
114static int hmc6352_probe(struct i2c_client *client,
115 const struct i2c_device_id *id)
116{
117 int res;
118
119 res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
120 if (res) {
121 dev_err(&client->dev, "device_create_file failed\n");
122 return res;
123 }
124 dev_info(&client->dev, "%s HMC6352 compass chip found\n",
125 client->name);
126 return 0;
127}
128
129static int hmc6352_remove(struct i2c_client *client)
130{
131 sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
132 return 0;
133}
134
135static struct i2c_device_id hmc6352_id[] = {
136 { "hmc6352", 0 },
137 { }
138};
139
140MODULE_DEVICE_TABLE(i2c, hmc6352_id);
141
142static struct i2c_driver hmc6352_driver = {
143 .driver = {
144 .name = "hmc6352",
145 },
146 .probe = hmc6352_probe,
147 .remove = hmc6352_remove,
148 .id_table = hmc6352_id,
149};
150
151static int __init sensor_hmc6352_init(void)
152{
153 return i2c_add_driver(&hmc6352_driver);
154}
155
156static void __exit sensor_hmc6352_exit(void)
157{
158 i2c_del_driver(&hmc6352_driver);
159}
160
161module_init(sensor_hmc6352_init);
162module_exit(sensor_hmc6352_exit);
163
164MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
165MODULE_DESCRIPTION("hmc6352 Compass Driver");
166MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 880ccf39e23b..fffc227181b0 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for HP iLO/iLO2 management processor.
2 * Driver for the HP iLO management processor.
3 * 3 *
4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
5 * David Altobelli <david.altobelli@hp.com> 5 * David Altobelli <david.altobelli@hp.com>
@@ -13,14 +13,19 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/interrupt.h>
16#include <linux/ioport.h> 17#include <linux/ioport.h>
17#include <linux/device.h> 18#include <linux/device.h>
18#include <linux/file.h> 19#include <linux/file.h>
19#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/sched.h>
20#include <linux/spinlock.h> 22#include <linux/spinlock.h>
21#include <linux/delay.h> 23#include <linux/delay.h>
22#include <linux/uaccess.h> 24#include <linux/uaccess.h>
23#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/wait.h>
27#include <linux/poll.h>
28#include <linux/slab.h>
24#include "hpilo.h" 29#include "hpilo.h"
25 30
26static struct class *ilo_class; 31static struct class *ilo_class;
@@ -61,9 +66,10 @@ static inline int desc_mem_sz(int nr_entry)
61static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry) 66static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
62{ 67{
63 struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); 68 struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
69 unsigned long flags;
64 int ret = 0; 70 int ret = 0;
65 71
66 spin_lock(&hw->fifo_lock);
72 spin_lock_irqsave(&hw->fifo_lock, flags);
67 if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask] 73 if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
68 & ENTRY_MASK_O)) { 74 & ENTRY_MASK_O)) {
69 fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |= 75 fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
@@ -71,7 +77,7 @@ static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
71 fifo_q->tail += 1; 77 fifo_q->tail += 1;
72 ret = 1; 78 ret = 1;
73 } 79 }
74 spin_unlock(&hw->fifo_lock);
80 spin_unlock_irqrestore(&hw->fifo_lock, flags);
75 81
76 return ret; 82 return ret;
77} 83}
@@ -79,10 +85,11 @@ static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
79static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry) 85static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
80{ 86{
81 struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); 87 struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
88 unsigned long flags;
82 int ret = 0; 89 int ret = 0;
83 u64 c; 90 u64 c;
84 91
85 spin_lock(&hw->fifo_lock);
92 spin_lock_irqsave(&hw->fifo_lock, flags);
86 c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; 93 c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
87 if (c & ENTRY_MASK_C) { 94 if (c & ENTRY_MASK_C) {
88 if (entry) 95 if (entry)
@@ -93,7 +100,23 @@ static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
93 fifo_q->head += 1; 100 fifo_q->head += 1;
94 ret = 1; 101 ret = 1;
95 } 102 }
96 spin_unlock(&hw->fifo_lock);
103 spin_unlock_irqrestore(&hw->fifo_lock, flags);
104
105 return ret;
106}
107
108static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
109{
110 struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
111 unsigned long flags;
112 int ret = 0;
113 u64 c;
114
115 spin_lock_irqsave(&hw->fifo_lock, flags);
116 c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
117 if (c & ENTRY_MASK_C)
118 ret = 1;
119 spin_unlock_irqrestore(&hw->fifo_lock, flags);
97 120
98 return ret; 121 return ret;
99} 122}
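All three helpers treat the fifobar as a power-of-two ring indexed by free-running head/tail counters masked with imask, with per-entry flag bits deciding whether a slot may be produced into or consumed from. A stripped-down sketch of that indexing scheme; the structure and bit names are simplified placeholders, not the driver's layout:

#define RING_ENTRIES	128			/* must be a power of two */
#define RING_MASK	(RING_ENTRIES - 1)
#define SLOT_BUSY	(1ULL << 63)		/* stand-in for ENTRY_MASK_O/C */

struct ring {
	u64 slot[RING_ENTRIES];
	unsigned int head;			/* consumer index, free running */
	unsigned int tail;			/* producer index, free running */
};

static int ring_push(struct ring *r, u64 entry)
{
	if (r->slot[(r->tail + 1) & RING_MASK] & SLOT_BUSY)
		return 0;			/* next slot still owned by the other side */
	r->slot[r->tail & RING_MASK] |= entry;
	r->tail++;				/* wraps naturally via the mask */
	return 1;
}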
@@ -142,6 +165,13 @@ static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
142 return ret; 165 return ret;
143} 166}
144 167
168static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
169{
170 char *fifobar = ccb->ccb_u3.recv_fifobar;
171
172 return fifo_check_recv(hw, fifobar);
173}
174
145static inline void doorbell_set(struct ccb *ccb) 175static inline void doorbell_set(struct ccb *ccb)
146{ 176{
147 iowrite8(1, ccb->ccb_u5.db_base); 177 iowrite8(1, ccb->ccb_u5.db_base);
@@ -151,6 +181,7 @@ static inline void doorbell_clr(struct ccb *ccb)
151{ 181{
152 iowrite8(2, ccb->ccb_u5.db_base); 182 iowrite8(2, ccb->ccb_u5.db_base);
153} 183}
184
154static inline int ctrl_set(int l2sz, int idxmask, int desclim) 185static inline int ctrl_set(int l2sz, int idxmask, int desclim)
155{ 186{
156 int active = 0, go = 1; 187 int active = 0, go = 1;
@@ -160,6 +191,7 @@ static inline int ctrl_set(int l2sz, int idxmask, int desclim)
160 active << CTRL_BITPOS_A | 191 active << CTRL_BITPOS_A |
161 go << CTRL_BITPOS_G; 192 go << CTRL_BITPOS_G;
162} 193}
194
163static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz) 195static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
164{ 196{
165 /* for simplicity, use the same parameters for send and recv ctrls */ 197 /* for simplicity, use the same parameters for send and recv ctrls */
@@ -192,13 +224,10 @@ static void fifo_setup(void *base_addr, int nr_entry)
192 224
193static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data) 225static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
194{ 226{
195 struct ccb *driver_ccb;
196 struct ccb __iomem *device_ccb;
227 struct ccb *driver_ccb = &data->driver_ccb;
228 struct ccb __iomem *device_ccb = data->mapped_ccb;
197 int retries; 229 int retries;
198 230
199 driver_ccb = &data->driver_ccb;
200 device_ccb = data->mapped_ccb;
201
202 /* complicated dance to tell the hw we are stopping */ 231 /* complicated dance to tell the hw we are stopping */
203 doorbell_clr(driver_ccb); 232 doorbell_clr(driver_ccb);
204 iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G), 233 iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
@@ -225,34 +254,31 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
225 pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa); 254 pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
226} 255}
227 256
228static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
257static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
229{ 258{
230 char *dma_va, *dma_pa;
231 int pkt_id, pkt_sz, i, error;
259 char *dma_va;
260 dma_addr_t dma_pa;
232 struct ccb *driver_ccb, *ilo_ccb; 261 struct ccb *driver_ccb, *ilo_ccb;
233 struct pci_dev *pdev;
234 262
235 driver_ccb = &data->driver_ccb; 263 driver_ccb = &data->driver_ccb;
236 ilo_ccb = &data->ilo_ccb; 264 ilo_ccb = &data->ilo_ccb;
237 pdev = hw->ilo_dev;
238 265
239 data->dma_size = 2 * fifo_sz(NR_QENTRY) + 266 data->dma_size = 2 * fifo_sz(NR_QENTRY) +
240 2 * desc_mem_sz(NR_QENTRY) + 267 2 * desc_mem_sz(NR_QENTRY) +
241 ILO_START_ALIGN + ILO_CACHE_SZ; 268 ILO_START_ALIGN + ILO_CACHE_SZ;
242 269
243 error = -ENOMEM;
270 data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
244 data->dma_va = pci_alloc_consistent(pdev, data->dma_size,
245 &data->dma_pa); 271 &data->dma_pa);
246 if (!data->dma_va) 272 if (!data->dma_va)
247 goto out;
273 return -ENOMEM;
248 274
249 dma_va = (char *)data->dma_va; 275 dma_va = (char *)data->dma_va;
250 dma_pa = (char *)data->dma_pa; 276 dma_pa = data->dma_pa;
251 277
252 memset(dma_va, 0, data->dma_size); 278 memset(dma_va, 0, data->dma_size);
253 279
254 dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN); 280 dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
255 dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN); 281 dma_pa = roundup(dma_pa, ILO_START_ALIGN);
256 282
257 /* 283 /*
258 * Create two ccb's, one with virt addrs, one with phys addrs. 284 * Create two ccb's, one with virt addrs, one with phys addrs.
@@ -263,26 +289,26 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
263 289
264 fifo_setup(dma_va, NR_QENTRY); 290 fifo_setup(dma_va, NR_QENTRY);
265 driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE; 291 driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
266 ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE; 292 ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
267 dma_va += fifo_sz(NR_QENTRY); 293 dma_va += fifo_sz(NR_QENTRY);
268 dma_pa += fifo_sz(NR_QENTRY); 294 dma_pa += fifo_sz(NR_QENTRY);
269 295
270 dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ); 296 dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
271 dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ); 297 dma_pa = roundup(dma_pa, ILO_CACHE_SZ);
272 298
273 fifo_setup(dma_va, NR_QENTRY); 299 fifo_setup(dma_va, NR_QENTRY);
274 driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE; 300 driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
275 ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE; 301 ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
276 dma_va += fifo_sz(NR_QENTRY); 302 dma_va += fifo_sz(NR_QENTRY);
277 dma_pa += fifo_sz(NR_QENTRY); 303 dma_pa += fifo_sz(NR_QENTRY);
278 304
279 driver_ccb->ccb_u2.send_desc = dma_va; 305 driver_ccb->ccb_u2.send_desc = dma_va;
280 ilo_ccb->ccb_u2.send_desc = dma_pa; 306 ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
281 dma_pa += desc_mem_sz(NR_QENTRY); 307 dma_pa += desc_mem_sz(NR_QENTRY);
282 dma_va += desc_mem_sz(NR_QENTRY); 308 dma_va += desc_mem_sz(NR_QENTRY);
283 309
284 driver_ccb->ccb_u4.recv_desc = dma_va; 310 driver_ccb->ccb_u4.recv_desc = dma_va;
285 ilo_ccb->ccb_u4.recv_desc = dma_pa; 311 ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;
286 312
287 driver_ccb->channel = slot; 313 driver_ccb->channel = slot;
288 ilo_ccb->channel = slot; 314 ilo_ccb->channel = slot;
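ilo_ccb_setup() carves one coherent allocation into two FIFOs and two descriptor areas, advancing a CPU pointer and a dma_addr_t cursor in lockstep and rounding both up to each piece's alignment (the dma_addr_t now stays an integer instead of being cast to char *). A condensed sketch of that carving pattern, with made-up parameter names:

/* Sketch: hand out one aligned piece of a coherent buffer and advance
 * both the CPU and the DMA cursor by the same amount. */
static void *carve(char **va, dma_addr_t *pa, size_t sz, size_t align,
		   dma_addr_t *piece_pa)
{
	void *piece;

	*va = (char *)roundup((unsigned long)*va, align);
	*pa = roundup(*pa, align);		/* integer math, no pointer casts */
	piece = *va;
	*piece_pa = *pa;
	*va += sz;
	*pa += sz;
	return piece;
}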
@@ -290,10 +316,18 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
290 driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE); 316 driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
291 ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */ 317 ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */
292 318
319 return 0;
320}
321
322static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
323{
324 int pkt_id, pkt_sz;
325 struct ccb *driver_ccb = &data->driver_ccb;
326
293 /* copy the ccb with physical addrs to device memory */ 327 /* copy the ccb with physical addrs to device memory */
294 data->mapped_ccb = (struct ccb __iomem *) 328 data->mapped_ccb = (struct ccb __iomem *)
295 (hw->ram_vaddr + (slot * ILOHW_CCB_SZ)); 329 (hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
296 memcpy_toio(data->mapped_ccb, ilo_ccb, sizeof(struct ccb)); 330 memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));
297 331
298 /* put packets on the send and receive queues */ 332 /* put packets on the send and receive queues */
299 pkt_sz = 0; 333 pkt_sz = 0;
@@ -306,7 +340,14 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
306 for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) 340 for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
307 ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz); 341 ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);
308 342
343 /* the ccb is ready to use */
309 doorbell_clr(driver_ccb); 344 doorbell_clr(driver_ccb);
345}
346
347static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
348{
349 int pkt_id, i;
350 struct ccb *driver_ccb = &data->driver_ccb;
310 351
311 /* make sure iLO is really handling requests */ 352 /* make sure iLO is really handling requests */
312 for (i = MAX_WAIT; i > 0; i--) { 353 for (i = MAX_WAIT; i > 0; i--) {
@@ -315,20 +356,14 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
315 udelay(WAIT_TIME); 356 udelay(WAIT_TIME);
316 } 357 }
317 358
318 if (i) {
319 ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
320 doorbell_set(driver_ccb);
321 } else {
322 dev_err(&pdev->dev, "Open could not dequeue a packet\n");
323 error = -EBUSY;
324 goto free;
359 if (i == 0) {
360 dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
361 return -EBUSY;
325 } 362 }
326 363
364 ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
365 doorbell_set(driver_ccb);
327 return 0; 366 return 0;
328free:
329 ilo_ccb_close(pdev, data);
330out:
331 return error;
332} 367}
333 368
334static inline int is_channel_reset(struct ccb *ccb) 369static inline int is_channel_reset(struct ccb *ccb)
@@ -343,19 +378,45 @@ static inline void set_channel_reset(struct ccb *ccb)
343 FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1; 378 FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
344} 379}
345 380
381static inline int get_device_outbound(struct ilo_hwinfo *hw)
382{
383 return ioread32(&hw->mmio_vaddr[DB_OUT]);
384}
385
386static inline int is_db_reset(int db_out)
387{
388 return db_out & (1 << DB_RESET);
389}
390
346static inline int is_device_reset(struct ilo_hwinfo *hw) 391static inline int is_device_reset(struct ilo_hwinfo *hw)
347{ 392{
348 /* check for global reset condition */ 393 /* check for global reset condition */
349 return ioread32(&hw->mmio_vaddr[DB_OUT]) & (1 << DB_RESET);
394 return is_db_reset(get_device_outbound(hw));
395}
396
397static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
398{
399 iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
350} 400}
351 401
352static inline void clear_device(struct ilo_hwinfo *hw) 402static inline void clear_device(struct ilo_hwinfo *hw)
353{ 403{
354 /* clear the device (reset bits, pending channel entries) */ 404 /* clear the device (reset bits, pending channel entries) */
355 iowrite32(-1, &hw->mmio_vaddr[DB_OUT]);
405 clear_pending_db(hw, -1);
406}
407
408static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
409{
410 iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
356} 411}
357 412
358static void ilo_locked_reset(struct ilo_hwinfo *hw) 413static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
414{
415 iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
416 &hw->mmio_vaddr[DB_IRQ]);
417}
418
419static void ilo_set_reset(struct ilo_hwinfo *hw)
359{ 420{
360 int slot; 421 int slot;
361 422
@@ -368,40 +429,22 @@ static void ilo_locked_reset(struct ilo_hwinfo *hw)
368 continue; 429 continue;
369 set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb); 430 set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
370 } 431 }
371
372 clear_device(hw);
373}
374
375static void ilo_reset(struct ilo_hwinfo *hw)
376{
377 spin_lock(&hw->alloc_lock);
378
379 /* reset might have been handled after lock was taken */
380 if (is_device_reset(hw))
381 ilo_locked_reset(hw);
382
383 spin_unlock(&hw->alloc_lock);
384} 432}
385 433
386static ssize_t ilo_read(struct file *fp, char __user *buf, 434static ssize_t ilo_read(struct file *fp, char __user *buf,
387 size_t len, loff_t *off) 435 size_t len, loff_t *off)
388{ 436{
389 int err, found, cnt, pkt_id, pkt_len; 437 int err, found, cnt, pkt_id, pkt_len;
390 struct ccb_data *data; 438 struct ccb_data *data = fp->private_data;
391 struct ccb *driver_ccb; 439 struct ccb *driver_ccb = &data->driver_ccb;
392 struct ilo_hwinfo *hw; 440 struct ilo_hwinfo *hw = data->ilo_hw;
393 void *pkt; 441 void *pkt;
394 442
395 data = fp->private_data; 443 if (is_channel_reset(driver_ccb)) {
396 driver_ccb = &data->driver_ccb;
397 hw = data->ilo_hw;
398
399 if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
400 /* 444 /*
401 * If the device has been reset, applications 445 * If the device has been reset, applications
402 * need to close and reopen all ccbs. 446 * need to close and reopen all ccbs.
403 */ 447 */
404 ilo_reset(hw);
405 return -ENODEV; 448 return -ENODEV;
406 } 449 }
407 450
@@ -442,23 +485,13 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
442 size_t len, loff_t *off) 485 size_t len, loff_t *off)
443{ 486{
444 int err, pkt_id, pkt_len; 487 int err, pkt_id, pkt_len;
445 struct ccb_data *data; 488 struct ccb_data *data = fp->private_data;
446 struct ccb *driver_ccb; 489 struct ccb *driver_ccb = &data->driver_ccb;
447 struct ilo_hwinfo *hw; 490 struct ilo_hwinfo *hw = data->ilo_hw;
448 void *pkt; 491 void *pkt;
449 492
450 data = fp->private_data; 493 if (is_channel_reset(driver_ccb))
451 driver_ccb = &data->driver_ccb;
452 hw = data->ilo_hw;
453
454 if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
455 /*
456 * If the device has been reset, applications
457 * need to close and reopen all ccbs.
458 */
459 ilo_reset(hw);
460 return -ENODEV; 494 return -ENODEV;
461 }
462 495
463 /* get a packet to send the user command */ 496 /* get a packet to send the user command */
464 if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt)) 497 if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
@@ -480,32 +513,48 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
480 return err ? -EFAULT : len; 513 return err ? -EFAULT : len;
481} 514}
482 515
516static unsigned int ilo_poll(struct file *fp, poll_table *wait)
517{
518 struct ccb_data *data = fp->private_data;
519 struct ccb *driver_ccb = &data->driver_ccb;
520
521 poll_wait(fp, &data->ccb_waitq, wait);
522
523 if (is_channel_reset(driver_ccb))
524 return POLLERR;
525 else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
526 return POLLIN | POLLRDNORM;
527
528 return 0;
529}
530
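The ilo_poll() handler added above lets a client sleep until the interrupt handler queues received data instead of spinning on read(). A minimal user-space sketch of that usage follows; the device node name is an assumption for illustration only.

/* Hypothetical user-space consumer of the new hpilo poll support (node name assumed). */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	struct pollfd pfd;

	pfd.fd = open("/dev/hpilo/d0ccb2", O_RDWR);	/* assumed channel node */
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;
	pfd.revents = 0;

	/* sleep until ilo_isr() wakes ccb_waitq, then drain one packet */
	if (poll(&pfd, 1, -1) < 0)
		return 1;
	if (pfd.revents & POLLERR)
		fprintf(stderr, "channel reset, reopen the ccb\n");
	else if (pfd.revents & POLLIN)
		printf("received %zd bytes\n", read(pfd.fd, buf, sizeof(buf)));
	close(pfd.fd);
	return 0;
}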
483static int ilo_close(struct inode *ip, struct file *fp) 531static int ilo_close(struct inode *ip, struct file *fp)
484{ 532{
485 int slot; 533 int slot;
486 struct ccb_data *data; 534 struct ccb_data *data;
487 struct ilo_hwinfo *hw; 535 struct ilo_hwinfo *hw;
536 unsigned long flags;
488 537
489 slot = iminor(ip) % MAX_CCB; 538 slot = iminor(ip) % MAX_CCB;
490 hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); 539 hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
491 540
492 spin_lock(&hw->alloc_lock); 541 spin_lock(&hw->open_lock);
493
494 if (is_device_reset(hw))
495 ilo_locked_reset(hw);
496 542
497 if (hw->ccb_alloc[slot]->ccb_cnt == 1) { 543 if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
498 544
499 data = fp->private_data; 545 data = fp->private_data;
500 546
547 spin_lock_irqsave(&hw->alloc_lock, flags);
548 hw->ccb_alloc[slot] = NULL;
549 spin_unlock_irqrestore(&hw->alloc_lock, flags);
550
501 ilo_ccb_close(hw->ilo_dev, data); 551 ilo_ccb_close(hw->ilo_dev, data);
502 552
503 kfree(data); 553 kfree(data);
504 hw->ccb_alloc[slot] = NULL;
505 } else 554 } else
506 hw->ccb_alloc[slot]->ccb_cnt--; 555 hw->ccb_alloc[slot]->ccb_cnt--;
507 556
508 spin_unlock(&hw->alloc_lock); 557 spin_unlock(&hw->open_lock);
509 558
510 return 0; 559 return 0;
511} 560}
@@ -515,6 +564,7 @@ static int ilo_open(struct inode *ip, struct file *fp)
515 int slot, error; 564 int slot, error;
516 struct ccb_data *data; 565 struct ccb_data *data;
517 struct ilo_hwinfo *hw; 566 struct ilo_hwinfo *hw;
567 unsigned long flags;
518 568
519 slot = iminor(ip) % MAX_CCB; 569 slot = iminor(ip) % MAX_CCB;
520 hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); 570 hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
@@ -524,22 +574,42 @@ static int ilo_open(struct inode *ip, struct file *fp)
524 if (!data) 574 if (!data)
525 return -ENOMEM; 575 return -ENOMEM;
526 576
527 spin_lock(&hw->alloc_lock); 577 spin_lock(&hw->open_lock);
528
529 if (is_device_reset(hw))
530 ilo_locked_reset(hw);
531 578
532 /* each fd private_data holds sw/hw view of ccb */ 579 /* each fd private_data holds sw/hw view of ccb */
533 if (hw->ccb_alloc[slot] == NULL) { 580 if (hw->ccb_alloc[slot] == NULL) {
534 /* create a channel control block for this minor */ 581 /* create a channel control block for this minor */
535 error = ilo_ccb_open(hw, data, slot); 582 error = ilo_ccb_setup(hw, data, slot);
536 if (!error) { 583 if (error) {
537 hw->ccb_alloc[slot] = data;
538 hw->ccb_alloc[slot]->ccb_cnt = 1;
539 hw->ccb_alloc[slot]->ccb_excl = fp->f_flags & O_EXCL;
540 hw->ccb_alloc[slot]->ilo_hw = hw;
541 } else
542 kfree(data); 584 kfree(data);
585 goto out;
586 }
587
588 data->ccb_cnt = 1;
589 data->ccb_excl = fp->f_flags & O_EXCL;
590 data->ilo_hw = hw;
591 init_waitqueue_head(&data->ccb_waitq);
592
593 /* write the ccb to hw */
594 spin_lock_irqsave(&hw->alloc_lock, flags);
595 ilo_ccb_open(hw, data, slot);
596 hw->ccb_alloc[slot] = data;
597 spin_unlock_irqrestore(&hw->alloc_lock, flags);
598
599 /* make sure the channel is functional */
600 error = ilo_ccb_verify(hw, data);
601 if (error) {
602
603 spin_lock_irqsave(&hw->alloc_lock, flags);
604 hw->ccb_alloc[slot] = NULL;
605 spin_unlock_irqrestore(&hw->alloc_lock, flags);
606
607 ilo_ccb_close(hw->ilo_dev, data);
608
609 kfree(data);
610 goto out;
611 }
612
543 } else { 613 } else {
544 kfree(data); 614 kfree(data);
545 if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) { 615 if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
@@ -554,7 +624,8 @@ static int ilo_open(struct inode *ip, struct file *fp)
554 error = 0; 624 error = 0;
555 } 625 }
556 } 626 }
557 spin_unlock(&hw->alloc_lock); 627out:
628 spin_unlock(&hw->open_lock);
558 629
559 if (!error) 630 if (!error)
560 fp->private_data = hw->ccb_alloc[slot]; 631 fp->private_data = hw->ccb_alloc[slot];
@@ -566,10 +637,47 @@ static const struct file_operations ilo_fops = {
566 .owner = THIS_MODULE, 637 .owner = THIS_MODULE,
567 .read = ilo_read, 638 .read = ilo_read,
568 .write = ilo_write, 639 .write = ilo_write,
640 .poll = ilo_poll,
569 .open = ilo_open, 641 .open = ilo_open,
570 .release = ilo_close, 642 .release = ilo_close,
643 .llseek = noop_llseek,
571}; 644};
572 645
646static irqreturn_t ilo_isr(int irq, void *data)
647{
648 struct ilo_hwinfo *hw = data;
649 int pending, i;
650
651 spin_lock(&hw->alloc_lock);
652
653 /* check for ccbs which have data */
654 pending = get_device_outbound(hw);
655 if (!pending) {
656 spin_unlock(&hw->alloc_lock);
657 return IRQ_NONE;
658 }
659
660 if (is_db_reset(pending)) {
661 /* wake up all ccbs if the device was reset */
662 pending = -1;
663 ilo_set_reset(hw);
664 }
665
666 for (i = 0; i < MAX_CCB; i++) {
667 if (!hw->ccb_alloc[i])
668 continue;
669 if (pending & (1 << i))
670 wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
671 }
672
673 /* clear the device of the channels that have been handled */
674 clear_pending_db(hw, pending);
675
676 spin_unlock(&hw->alloc_lock);
677
678 return IRQ_HANDLED;
679}
680
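ilo_isr() treats the DB_OUT register as a bitmask: one bit per CCB slot plus the DB_RESET bit. A small stand-alone illustration of that dispatch, using made-up values, shows how the bits map to wakeups.

/* Stand-alone illustration of the per-slot doorbell dispatch (example values only). */
#include <stdio.h>

#define MAX_CCB  8	/* channel control blocks per device */
#define DB_RESET 26	/* global reset bit in DB_OUT */

int main(void)
{
	int pending = 0x05;	/* example: slots 0 and 2 have received data */
	int i;

	if (pending & (1 << DB_RESET))
		pending = -1;	/* device reset: wake every open slot */

	for (i = 0; i < MAX_CCB; i++)
		if (pending & (1 << i))
			printf("wake waiters on ccb slot %d\n", i);

	return 0;
}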
573static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) 681static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
574{ 682{
575 pci_iounmap(pdev, hw->db_vaddr); 683 pci_iounmap(pdev, hw->db_vaddr);
@@ -623,6 +731,8 @@ static void ilo_remove(struct pci_dev *pdev)
623 device_destroy(ilo_class, MKDEV(ilo_major, i)); 731 device_destroy(ilo_class, MKDEV(ilo_major, i));
624 732
625 cdev_del(&ilo_hw->cdev); 733 cdev_del(&ilo_hw->cdev);
734 ilo_disable_interrupts(ilo_hw);
735 free_irq(pdev->irq, ilo_hw);
626 ilo_unmap_device(pdev, ilo_hw); 736 ilo_unmap_device(pdev, ilo_hw);
627 pci_release_regions(pdev); 737 pci_release_regions(pdev);
628 pci_disable_device(pdev); 738 pci_disable_device(pdev);
@@ -658,6 +768,7 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
658 ilo_hw->ilo_dev = pdev; 768 ilo_hw->ilo_dev = pdev;
659 spin_lock_init(&ilo_hw->alloc_lock); 769 spin_lock_init(&ilo_hw->alloc_lock);
660 spin_lock_init(&ilo_hw->fifo_lock); 770 spin_lock_init(&ilo_hw->fifo_lock);
771 spin_lock_init(&ilo_hw->open_lock);
661 772
662 error = pci_enable_device(pdev); 773 error = pci_enable_device(pdev);
663 if (error) 774 if (error)
@@ -676,13 +787,19 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
676 pci_set_drvdata(pdev, ilo_hw); 787 pci_set_drvdata(pdev, ilo_hw);
677 clear_device(ilo_hw); 788 clear_device(ilo_hw);
678 789
790 error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
791 if (error)
792 goto unmap;
793
794 ilo_enable_interrupts(ilo_hw);
795
679 cdev_init(&ilo_hw->cdev, &ilo_fops); 796 cdev_init(&ilo_hw->cdev, &ilo_fops);
680 ilo_hw->cdev.owner = THIS_MODULE; 797 ilo_hw->cdev.owner = THIS_MODULE;
681 start = devnum * MAX_CCB; 798 start = devnum * MAX_CCB;
682 error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB); 799 error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
683 if (error) { 800 if (error) {
684 dev_err(&pdev->dev, "Could not add cdev\n"); 801 dev_err(&pdev->dev, "Could not add cdev\n");
685 goto unmap; 802 goto remove_isr;
686 } 803 }
687 804
688 for (minor = 0 ; minor < MAX_CCB; minor++) { 805 for (minor = 0 ; minor < MAX_CCB; minor++) {
@@ -695,6 +812,9 @@ static int __devinit ilo_probe(struct pci_dev *pdev,
695 } 812 }
696 813
697 return 0; 814 return 0;
815remove_isr:
816 ilo_disable_interrupts(ilo_hw);
817 free_irq(pdev->irq, ilo_hw);
698unmap: 818unmap:
699 ilo_unmap_device(pdev, ilo_hw); 819 ilo_unmap_device(pdev, ilo_hw);
700free_regions: 820free_regions:
@@ -759,7 +879,7 @@ static void __exit ilo_exit(void)
759 class_destroy(ilo_class); 879 class_destroy(ilo_class);
760} 880}
761 881
762MODULE_VERSION("1.1"); 882MODULE_VERSION("1.2");
763MODULE_ALIAS(ILO_NAME); 883MODULE_ALIAS(ILO_NAME);
764MODULE_DESCRIPTION(ILO_NAME); 884MODULE_DESCRIPTION(ILO_NAME);
765MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); 885MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 03a14c82aad9..54e43adbdea1 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -44,13 +44,27 @@ struct ilo_hwinfo {
44 44
45 struct pci_dev *ilo_dev; 45 struct pci_dev *ilo_dev;
46 46
47 /*
48 * open_lock serializes ccb_cnt during open and close
49 * [ irq disabled ]
50 * -> alloc_lock used when adding/removing/searching ccb_alloc,
51 * which represents all ccbs open on the device
52 * --> fifo_lock controls access to fifo queues shared with hw
53 *
54 * Locks must be taken in this order, but open_lock and alloc_lock
 55 * are optional; they do not need to be held in order to take a
56 * lower level lock.
57 */
58 spinlock_t open_lock;
47 spinlock_t alloc_lock; 59 spinlock_t alloc_lock;
48 spinlock_t fifo_lock; 60 spinlock_t fifo_lock;
49 61
50 struct cdev cdev; 62 struct cdev cdev;
51}; 63};
52 64
53/* offset from mmio_vaddr */ 65/* offset from mmio_vaddr for enabling doorbell interrupts */
66#define DB_IRQ 0xB2
67/* offset from mmio_vaddr for outbound communications */
54#define DB_OUT 0xD4 68#define DB_OUT 0xD4
55/* DB_OUT reset bit */ 69/* DB_OUT reset bit */
56#define DB_RESET 26 70#define DB_RESET 26
@@ -65,21 +79,21 @@ struct ilo_hwinfo {
65struct ccb { 79struct ccb {
66 union { 80 union {
67 char *send_fifobar; 81 char *send_fifobar;
68 u64 padding1; 82 u64 send_fifobar_pa;
69 } ccb_u1; 83 } ccb_u1;
70 union { 84 union {
71 char *send_desc; 85 char *send_desc;
72 u64 padding2; 86 u64 send_desc_pa;
73 } ccb_u2; 87 } ccb_u2;
74 u64 send_ctrl; 88 u64 send_ctrl;
75 89
76 union { 90 union {
77 char *recv_fifobar; 91 char *recv_fifobar;
78 u64 padding3; 92 u64 recv_fifobar_pa;
79 } ccb_u3; 93 } ccb_u3;
80 union { 94 union {
81 char *recv_desc; 95 char *recv_desc;
82 u64 padding4; 96 u64 recv_desc_pa;
83 } ccb_u4; 97 } ccb_u4;
84 u64 recv_ctrl; 98 u64 recv_ctrl;
85 99
@@ -131,6 +145,9 @@ struct ccb_data {
131 /* pointer to hardware device info */ 145 /* pointer to hardware device info */
132 struct ilo_hwinfo *ilo_hw; 146 struct ilo_hwinfo *ilo_hw;
133 147
148 /* queue for this ccb to wait for recv data */
149 wait_queue_head_t ccb_waitq;
150
134 /* usage count, to allow for shared ccb's */ 151 /* usage count, to allow for shared ccb's */
135 int ccb_cnt; 152 int ccb_cnt;
136 153
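The locking comment added to hpilo.h above documents a three-level hierarchy: open_lock, then alloc_lock, then fifo_lock. A compilable user-space analogy, with pthread mutexes standing in for the spinlocks, makes the permitted nestings concrete; the function names are illustrative only.

/* User-space analogy of the documented lock order (pthread mutexes stand in for spinlocks). */
#include <pthread.h>

static pthread_mutex_t open_lock  = PTHREAD_MUTEX_INITIALIZER;	/* outermost */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;	/* middle    */
static pthread_mutex_t fifo_lock  = PTHREAD_MUTEX_INITIALIZER;	/* innermost */

static void open_path(void)	/* mirrors ilo_open(): open_lock, then alloc_lock */
{
	pthread_mutex_lock(&open_lock);
	pthread_mutex_lock(&alloc_lock);
	/* ... publish the new ccb ... */
	pthread_mutex_unlock(&alloc_lock);
	pthread_mutex_unlock(&open_lock);
}

static void irq_path(void)	/* lower-level locks may also be taken on their own */
{
	pthread_mutex_lock(&alloc_lock);
	pthread_mutex_lock(&fifo_lock);
	/* ... touch the queues shared with hardware ... */
	pthread_mutex_unlock(&fifo_lock);
	pthread_mutex_unlock(&alloc_lock);
}

int main(void)
{
	open_path();
	irq_path();
	return 0;
}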
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 276d3fb68094..5c766b4fb238 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -22,6 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/sched.h>
26#include <linux/slab.h>
25#include "ibmasm.h" 27#include "ibmasm.h"
26#include "lowlevel.h" 28#include "lowlevel.h"
27 29
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 68a0a5b94795..76bfda1ffaa9 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -22,6 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/sched.h>
26#include <linux/slab.h>
25#include "ibmasm.h" 27#include "ibmasm.h"
26#include "lowlevel.h" 28#include "lowlevel.h"
27 29
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index de966a6fb7e6..d2d5d23416dd 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -75,6 +75,7 @@
75 75
76#include <linux/fs.h> 76#include <linux/fs.h>
77#include <linux/pagemap.h> 77#include <linux/pagemap.h>
78#include <linux/slab.h>
78#include <asm/uaccess.h> 79#include <asm/uaccess.h>
79#include <asm/io.h> 80#include <asm/io.h>
80#include "ibmasm.h" 81#include "ibmasm.h"
@@ -90,14 +91,13 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root);
90static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent); 91static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
91 92
92 93
93static int ibmasmfs_get_super(struct file_system_type *fst, 94static struct dentry *ibmasmfs_mount(struct file_system_type *fst,
94 int flags, const char *name, void *data, 95 int flags, const char *name, void *data)
95 struct vfsmount *mnt)
96{ 96{
97 return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt); 97 return mount_single(fst, flags, data, ibmasmfs_fill_super);
98} 98}
99 99
100static struct super_operations ibmasmfs_s_ops = { 100static const struct super_operations ibmasmfs_s_ops = {
101 .statfs = simple_statfs, 101 .statfs = simple_statfs,
102 .drop_inode = generic_delete_inode, 102 .drop_inode = generic_delete_inode,
103}; 103};
@@ -107,7 +107,7 @@ static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
107static struct file_system_type ibmasmfs_type = { 107static struct file_system_type ibmasmfs_type = {
108 .owner = THIS_MODULE, 108 .owner = THIS_MODULE,
109 .name = "ibmasmfs", 109 .name = "ibmasmfs",
110 .get_sb = ibmasmfs_get_super, 110 .mount = ibmasmfs_mount,
111 .kill_sb = kill_litter_super, 111 .kill_sb = kill_litter_super,
112}; 112};
113 113
@@ -145,6 +145,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
145 struct inode *ret = new_inode(sb); 145 struct inode *ret = new_inode(sb);
146 146
147 if (ret) { 147 if (ret) {
148 ret->i_ino = get_next_ino();
148 ret->i_mode = mode; 149 ret->i_mode = mode;
149 ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; 150 ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
150 } 151 }
@@ -583,6 +584,7 @@ static const struct file_operations command_fops = {
583 .release = command_file_close, 584 .release = command_file_close,
584 .read = command_file_read, 585 .read = command_file_read,
585 .write = command_file_write, 586 .write = command_file_write,
587 .llseek = generic_file_llseek,
586}; 588};
587 589
588static const struct file_operations event_fops = { 590static const struct file_operations event_fops = {
@@ -590,6 +592,7 @@ static const struct file_operations event_fops = {
590 .release = event_file_close, 592 .release = event_file_close,
591 .read = event_file_read, 593 .read = event_file_read,
592 .write = event_file_write, 594 .write = event_file_write,
595 .llseek = generic_file_llseek,
593}; 596};
594 597
595static const struct file_operations r_heartbeat_fops = { 598static const struct file_operations r_heartbeat_fops = {
@@ -597,6 +600,7 @@ static const struct file_operations r_heartbeat_fops = {
597 .release = r_heartbeat_file_close, 600 .release = r_heartbeat_file_close,
598 .read = r_heartbeat_file_read, 601 .read = r_heartbeat_file_read,
599 .write = r_heartbeat_file_write, 602 .write = r_heartbeat_file_write,
603 .llseek = generic_file_llseek,
600}; 604};
601 605
602static const struct file_operations remote_settings_fops = { 606static const struct file_operations remote_settings_fops = {
@@ -604,6 +608,7 @@ static const struct file_operations remote_settings_fops = {
604 .release = remote_settings_file_close, 608 .release = remote_settings_file_close,
605 .read = remote_settings_file_read, 609 .read = remote_settings_file_read,
606 .write = remote_settings_file_write, 610 .write = remote_settings_file_write,
611 .llseek = generic_file_llseek,
607}; 612};
608 613
609 614
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index dc14b0b9cbfa..a234d965243b 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -52,6 +52,7 @@
52 52
53#include <linux/pci.h> 53#include <linux/pci.h>
54#include <linux/init.h> 54#include <linux/init.h>
55#include <linux/slab.h>
55#include "ibmasm.h" 56#include "ibmasm.h"
56#include "lowlevel.h" 57#include "lowlevel.h"
57#include "remote.h" 58#include "remote.h"
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index bec9e2c44bef..2de487ac788c 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/sched.h>
23#include "ibmasm.h" 24#include "ibmasm.h"
24#include "dot_command.h" 25#include "dot_command.h"
25 26
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 6e43ab4231ae..152e9d93eecb 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -26,13 +26,11 @@
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/slab.h>
29 30
30/* Addresses to scan */ 31/* Addresses to scan */
31static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; 32static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
32 33
33/* Insmod parameters */
34I2C_CLIENT_INSMOD_1(ics932s401);
35
36/* ICS932S401 registers */ 34/* ICS932S401 registers */
37#define ICS932S401_REG_CFG2 0x01 35#define ICS932S401_REG_CFG2 0x01
38#define ICS932S401_CFG1_SPREAD 0x01 36#define ICS932S401_CFG1_SPREAD 0x01
@@ -106,12 +104,12 @@ struct ics932s401_data {
106 104
107static int ics932s401_probe(struct i2c_client *client, 105static int ics932s401_probe(struct i2c_client *client,
108 const struct i2c_device_id *id); 106 const struct i2c_device_id *id);
109static int ics932s401_detect(struct i2c_client *client, int kind, 107static int ics932s401_detect(struct i2c_client *client,
110 struct i2c_board_info *info); 108 struct i2c_board_info *info);
111static int ics932s401_remove(struct i2c_client *client); 109static int ics932s401_remove(struct i2c_client *client);
112 110
113static const struct i2c_device_id ics932s401_id[] = { 111static const struct i2c_device_id ics932s401_id[] = {
114 { "ics932s401", ics932s401 }, 112 { "ics932s401", 0 },
115 { } 113 { }
116}; 114};
117MODULE_DEVICE_TABLE(i2c, ics932s401_id); 115MODULE_DEVICE_TABLE(i2c, ics932s401_id);
@@ -125,7 +123,7 @@ static struct i2c_driver ics932s401_driver = {
125 .remove = ics932s401_remove, 123 .remove = ics932s401_remove,
126 .id_table = ics932s401_id, 124 .id_table = ics932s401_id,
127 .detect = ics932s401_detect, 125 .detect = ics932s401_detect,
128 .address_data = &addr_data, 126 .address_list = normal_i2c,
129}; 127};
130 128
131static struct ics932s401_data *ics932s401_update_device(struct device *dev) 129static struct ics932s401_data *ics932s401_update_device(struct device *dev)
@@ -413,36 +411,29 @@ static ssize_t show_spread(struct device *dev,
413} 411}
414 412
415/* Return 0 if detection is successful, -ENODEV otherwise */ 413/* Return 0 if detection is successful, -ENODEV otherwise */
416static int ics932s401_detect(struct i2c_client *client, int kind, 414static int ics932s401_detect(struct i2c_client *client,
417 struct i2c_board_info *info) 415 struct i2c_board_info *info)
418{ 416{
419 struct i2c_adapter *adapter = client->adapter; 417 struct i2c_adapter *adapter = client->adapter;
418 int vendor, device, revision;
420 419
421 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 420 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
422 return -ENODEV; 421 return -ENODEV;
423 422
424 if (kind <= 0) { 423 vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV);
425 int vendor, device, revision; 424 vendor >>= 8;
426 425 revision = vendor >> ICS932S401_REV_SHIFT;
427 vendor = i2c_smbus_read_word_data(client, 426 vendor &= ICS932S401_VENDOR_MASK;
428 ICS932S401_REG_VENDOR_REV); 427 if (vendor != ICS932S401_VENDOR)
429 vendor >>= 8; 428 return -ENODEV;
430 revision = vendor >> ICS932S401_REV_SHIFT; 429
431 vendor &= ICS932S401_VENDOR_MASK; 430 device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE);
432 if (vendor != ICS932S401_VENDOR) 431 device >>= 8;
433 return -ENODEV; 432 if (device != ICS932S401_DEVICE)
434 433 return -ENODEV;
435 device = i2c_smbus_read_word_data(client, 434
436 ICS932S401_REG_DEVICE); 435 if (revision != ICS932S401_REV)
437 device >>= 8; 436 dev_info(&adapter->dev, "Unknown revision %d\n", revision);
438 if (device != ICS932S401_DEVICE)
439 return -ENODEV;
440
441 if (revision != ICS932S401_REV)
442 dev_info(&adapter->dev, "Unknown revision %d\n",
443 revision);
444 } else
445 dev_dbg(&adapter->dev, "detection forced\n");
446 437
447 strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); 438 strlcpy(info->type, "ics932s401", I2C_NAME_SIZE);
448 439
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 60b0b1a4fb3a..668d41e594a9 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/ioc4.h> 31#include <linux/ioc4.h>
32#include <linux/ktime.h> 32#include <linux/ktime.h>
33#include <linux/slab.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/time.h> 35#include <linux/time.h>
35#include <asm/io.h> 36#include <asm/io.h>
@@ -138,7 +139,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
138 * even though the following code utilizes external interrupt registers 139 * even though the following code utilizes external interrupt registers
139 * to perform the speed calculation. 140 * to perform the speed calculation.
140 */ 141 */
141static void 142static void __devinit
142ioc4_clock_calibrate(struct ioc4_driver_data *idd) 143ioc4_clock_calibrate(struct ioc4_driver_data *idd)
143{ 144{
144 union ioc4_int_out int_out; 145 union ioc4_int_out int_out;
@@ -230,7 +231,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
230 * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. 231 * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
231 * If neither is present, it's a PCI-RT. 232 * If neither is present, it's a PCI-RT.
232 */ 233 */
233static unsigned int 234static unsigned int __devinit
234ioc4_variant(struct ioc4_driver_data *idd) 235ioc4_variant(struct ioc4_driver_data *idd)
235{ 236{
236 struct pci_dev *pdev = NULL; 237 struct pci_dev *pdev = NULL;
@@ -269,18 +270,16 @@ ioc4_variant(struct ioc4_driver_data *idd)
269 return IOC4_VARIANT_PCI_RT; 270 return IOC4_VARIANT_PCI_RT;
270} 271}
271 272
272static void 273static void __devinit
273ioc4_load_modules(struct work_struct *work) 274ioc4_load_modules(struct work_struct *work)
274{ 275{
275 /* arg just has to be freed */
276
277 request_module("sgiioc4"); 276 request_module("sgiioc4");
278
279 kfree(work);
280} 277}
281 278
279static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
280
282/* Adds a new instance of an IOC4 card */ 281/* Adds a new instance of an IOC4 card */
283static int 282static int __devinit
284ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) 283ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
285{ 284{
286 struct ioc4_driver_data *idd; 285 struct ioc4_driver_data *idd;
@@ -395,21 +394,12 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
395 * PCI device. 394 * PCI device.
396 */ 395 */
397 if (idd->idd_variant != IOC4_VARIANT_PCI_RT) { 396 if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
398 struct work_struct *work; 397 /* Request the module from a work procedure as the modprobe
399 work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); 398 * goes out to a userland helper and that will hang if done
400 if (!work) { 399 * directly from ioc4_probe().
401 printk(KERN_WARNING 400 */
402 "%s: IOC4 unable to allocate memory for " 401 printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
403 "load of sub-modules.\n", __func__); 402 schedule_work(&ioc4_load_modules_work);
404 } else {
405 /* Request the module from a work procedure as the
406 * modprobe goes out to a userland helper and that
407 * will hang if done directly from ioc4_probe().
408 */
409 printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
410 INIT_WORK(work, ioc4_load_modules);
411 schedule_work(work);
412 }
413 } 403 }
414 404
415 return 0; 405 return 0;
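For reference, the static-work pattern the ioc4 change switches to looks roughly like this when gathered into one minimal module skeleton; names other than the scheduling and flush calls are placeholders.

/* Minimal sketch of the static DECLARE_WORK pattern adopted above (illustrative names). */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>

static void example_load_modules(struct work_struct *work)
{
	request_module("sgiioc4");	/* may block; runs safely in process context */
}
static DECLARE_WORK(example_load_modules_work, example_load_modules);

static int __init example_init(void)
{
	schedule_work(&example_load_modules_work);	/* defer the modprobe */
	return 0;
}

static void __exit example_exit(void)
{
	flush_work_sync(&example_load_modules_work);	/* ensure it finished before unload */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");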
@@ -425,7 +415,7 @@ out:
425} 415}
426 416
427/* Removes a particular instance of an IOC4 card. */ 417/* Removes a particular instance of an IOC4 card. */
428static void 418static void __devexit
429ioc4_remove(struct pci_dev *pdev) 419ioc4_remove(struct pci_dev *pdev)
430{ 420{
431 struct ioc4_submodule *is; 421 struct ioc4_submodule *is;
@@ -476,7 +466,7 @@ static struct pci_driver ioc4_driver = {
476 .name = "IOC4", 466 .name = "IOC4",
477 .id_table = ioc4_id_table, 467 .id_table = ioc4_id_table,
478 .probe = ioc4_probe, 468 .probe = ioc4_probe,
479 .remove = ioc4_remove, 469 .remove = __devexit_p(ioc4_remove),
480}; 470};
481 471
482MODULE_DEVICE_TABLE(pci, ioc4_id_table); 472MODULE_DEVICE_TABLE(pci, ioc4_id_table);
@@ -486,18 +476,18 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table);
486 *********************/ 476 *********************/
487 477
488/* Module load */ 478/* Module load */
489static int __devinit 479static int __init
490ioc4_init(void) 480ioc4_init(void)
491{ 481{
492 return pci_register_driver(&ioc4_driver); 482 return pci_register_driver(&ioc4_driver);
493} 483}
494 484
495/* Module unload */ 485/* Module unload */
496static void __devexit 486static void __exit
497ioc4_exit(void) 487ioc4_exit(void)
498{ 488{
499 /* Ensure ioc4_load_modules() has completed before exiting */ 489 /* Ensure ioc4_load_modules() has completed before exiting */
500 flush_scheduled_work(); 490 flush_work_sync(&ioc4_load_modules_work);
501 pci_unregister_driver(&ioc4_driver); 491 pci_unregister_driver(&ioc4_driver);
502} 492}
503 493
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
new file mode 100644
index 000000000000..307aada5fffe
--- /dev/null
+++ b/drivers/misc/isl29020.c
@@ -0,0 +1,248 @@
1/*
2 * isl29020.c - Intersil ALS Driver
3 *
4 * Copyright (C) 2008 Intel Corp
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 *
22 * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/i2c.h>
29#include <linux/err.h>
30#include <linux/delay.h>
31#include <linux/sysfs.h>
32#include <linux/pm_runtime.h>
33
34static DEFINE_MUTEX(mutex);
35
36static ssize_t als_sensing_range_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38{
39 struct i2c_client *client = to_i2c_client(dev);
40 int val;
41
42 val = i2c_smbus_read_byte_data(client, 0x00);
43
44 if (val < 0)
45 return val;
46 return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
47
48}
49
50static ssize_t als_lux_input_data_show(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct i2c_client *client = to_i2c_client(dev);
54 int ret_val, val;
55 unsigned long int lux;
56 int temp;
57
58 pm_runtime_get_sync(dev);
59 msleep(100);
60
61 mutex_lock(&mutex);
62 temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
63 if (temp < 0) {
64 pm_runtime_put_sync(dev);
65 mutex_unlock(&mutex);
66 return temp;
67 }
68
69 ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
70 mutex_unlock(&mutex);
71
72 if (ret_val < 0) {
73 pm_runtime_put_sync(dev);
74 return ret_val;
75 }
76
77 ret_val |= temp << 8;
78 val = i2c_smbus_read_byte_data(client, 0x00);
79 pm_runtime_put_sync(dev);
80 if (val < 0)
81 return val;
82 lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536;
83 return sprintf(buf, "%ld\n", lux);
84}
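Worked example of the conversion above: with range bits val & 3 == 2 the full scale is (1 << 4) * 1000 = 16000 lux, so a 16-bit reading of 8192 maps to 16000 * 8192 / 65536 = 2000 lux. A stand-alone check with those hypothetical values:

/* Stand-alone check of the lux scaling used above (example input values). */
#include <stdio.h>

int main(void)
{
	int val = 0x02;		/* range bits: full scale 16000 lux */
	int ret_val = 8192;	/* hypothetical 16-bit sensor reading */
	unsigned long lux;

	lux = ((1 << (2 * (val & 3))) * 1000UL * ret_val) / 65536;
	printf("%lu lux\n", lux);	/* prints 2000 */
	return 0;
}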
85
86static ssize_t als_sensing_range_store(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count)
88{
89 struct i2c_client *client = to_i2c_client(dev);
90 int ret_val;
91 unsigned long val;
92
93 if (strict_strtoul(buf, 10, &val))
94 return -EINVAL;
95 if (val < 1 || val > 64000)
96 return -EINVAL;
97
98 /* Pick the smallest sensor range that will meet our requirements */
99 if (val <= 1000)
100 val = 1;
101 else if (val <= 4000)
102 val = 2;
103 else if (val <= 16000)
104 val = 3;
105 else
106 val = 4;
107
108 ret_val = i2c_smbus_read_byte_data(client, 0x00);
109 if (ret_val < 0)
110 return ret_val;
111
112 ret_val &= 0xFC; /*reset the bit before setting them */
113 ret_val |= val - 1;
114 ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
115
116 if (ret_val < 0)
117 return ret_val;
118 return count;
119}
120
121static void als_set_power_state(struct i2c_client *client, int enable)
122{
123 int ret_val;
124
125 ret_val = i2c_smbus_read_byte_data(client, 0x00);
126 if (ret_val < 0)
127 return;
128
129 if (enable)
130 ret_val |= 0x80;
131 else
132 ret_val &= 0x7F;
133
134 i2c_smbus_write_byte_data(client, 0x00, ret_val);
135}
136
137static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
138 als_sensing_range_show, als_sensing_range_store);
139static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
140
141static struct attribute *mid_att_als[] = {
142 &dev_attr_lux0_sensor_range.attr,
143 &dev_attr_lux0_input.attr,
144 NULL
145};
146
147static struct attribute_group m_als_gr = {
148 .name = "isl29020",
149 .attrs = mid_att_als
150};
151
152static int als_set_default_config(struct i2c_client *client)
153{
154 int retval;
155
156 retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
157 if (retval < 0) {
158 dev_err(&client->dev, "default write failed.");
159 return retval;
160 }
161	return 0;
162}
163
164static int isl29020_probe(struct i2c_client *client,
165 const struct i2c_device_id *id)
166{
167 int res;
168
169 res = als_set_default_config(client);
170 if (res < 0)
171 return res;
172
173 res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
174 if (res) {
175 dev_err(&client->dev, "isl29020: device create file failed\n");
176 return res;
177 }
178 dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
179 als_set_power_state(client, 0);
180 pm_runtime_enable(&client->dev);
181 return res;
182}
183
184static int isl29020_remove(struct i2c_client *client)
185{
186 sysfs_remove_group(&client->dev.kobj, &m_als_gr);
187 return 0;
188}
189
190static struct i2c_device_id isl29020_id[] = {
191 { "isl29020", 0 },
192 { }
193};
194
195MODULE_DEVICE_TABLE(i2c, isl29020_id);
196
197#ifdef CONFIG_PM
198
199static int isl29020_runtime_suspend(struct device *dev)
200{
201 struct i2c_client *client = to_i2c_client(dev);
202 als_set_power_state(client, 0);
203 return 0;
204}
205
206static int isl29020_runtime_resume(struct device *dev)
207{
208 struct i2c_client *client = to_i2c_client(dev);
209 als_set_power_state(client, 1);
210 return 0;
211}
212
213static const struct dev_pm_ops isl29020_pm_ops = {
214 .runtime_suspend = isl29020_runtime_suspend,
215 .runtime_resume = isl29020_runtime_resume,
216};
217
218#define ISL29020_PM_OPS (&isl29020_pm_ops)
219#else /* CONFIG_PM */
220#define ISL29020_PM_OPS NULL
221#endif /* CONFIG_PM */
222
223static struct i2c_driver isl29020_driver = {
224 .driver = {
225 .name = "isl29020",
226 .pm = ISL29020_PM_OPS,
227 },
228 .probe = isl29020_probe,
229 .remove = isl29020_remove,
230 .id_table = isl29020_id,
231};
232
233static int __init sensor_isl29020_init(void)
234{
235 return i2c_add_driver(&isl29020_driver);
236}
237
238static void __exit sensor_isl29020_exit(void)
239{
240 i2c_del_driver(&isl29020_driver);
241}
242
243module_init(sensor_isl29020_init);
244module_exit(sensor_isl29020_exit);
245
246MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
247MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
248MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 000000000000..9e4b88fb57f1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
1config IWMC3200TOP
2 tristate "Intel Wireless MultiCom Top Driver"
3 depends on MMC && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6	  Intel Wireless MultiCom 3200 Top driver is responsible
7	  for firmware load and enumeration of the enabled COMs
8
9config IWMC3200TOP_DEBUG
10 bool "Enable full debug output of iwmc3200top Driver"
11 depends on IWMC3200TOP
12 ---help---
13 Enable full debug output of iwmc3200top Driver
14
15config IWMC3200TOP_DEBUGFS
16 bool "Enable Debugfs debugging interface for iwmc3200top"
17 depends on IWMC3200TOP
18 ---help---
19 Enable creation of debugfs files for iwmc3200top
20
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 000000000000..fbf53fb4634e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
1# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
2# drivers/misc/iwmc3200top/Makefile
3#
4# Copyright (C) 2009 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License version
8# 2 as published by the Free Software Foundation.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18# 02110-1301, USA.
19#
20#
21# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
22# -
23#
24#
25
26obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
27iwmc3200top-objs := main.o fw-download.o
28iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
29iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 000000000000..62fbaec48207
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,137 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debugfs.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/ctype.h>
31#include <linux/mmc/sdio_func.h>
32#include <linux/mmc/sdio.h>
33#include <linux/debugfs.h>
34
35#include "iwmc3200top.h"
36#include "fw-msg.h"
37#include "log.h"
38#include "debugfs.h"
39
40
41
42/* Constants definition */
43#define HEXADECIMAL_RADIX 16
44
45/* Functions definition */
46
47
48#define DEBUGFS_ADD(name, parent) do { \
49 dbgfs->dbgfs_##parent##_files.file_##name = \
50 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
51 &iwmct_dbgfs_##name##_ops); \
52} while (0)
53
54#define DEBUGFS_RM(name) do { \
55 debugfs_remove(name); \
56 name = NULL; \
57} while (0)
58
59#define DEBUGFS_READ_FUNC(name) \
60ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
61 char __user *user_buf, \
62 size_t count, loff_t *ppos);
63
64#define DEBUGFS_WRITE_FUNC(name) \
65ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
66 const char __user *user_buf, \
67 size_t count, loff_t *ppos);
68
69#define DEBUGFS_READ_FILE_OPS(name) \
70 DEBUGFS_READ_FUNC(name) \
71 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
72 .read = iwmct_dbgfs_##name##_read, \
73 .open = iwmct_dbgfs_open_file_generic, \
74 .llseek = generic_file_llseek, \
75 };
76
77#define DEBUGFS_WRITE_FILE_OPS(name) \
78 DEBUGFS_WRITE_FUNC(name) \
79 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
80 .write = iwmct_dbgfs_##name##_write, \
81 .open = iwmct_dbgfs_open_file_generic, \
82 .llseek = generic_file_llseek, \
83 };
84
85#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
86 DEBUGFS_READ_FUNC(name) \
87 DEBUGFS_WRITE_FUNC(name) \
88 static const struct file_operations iwmct_dbgfs_##name##_ops = {\
89 .write = iwmct_dbgfs_##name##_write, \
90 .read = iwmct_dbgfs_##name##_read, \
91 .open = iwmct_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93 };
94
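To make the macro machinery above easier to follow, this is approximately what DEBUGFS_READ_FILE_OPS(counter) would expand to for a hypothetical attribute named counter; the resulting ops would then be hooked up with DEBUGFS_ADD(counter, drv) under dir_drv. Only the shape matters here, the attribute itself does not exist in the driver.

/* Approximate expansion of DEBUGFS_READ_FILE_OPS(counter) for a hypothetical attribute. */
ssize_t iwmct_dbgfs_counter_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);

static const struct file_operations iwmct_dbgfs_counter_ops = {
	.read   = iwmct_dbgfs_counter_read,
	.open   = iwmct_dbgfs_open_file_generic,	/* shared open helper the macros assume */
	.llseek = generic_file_llseek,
};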
95
96/* Debugfs file ops definitions */
97
98/*
99 * Create the debugfs files and directories
100 *
101 */
102void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
103{
104 struct iwmct_debugfs *dbgfs;
105
106 dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
107 if (!dbgfs) {
108 LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
109 sizeof(struct iwmct_debugfs));
110 return;
111 }
112
113 priv->dbgfs = dbgfs;
114 dbgfs->name = name;
115 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
116 if (!dbgfs->dir_drv) {
117 LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
118 return;
119 }
120
121 return;
122}
123
124/**
125 * Remove the debugfs files and directories
126 *
127 */
128void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
129{
130 if (!dbgfs)
131 return;
132
133 DEBUGFS_RM(dbgfs->dir_drv);
134 kfree(dbgfs);
135 dbgfs = NULL;
136}
137
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 000000000000..71d45759b40f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debugfs.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __DEBUGFS_H__
28#define __DEBUGFS_H__
29
30
31#ifdef CONFIG_IWMC3200TOP_DEBUGFS
32
33struct iwmct_debugfs {
34 const char *name;
35 struct dentry *dir_drv;
36 struct dir_drv_files {
37 } dbgfs_drv_files;
38};
39
40void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
41void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
42
43#else /* CONFIG_IWMC3200TOP_DEBUGFS */
44
45struct iwmct_debugfs;
46
47static inline void
48iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
49{}
50
51static inline void
52iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
53{}
54
55#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
56
57#endif /* __DEBUGFS_H__ */
58
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 000000000000..e27afde6e99f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,358 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-download.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/slab.h>
30#include <asm/unaligned.h>
31
32#include "iwmc3200top.h"
33#include "log.h"
34#include "fw-msg.h"
35
36#define CHECKSUM_BYTES_NUM sizeof(u32)
37
38/**
39 init parser struct with file
40 */
41static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
42 size_t file_size, size_t block_size)
43{
44 struct iwmct_parser *parser = &priv->parser;
45 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
46
47 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
48
49 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
50
51 parser->file = file;
52 parser->file_size = file_size;
53 parser->cur_pos = 0;
54 parser->entry_point = 0;
55 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
58 return -ENOMEM;
59 }
60 parser->buf_size = block_size;
61
62 /* extract fw versions */
63 memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
64 LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
65 "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
66 fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
67 fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
68 fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
69 fw_hdr->tic_name);
70
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72
73 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
74 return 0;
75}
76
77static bool iwmct_checksum(struct iwmct_priv *priv)
78{
79 struct iwmct_parser *parser = &priv->parser;
80 __le32 *file = (__le32 *)parser->file;
81 int i, pad, steps;
82 u32 accum = 0;
83 u32 checksum;
84 u32 mask = 0xffffffff;
85
86 pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
87 steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
88
89 LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
90
91 for (i = 0; i < steps; i++)
92 accum += le32_to_cpu(file[i]);
93
94 if (pad) {
95 mask <<= 8 * (4 - pad);
96 accum += le32_to_cpu(file[steps]) & mask;
97 }
98
99 checksum = get_unaligned_le32((__le32 *)(parser->file +
100 parser->file_size - CHECKSUM_BYTES_NUM));
101
102 LOG_INFO(priv, FW_DOWNLOAD,
103 "compare checksum accum=0x%x to checksum=0x%x\n",
104 accum, checksum);
105
106 return checksum == accum;
107}
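iwmct_checksum() sums the image as little-endian 32-bit words and compares the result against the last four bytes of the file. A host-side sketch that mirrors the same arithmetic, including the masking of a non-word-aligned tail, assuming a little-endian host and the image already in memory:

/* Host-side re-implementation of the word-sum check above (sketch, little-endian host). */
#include <stdint.h>
#include <string.h>

static int fw_checksum_ok(const uint8_t *file, size_t size)
{
	uint32_t accum = 0, word, expect, mask = 0xffffffff;
	size_t body, steps, pad, i;

	if (size < 4)
		return 0;
	body = size - 4;			/* last 4 bytes hold the stored checksum */
	steps = body / 4;
	pad = body % 4;

	for (i = 0; i < steps; i++) {
		memcpy(&word, file + 4 * i, 4);
		accum += word;
	}
	if (pad) {				/* same tail masking as the driver */
		mask <<= 8 * (4 - pad);
		memcpy(&word, file + 4 * steps, 4);
		accum += word & mask;
	}
	memcpy(&expect, file + body, 4);
	return accum == expect;
}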
108
109static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
110 size_t *sec_size, __le32 *sec_addr)
111{
112 struct iwmct_parser *parser = &priv->parser;
113 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr;
115
116 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
117
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) {
120
121 sec_hdr = (struct iwmct_fw_sec_hdr *)
122 (parser->file + parser->cur_pos);
123 parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
124
125 LOG_INFO(priv, FW_DOWNLOAD,
126 "sec hdr: type=%s addr=0x%x size=%d\n",
127 sec_hdr->type, sec_hdr->target_addr,
128 sec_hdr->data_size);
129
130 if (strcmp(sec_hdr->type, "ENT") == 0)
131 parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
132 else if (strcmp(sec_hdr->type, "LBL") == 0)
133 strcpy(dbg->label_fw, parser->file + parser->cur_pos);
134 else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
135 (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
136 ((strcmp(sec_hdr->type, "GPS") == 0) &&
137 (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
138 ((strcmp(sec_hdr->type, "BTH") == 0) &&
139 (priv->barker & BARKER_DNLOAD_BT_MSK))) {
140 *sec_addr = sec_hdr->target_addr;
141 *sec_size = le32_to_cpu(sec_hdr->data_size);
142 *p_sec = parser->file + parser->cur_pos;
143 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
144 return 1;
145 } else if (strcmp(sec_hdr->type, "LOG") != 0)
146 LOG_WARNING(priv, FW_DOWNLOAD,
147 "skipping section type %s\n",
148 sec_hdr->type);
149
150 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
151 LOG_INFO(priv, FW_DOWNLOAD,
152 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 }
154
155 LOG_TRACE(priv, INIT, "<--\n");
156 return 0;
157}
158
159static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
160 size_t sec_size, __le32 addr)
161{
162 struct iwmct_parser *parser = &priv->parser;
163 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
164 const u8 *cur_block = p_sec;
165 size_t sent = 0;
166 int cnt = 0;
167 int ret = 0;
168 u32 cmd = 0;
169
170 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size);
173
174 while (sent < sec_size) {
175 int i;
176 u32 chksm = 0;
177 u32 reset = atomic_read(&priv->reset);
178 /* actual FW data */
179 u32 data_size = min(parser->buf_size - sizeof(*hdr),
180 sec_size - sent);
181 /* Pad to block size */
182 u32 trans_size = (data_size + sizeof(*hdr) +
183 IWMC_SDIO_BLK_SIZE - 1) &
184 ~(IWMC_SDIO_BLK_SIZE - 1);
185 ++cnt;
186
187		/* in case of reset, interrupt FW download */
188 if (reset) {
189 LOG_INFO(priv, FW_DOWNLOAD,
190 "Reset detected. Abort FW download!!!");
191 ret = -ECANCELED;
192 goto exit;
193 }
194
195 memset(parser->buf, 0, parser->buf_size);
196 cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
197 cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
198 cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
199 cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
200 hdr->data_size = cpu_to_le32(data_size);
201 hdr->target_addr = addr;
202
203 /* checksum is allowed for sizes divisible by 4 */
204 if (data_size & 0x3)
205 cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
206
207 memcpy(hdr->data, cur_block, data_size);
208
209
210 if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
211
212 chksm = data_size + le32_to_cpu(addr) + cmd;
213 for (i = 0; i < data_size >> 2; i++)
214 chksm += ((u32 *)cur_block)[i];
215
216 hdr->block_chksm = cpu_to_le32(chksm);
217 LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
218 hdr->block_chksm);
219 }
220
221 LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
222 "sec_size=%zd, startAddress 0x%X\n",
223 cnt, trans_size, sent, sec_size, addr);
224
225 if (priv->dbg.dump)
226 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
227
228
229 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */
231 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, parser->buf, trans_size);
233 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret);
236 goto exit;
237 }
238
239 addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
240 sent += data_size;
241 cur_block = p_sec + sent;
242
243 if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
244 LOG_INFO(priv, FW_DOWNLOAD,
245 "Block number limit is reached [%d]\n",
246 priv->dbg.blocks);
247 break;
248 }
249 }
250
251 if (sent < sec_size)
252 ret = -EINVAL;
253exit:
254 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
255 return ret;
256}
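The trans_size computation above rounds the header plus payload up to the 256-byte SDIO block size; for example, data_size = 1000 with a 16-byte header gives (1000 + 16 + 255) & ~255 = 1024, i.e. four blocks. A quick stand-alone check (the header size is an assumption for illustration):

/* Round-up-to-block illustration with example numbers (header size assumed). */
#include <stdio.h>

#define IWMC_SDIO_BLK_SIZE 256

int main(void)
{
	unsigned int data_size = 1000, hdr_size = 16;	/* hypothetical sizes */
	unsigned int trans_size = (data_size + hdr_size + IWMC_SDIO_BLK_SIZE - 1) &
				  ~(IWMC_SDIO_BLK_SIZE - 1);

	printf("%u\n", trans_size);	/* prints 1024 (four 256-byte blocks) */
	return 0;
}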
257
258static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
259{
260 struct iwmct_parser *parser = &priv->parser;
261 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
262 int ret;
263 u32 cmd;
264
265 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
266
267 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
269 if (jump) {
270 cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
271 hdr->target_addr = cpu_to_le32(parser->entry_point);
272 LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
273 parser->entry_point);
274 } else {
275 cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
276 LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
277 }
278
279 hdr->cmd = cpu_to_le32(cmd);
280
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */
283 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287
288 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
289 return 0;
290}
291
292int iwmct_fw_load(struct iwmct_priv *priv)
293{
294 const u8 *fw_name = FW_NAME(FW_API_VER);
295 const struct firmware *raw;
296 const u8 *pdata;
297 size_t len;
298 __le32 addr;
299 int ret;
300
301
302 LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
303 priv->barker);
304 LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
305 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
306 LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
307 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
308 LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
309 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
310
311
312 /* get the firmware */
313 ret = request_firmware(&raw, fw_name, &priv->func->dev);
314 if (ret < 0) {
315 LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
316 fw_name, ret);
317 goto exit;
318 }
319
320 if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
321		LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller than (%zd) (%zd)\n",
322 fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
323 goto exit;
324 }
325
326 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
327
328 /* clear parser struct */
329 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
330 if (ret < 0) {
331 LOG_ERROR(priv, FW_DOWNLOAD,
332 "iwmct_parser_init failed: Reason %d\n", ret);
333 goto exit;
334 }
335
336 if (!iwmct_checksum(priv)) {
337 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
338 ret = -EINVAL;
339 goto exit;
340 }
341
342 /* download firmware to device */
343 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
344 ret = iwmct_download_section(priv, pdata, len, addr);
345 if (ret) {
346 LOG_ERROR(priv, FW_DOWNLOAD,
347 "%s download section failed\n", fw_name);
348 goto exit;
349 }
350 }
351
352 ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
353
354exit:
355 kfree(priv->parser.buf);
356 release_firmware(raw);
357 return ret;
358}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 000000000000..9e26b75bd482
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-msg.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __FWMSG_H__
28#define __FWMSG_H__
29
30#define COMM_TYPE_D2H 0xFF
31#define COMM_TYPE_H2D 0xEE
32
33#define COMM_CATEGORY_OPERATIONAL 0x00
34#define COMM_CATEGORY_DEBUG 0x01
35#define COMM_CATEGORY_TESTABILITY 0x02
36#define COMM_CATEGORY_DIAGNOSTICS 0x03
37
38#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
39
40#define FW_LOG_SRC_MAX 32
41#define FW_LOG_SRC_ALL 255
42
43#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
44
45#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
46#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
47#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
48#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
49#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
50#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
51#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
52#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
53#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
54#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
55
56#define OP_OPR_ALIVE cpu_to_le16(0x0010)
57#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
58#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
59#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
60
61#define CMD_FLAG_PADDING_256 0x80
62
63#define FW_HCMD_BLOCK_SIZE 256
64
65struct msg_hdr {
66 u8 type;
67 u8 category;
68 __le16 opcode;
69 u8 seqnum;
70 u8 flags;
71 __le16 length;
72} __attribute__((__packed__));
73
74struct log_hdr {
75 __le32 timestamp;
76 u8 severity;
77 u8 logsource;
78 __le16 reserved;
79} __attribute__((__packed__));
80
81struct mdump_hdr {
82 u8 dmpid;
83 u8 frag;
84 __le16 size;
85 __le32 addr;
86} __attribute__((__packed__));
87
88struct top_msg {
89 struct msg_hdr hdr;
90 union {
91 /* D2H messages */
92 struct {
93 struct log_hdr log_hdr;
94 u8 data[1];
95 } __attribute__((__packed__)) log;
96
97 struct {
98 struct log_hdr log_hdr;
99 struct mdump_hdr md_hdr;
100 u8 data[1];
101 } __attribute__((__packed__)) mdump;
102
103 /* H2D messages */
104 struct {
105 u8 logsource;
106 u8 sevmask;
107 } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
108 struct mdump_hdr mdump_req;
109 } u;
110} __attribute__((__packed__));
111
112
113#endif /* __FWMSG_H__ */
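
The header above defines an 8-byte packed message header that prefixes every CommHub frame. As a quick sanity check of that wire layout, here is a small standalone userspace sketch (not driver code); the __le16 fields become plain uint16_t because only sizes and offsets are being verified here.

/* Standalone userspace sketch: double-check the packed layout mirrored
 * from struct msg_hdr above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct msg_hdr {
	uint8_t  type;      /* COMM_TYPE_D2H / COMM_TYPE_H2D */
	uint8_t  category;  /* COMM_CATEGORY_* */
	uint16_t opcode;    /* __le16 on the wire */
	uint8_t  seqnum;
	uint8_t  flags;     /* e.g. CMD_FLAG_PADDING_256 */
	uint16_t length;    /* __le16: payload bytes after the header */
} __attribute__((__packed__));

int main(void)
{
	assert(sizeof(struct msg_hdr) == 8);
	printf("opcode at offset %zu, length at offset %zu\n",
	       offsetof(struct msg_hdr, opcode),
	       offsetof(struct msg_hdr, length));
	return 0;
}
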
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 000000000000..740ff0738ea8
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,207 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/iwmc3200top.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __IWMC3200TOP_H__
28#define __IWMC3200TOP_H__
29
30#include <linux/workqueue.h>
31
32#define DRV_NAME "iwmc3200top"
33#define FW_API_VER 1
34#define _FW_NAME(api) DRV_NAME "." #api ".fw"
35#define FW_NAME(api) _FW_NAME(api)
36
37#define IWMC_SDIO_BLK_SIZE 256
38#define IWMC_DEFAULT_TR_BLK 64
39#define IWMC_SDIO_DATA_ADDR 0x0
40#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
41#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
42#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
43#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
44
45#define COMM_HUB_HEADER_LENGTH 16
46#define LOGGER_HEADER_LENGTH 10
47
48
49#define BARKER_DNLOAD_BT_POS 0
50#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
51#define BARKER_DNLOAD_GPS_POS 1
52#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
53#define BARKER_DNLOAD_TOP_POS 2
54#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
55#define BARKER_DNLOAD_RESERVED1_POS 3
56#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
57#define BARKER_DNLOAD_JUMP_POS 4
58#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
59#define BARKER_DNLOAD_SYNC_POS 5
60#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
61#define BARKER_DNLOAD_RESERVED2_POS 6
62#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
63#define BARKER_DNLOAD_BARKER_POS 8
64#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
65
66#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
67/* whole field barker */
68#define IWMC_BARKER_ACK 0xfeedbabe
69
70#define IWMC_CMD_SIGNATURE 0xcbbc
71
72#define CMD_HDR_OPCODE_POS 0
73#define CMD_HDR_OPCODE_MSK (0xf << CMD_HDR_OPCODE_POS)
74#define CMD_HDR_RESPONSE_CODE_POS 4
75#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS)
76#define CMD_HDR_USE_CHECKSUM_POS 8
77#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS)
78#define CMD_HDR_RESPONSE_REQUIRED_POS 9
79#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
80#define CMD_HDR_DIRECT_ACCESS_POS 10
81#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS)
82#define CMD_HDR_RESERVED_POS 11
83#define CMD_HDR_RESERVED_MSK (0x1f << CMD_HDR_RESERVED_POS)
84#define CMD_HDR_SIGNATURE_POS 16
85#define CMD_HDR_SIGNATURE_MSK (0xffff << CMD_HDR_SIGNATURE_POS)
86
87enum {
88 IWMC_OPCODE_PING = 0,
89 IWMC_OPCODE_READ = 1,
90 IWMC_OPCODE_WRITE = 2,
91 IWMC_OPCODE_JUMP = 3,
92 IWMC_OPCODE_REBOOT = 4,
93 IWMC_OPCODE_PERSISTENT_WRITE = 5,
94 IWMC_OPCODE_PERSISTENT_READ = 6,
95 IWMC_OPCODE_READ_MODIFY_WRITE = 7,
96 IWMC_OPCODE_LAST_COMMAND = 15
97};
98
99struct iwmct_fw_load_hdr {
100 __le32 cmd;
101 __le32 target_addr;
102 __le32 data_size;
103 __le32 block_chksm;
104 u8 data[0];
105};
106
107/**
108 * struct iwmct_fw_hdr
109 * holds all sw components versions
110 */
111struct iwmct_fw_hdr {
112 u8 top_major;
113 u8 top_minor;
114 u8 top_revision;
115 u8 gps_major;
116 u8 gps_minor;
117 u8 gps_revision;
118 u8 bt_major;
119 u8 bt_minor;
120 u8 bt_revision;
121 u8 tic_name[31];
122};
123
124/**
125 * struct iwmct_fw_sec_hdr
126 * @type: function type
127 * @data_size: section's data size
128 * @target_addr: download address
129 */
130struct iwmct_fw_sec_hdr {
131 u8 type[4];
132 __le32 data_size;
133 __le32 target_addr;
134};
135
136/**
137 * struct iwmct_parser
138 * @file: fw image
139 * @file_size: fw size
140 * @cur_pos: position in file
141 * @buf: temp buf for download
142 * @buf_size: size of buf
143 * @entry_point: address to jump in fw kick-off
144 */
145struct iwmct_parser {
146 const u8 *file;
147 size_t file_size;
148 size_t cur_pos;
149 u8 *buf;
150 size_t buf_size;
151 u32 entry_point;
152 struct iwmct_fw_hdr versions;
153};
154
155
156struct iwmct_work_struct {
157 struct list_head list;
158 ssize_t iosize;
159};
160
161struct iwmct_dbg {
162 int blocks;
163 bool dump;
164 bool jump;
165 bool direct;
166 bool checksum;
167 bool fw_download;
168 int block_size;
169 int download_trans_blks;
170
171 char label_fw[256];
172};
173
174struct iwmct_debugfs;
175
176struct iwmct_priv {
177 struct sdio_func *func;
178 struct iwmct_debugfs *dbgfs;
179 struct iwmct_parser parser;
180 atomic_t reset;
181 atomic_t dev_sync;
182 u32 trans_len;
183 u32 barker;
184 struct iwmct_dbg dbg;
185
186 /* drivers work queue */
187 struct workqueue_struct *wq;
188 struct workqueue_struct *bus_rescan_wq;
189 struct work_struct bus_rescan_worker;
190 struct work_struct isr_worker;
191
192 /* drivers wait queue */
193 wait_queue_head_t wait_q;
194
195 /* rx request list */
196 struct list_head read_req_list;
197};
198
199extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
200extern int iwmct_fw_load(struct iwmct_priv *priv);
201
202extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
203extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
204extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
205extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
206
207#endif /* __IWMC3200TOP_H__ */
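
The BARKER_DNLOAD_* definitions above split a 32-bit barker word into per-core download-request bits (bits 0-5), reserved bits and a 24-bit barker pattern in bits 8-31. The following standalone sketch decodes such a word using those bit positions; the sample value is hypothetical and the snippet is illustrative only.

/* Standalone sketch: recognise and decode a REBOOT barker word. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)             (1u << (n))
#define DNLOAD_TOP_MSK     BIT(2)           /* BARKER_DNLOAD_TOP_MSK */
#define DNLOAD_JUMP_MSK    BIT(4)           /* BARKER_DNLOAD_JUMP_MSK */
#define DNLOAD_SYNC_MSK    BIT(5)           /* BARKER_DNLOAD_SYNC_MSK */
#define DNLOAD_BARKER_MSK  (0xffffffu << 8) /* BARKER_DNLOAD_BARKER_MSK */
#define BARKER_REBOOT      (0xdeadbeu << 8) /* IWMC_BARKER_REBOOT */

int main(void)
{
	/* hypothetical word: reboot pattern + TOP download + sync request */
	uint32_t barker = BARKER_REBOOT | DNLOAD_TOP_MSK | DNLOAD_SYNC_MSK;

	if ((barker & DNLOAD_BARKER_MSK) == BARKER_REBOOT)
		printf("reboot barker: top=%d jump=%d sync=%d\n",
		       !!(barker & DNLOAD_TOP_MSK),
		       !!(barker & DNLOAD_JUMP_MSK),
		       !!(barker & DNLOAD_SYNC_MSK));
	return 0;
}
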
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 000000000000..a36a55a49cac
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,348 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/slab.h>
30#include <linux/ctype.h>
31#include "fw-msg.h"
32#include "iwmc3200top.h"
33#include "log.h"
34
35/* Maximal hexadecimal string size of the FW memdump message */
36#define LOG_MSG_SIZE_MAX 12400
37
38/* iwmct_logdefs is a global used by log macros */
39u8 iwmct_logdefs[LOG_SRC_MAX];
40static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
41
42
43static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
44{
45 int i;
46
47 if (src < size)
48 logdefs[src] = logmask;
49 else if (src == LOG_SRC_ALL)
50 for (i = 0; i < size; i++)
51 logdefs[i] = logmask;
52 else
53 return -1;
54
55 return 0;
56}
57
58
59int iwmct_log_set_filter(u8 src, u8 logmask)
60{
61 return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
62}
63
64
65int iwmct_log_set_fw_filter(u8 src, u8 logmask)
66{
67 return _log_set_log_filter(iwmct_fw_logdefs,
68 FW_LOG_SRC_MAX, src, logmask);
69}
70
71
72static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
73 int ilen, char *pref)
74{
75 int pos = 0;
76 int i;
77 int len;
78
79 for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
80 str[pos] = pref[i];
81
82 for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
83 len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
84
85 if (i < ilen)
86 return -1;
87
88 return 0;
89}
90
91/* NOTE: This function is not thread safe.
92 Currently it's called only from sdio rx worker - no race there
93*/
94void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
95{
96 struct top_msg *msg;
97 static char logbuf[LOG_MSG_SIZE_MAX];
98
99 msg = (struct top_msg *)buf;
100
101 if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
102 LOG_ERROR(priv, FW_MSG, "Log message from TOP "
103 "is too short %d (expected %zd)\n",
104 len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
105 return;
106 }
107
108 if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
109 BIT(msg->u.log.log_hdr.severity)) ||
110 !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
111 return;
112
113 switch (msg->hdr.category) {
114 case COMM_CATEGORY_TESTABILITY:
115 if (!(iwmct_logdefs[LOG_SRC_TST] &
116 BIT(msg->u.log.log_hdr.severity)))
117 return;
118 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
119 le16_to_cpu(msg->hdr.length) +
120 sizeof(msg->hdr), "<TST>"))
121 LOG_WARNING(priv, TST,
122 "TOP TST message is too long, truncating...");
123 LOG_WARNING(priv, TST, "%s\n", logbuf);
124 break;
125 case COMM_CATEGORY_DEBUG:
126 if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
127 LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
128 ((u8 *)msg) + sizeof(msg->hdr)
129 + sizeof(msg->u.log.log_hdr));
130 else {
131 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
132 le16_to_cpu(msg->hdr.length)
133 + sizeof(msg->hdr),
134 "<DBG>"))
135 LOG_WARNING(priv, FW_MSG,
136 "TOP DBG message is too long,"
137 "truncating...");
138 LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
139 }
140 break;
141 default:
142 break;
143 }
144}
145
146static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
147{
148 int i, pos, len;
149 for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
150 len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
151 i, logdefs[i]);
152 pos += len;
153 }
154 buf[pos-1] = '\n';
155 buf[pos] = '\0';
156
157 if (i < logdefsz)
158 return -1;
159 return 0;
160}
161
162int log_get_filter_str(char *buf, int size)
163{
164 return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
165}
166
167int log_get_fw_filter_str(char *buf, int size)
168{
169 return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
170}
171
172#define HEXADECIMAL_RADIX 16
173#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
174
175ssize_t show_iwmct_log_level(struct device *d,
176 struct device_attribute *attr, char *buf)
177{
178 struct iwmct_priv *priv = dev_get_drvdata(d);
179 char *str_buf;
180 int buf_size;
181 ssize_t ret;
182
183 buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
184 str_buf = kzalloc(buf_size, GFP_KERNEL);
185 if (!str_buf) {
186 LOG_ERROR(priv, DEBUGFS,
187 "failed to allocate %d bytes\n", buf_size);
188 ret = -ENOMEM;
189 goto exit;
190 }
191
192 if (log_get_filter_str(str_buf, buf_size) < 0) {
193 ret = -EINVAL;
194 goto exit;
195 }
196
197 ret = sprintf(buf, "%s", str_buf);
198
199exit:
200 kfree(str_buf);
201 return ret;
202}
203
204ssize_t store_iwmct_log_level(struct device *d,
205 struct device_attribute *attr,
206 const char *buf, size_t count)
207{
208 struct iwmct_priv *priv = dev_get_drvdata(d);
209 char *token, *str_buf = NULL;
210 long val;
211 ssize_t ret = count;
212 u8 src, mask;
213
214 if (!count)
215 goto exit;
216
217 str_buf = kzalloc(count, GFP_KERNEL);
218 if (!str_buf) {
219 LOG_ERROR(priv, DEBUGFS,
220 "failed to allocate %zd bytes\n", count);
221 ret = -ENOMEM;
222 goto exit;
223 }
224
225 memcpy(str_buf, buf, count);
226
227 while ((token = strsep(&str_buf, ",")) != NULL) {
228 while (isspace(*token))
229 ++token;
230 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
231 LOG_ERROR(priv, DEBUGFS,
232 "failed to convert string to long %s\n",
233 token);
234 ret = -EINVAL;
235 goto exit;
236 }
237
238 mask = val & 0xFF;
239 src = (val & 0XFF00) >> 8;
240 iwmct_log_set_filter(src, mask);
241 }
242
243exit:
244 kfree(str_buf);
245 return ret;
246}
247
248ssize_t show_iwmct_log_level_fw(struct device *d,
249 struct device_attribute *attr, char *buf)
250{
251 struct iwmct_priv *priv = dev_get_drvdata(d);
252 char *str_buf;
253 int buf_size;
254 ssize_t ret;
255
256 buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
257
258 str_buf = kzalloc(buf_size, GFP_KERNEL);
259 if (!str_buf) {
260 LOG_ERROR(priv, DEBUGFS,
261 "failed to allocate %d bytes\n", buf_size);
262 ret = -ENOMEM;
263 goto exit;
264 }
265
266 if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
267 ret = -EINVAL;
268 goto exit;
269 }
270
271 ret = sprintf(buf, "%s", str_buf);
272
273exit:
274 kfree(str_buf);
275 return ret;
276}
277
278ssize_t store_iwmct_log_level_fw(struct device *d,
279 struct device_attribute *attr,
280 const char *buf, size_t count)
281{
282 struct iwmct_priv *priv = dev_get_drvdata(d);
283 struct top_msg cmd;
284 char *token, *str_buf = NULL;
285 ssize_t ret = count;
286 u16 cmdlen = 0;
287 int i;
288 long val;
289 u8 src, mask;
290
291 if (!count)
292 goto exit;
293
294 str_buf = kzalloc(count, GFP_KERNEL);
295 if (!str_buf) {
296 LOG_ERROR(priv, DEBUGFS,
297 "failed to allocate %zd bytes\n", count);
298 ret = -ENOMEM;
299 goto exit;
300 }
301
302 memcpy(str_buf, buf, count);
303
304 cmd.hdr.type = COMM_TYPE_H2D;
305 cmd.hdr.category = COMM_CATEGORY_DEBUG;
306 cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
307
308 for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
309 (i < FW_LOG_SRC_MAX); i++) {
310
311 while (isspace(*token))
312 ++token;
313
314 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
315 LOG_ERROR(priv, DEBUGFS,
316 "failed to convert string to long %s\n",
317 token);
318 ret = -EINVAL;
319 goto exit;
320 }
321
322 mask = val & 0xFF; /* LSB */
323 src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
324 iwmct_log_set_fw_filter(src, mask);
325
326 cmd.u.logdefs[i].logsource = src;
327 cmd.u.logdefs[i].sevmask = mask;
328 }
329
330 cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
331 cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
332
333 ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
334 if (ret) {
335 LOG_ERROR(priv, DEBUGFS,
336 "Failed to send %d bytes of fwcmd, ret=%zd\n",
337 cmdlen, ret);
338 goto exit;
339 } else
340 LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
341
342 ret = count;
343
344exit:
345 kfree(str_buf);
346 return ret;
347}
348
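
The two store handlers above accept a comma-separated list of 16-bit tokens in which the high byte selects the log source and the low byte is a severity bitmask. A minimal userspace sketch of that "0xSSMM" parsing follows; the input string is hypothetical, and strtok()/strtol() stand in for strsep()/strict_strtol().

/* Userspace sketch of the "0xSSMM" token parsing used above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char input[] = "0x0307,0x0502";		/* hypothetical sysfs write */
	char *token = strtok(input, ",");

	while (token) {
		long val = strtol(token, NULL, 16);
		unsigned src  = (val & 0xFF00) >> 8;	/* log source */
		unsigned mask = val & 0xFF;		/* severity bitmask */

		printf("source %u -> severity mask 0x%02X\n", src, mask);
		token = strtok(NULL, ",");
	}
	return 0;
}
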
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 000000000000..4434bb16cea7
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,171 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __LOG_H__
28#define __LOG_H__
29
30
31/* log severity:
32 * The log levels here match FW log levels
33 * so values need to stay as is */
34#define LOG_SEV_CRITICAL 0
35#define LOG_SEV_ERROR 1
36#define LOG_SEV_WARNING 2
37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4
39
40/* Log levels not defined for FW */
41#define LOG_SEV_TRACE 5
42#define LOG_SEV_DUMP 6
43
44#define LOG_SEV_FW_FILTER_ALL \
45 (BIT(LOG_SEV_CRITICAL) | \
46 BIT(LOG_SEV_ERROR) | \
47 BIT(LOG_SEV_WARNING) | \
48 BIT(LOG_SEV_INFO) | \
49 BIT(LOG_SEV_INFOEX))
50
51#define LOG_SEV_FILTER_ALL \
52 (BIT(LOG_SEV_CRITICAL) | \
53 BIT(LOG_SEV_ERROR) | \
54 BIT(LOG_SEV_WARNING) | \
55 BIT(LOG_SEV_INFO) | \
56 BIT(LOG_SEV_INFOEX) | \
57 BIT(LOG_SEV_TRACE) | \
58 BIT(LOG_SEV_DUMP))
59
60/* log source */
61#define LOG_SRC_INIT 0
62#define LOG_SRC_DEBUGFS 1
63#define LOG_SRC_FW_DOWNLOAD 2
64#define LOG_SRC_FW_MSG 3
65#define LOG_SRC_TST 4
66#define LOG_SRC_IRQ 5
67
68#define LOG_SRC_MAX 6
69#define LOG_SRC_ALL 0xFF
70
71/**
72 * Default initialization runtime log level
73 */
74#ifndef LOG_SEV_FILTER_RUNTIME
75#define LOG_SEV_FILTER_RUNTIME \
76 (BIT(LOG_SEV_CRITICAL) | \
77 BIT(LOG_SEV_ERROR) | \
78 BIT(LOG_SEV_WARNING))
79#endif
80
81#ifndef FW_LOG_SEV_FILTER_RUNTIME
82#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
83#endif
84
85#ifdef CONFIG_IWMC3200TOP_DEBUG
86/**
87 * Log macros
88 */
89
90#define priv2dev(priv) (&(priv->func)->dev)
91
92#define LOG_CRITICAL(priv, src, fmt, args...) \
93do { \
94 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
95 dev_crit(priv2dev(priv), "%s %d: " fmt, \
96 __func__, __LINE__, ##args); \
97} while (0)
98
99#define LOG_ERROR(priv, src, fmt, args...) \
100do { \
101 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
102 dev_err(priv2dev(priv), "%s %d: " fmt, \
103 __func__, __LINE__, ##args); \
104} while (0)
105
106#define LOG_WARNING(priv, src, fmt, args...) \
107do { \
108 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
109 dev_warn(priv2dev(priv), "%s %d: " fmt, \
110 __func__, __LINE__, ##args); \
111} while (0)
112
113#define LOG_INFO(priv, src, fmt, args...) \
114do { \
115 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
116 dev_info(priv2dev(priv), "%s %d: " fmt, \
117 __func__, __LINE__, ##args); \
118} while (0)
119
120#define LOG_TRACE(priv, src, fmt, args...) \
121do { \
122 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
123 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
124 __func__, __LINE__, ##args); \
125} while (0)
126
127#define LOG_HEXDUMP(src, ptr, len) \
128do { \
129 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
130 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
131 16, 1, ptr, len, false); \
132} while (0)
133
134void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
135
136extern u8 iwmct_logdefs[];
137
138int iwmct_log_set_filter(u8 src, u8 logmask);
139int iwmct_log_set_fw_filter(u8 src, u8 logmask);
140
141ssize_t show_iwmct_log_level(struct device *d,
142 struct device_attribute *attr, char *buf);
143ssize_t store_iwmct_log_level(struct device *d,
144 struct device_attribute *attr,
145 const char *buf, size_t count);
146ssize_t show_iwmct_log_level_fw(struct device *d,
147 struct device_attribute *attr, char *buf);
148ssize_t store_iwmct_log_level_fw(struct device *d,
149 struct device_attribute *attr,
150 const char *buf, size_t count);
151
152#else
153
154#define LOG_CRITICAL(priv, src, fmt, args...)
155#define LOG_ERROR(priv, src, fmt, args...)
156#define LOG_WARNING(priv, src, fmt, args...)
157#define LOG_INFO(priv, src, fmt, args...)
158#define LOG_TRACE(priv, src, fmt, args...)
159#define LOG_HEXDUMP(src, ptr, len)
160
161static inline void iwmct_log_top_message(struct iwmct_priv *priv,
162 u8 *buf, int len) {}
163static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
164static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
165
166#endif /* CONFIG_IWMC3200TOP_DEBUG */
167
168int log_get_filter_str(char *buf, int size);
169int log_get_fw_filter_str(char *buf, int size);
170
171#endif /* __LOG_H__ */
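
Each LOG_* macro above emits its message only when the severity bit is set in the per-source filter byte. Here is a small standalone sketch of that gating check, seeded with the default runtime filter (critical, error and warning); array size and severity names simply mirror the definitions above.

/* Standalone sketch of the per-source severity gating behind LOG_*. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

enum { SEV_CRITICAL, SEV_ERROR, SEV_WARNING, SEV_INFO, SEV_INFOEX,
       SEV_TRACE, SEV_DUMP };

#define NUM_SRC 6			/* LOG_SRC_MAX */

static uint8_t logdefs[NUM_SRC];	/* one filter byte per source */

static int should_log(unsigned src, unsigned sev)
{
	return !!(logdefs[src] & BIT(sev));
}

int main(void)
{
	unsigned i;

	for (i = 0; i < NUM_SRC; i++)	/* LOG_SEV_FILTER_RUNTIME */
		logdefs[i] = BIT(SEV_CRITICAL) | BIT(SEV_ERROR) |
			     BIT(SEV_WARNING);

	printf("WARNING from src 2 passes: %d\n", should_log(2, SEV_WARNING));
	printf("INFO    from src 2 passes: %d\n", should_log(2, SEV_INFO));
	return 0;
}
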
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 000000000000..c73cef2c3c5e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,666 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/main.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/debugfs.h>
32#include <linux/mmc/sdio_ids.h>
33#include <linux/mmc/sdio_func.h>
34#include <linux/mmc/sdio.h>
35
36#include "iwmc3200top.h"
37#include "log.h"
38#include "fw-msg.h"
39#include "debugfs.h"
40
41
42#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
43#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
44
45#define DRIVER_VERSION "0.1.62"
46
47MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
48MODULE_VERSION(DRIVER_VERSION);
49MODULE_LICENSE("GPL");
50MODULE_AUTHOR(DRIVER_COPYRIGHT);
51MODULE_FIRMWARE(FW_NAME(FW_API_VER));
52
53
54static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
55{
56 return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
57
58}
59int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
60{
61 int ret;
62 sdio_claim_host(priv->func);
63 ret = __iwmct_tx(priv, src, count);
64 sdio_release_host(priv->func);
65 return ret;
66}
67/*
68 * This worker's main task is to wait for OP_OPR_ALIVE
69 * from TOP FW until the ALIVE_MSG_TIMOUT timeout elapses.
70 * When OP_OPR_ALIVE is received it will issue
71 * a call to "bus_rescan_devices".
72 */
73static void iwmct_rescan_worker(struct work_struct *ws)
74{
75 struct iwmct_priv *priv;
76 int ret;
77
78 priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
79
80 LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
81
82 ret = bus_rescan_devices(priv->func->dev.bus);
83 if (ret < 0)
84 LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
85}
86
87static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
88{
89 switch (msg->hdr.opcode) {
90 case OP_OPR_ALIVE:
91 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
92 queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
93 break;
94 default:
95 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
96 msg->hdr.opcode);
97 break;
98 }
99}
100
101
102static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
103{
104 struct top_msg *msg;
105
106 msg = (struct top_msg *)buf;
107
108 if (msg->hdr.type != COMM_TYPE_D2H) {
109 LOG_ERROR(priv, FW_MSG,
110 "Message from TOP with invalid message type 0x%X\n",
111 msg->hdr.type);
112 return;
113 }
114
115 if (len < sizeof(msg->hdr)) {
116 LOG_ERROR(priv, FW_MSG,
117 "Message from TOP is too short for message header "
118 "received %d bytes, expected at least %zd bytes\n",
119 len, sizeof(msg->hdr));
120 return;
121 }
122
123 if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
124 LOG_ERROR(priv, FW_MSG,
125 "Message length (%d bytes) is shorter than "
126 "in header (%d bytes)\n",
127 len, le16_to_cpu(msg->hdr.length));
128 return;
129 }
130
131 switch (msg->hdr.category) {
132 case COMM_CATEGORY_OPERATIONAL:
133 op_top_message(priv, (struct top_msg *)buf);
134 break;
135
136 case COMM_CATEGORY_DEBUG:
137 case COMM_CATEGORY_TESTABILITY:
138 case COMM_CATEGORY_DIAGNOSTICS:
139 iwmct_log_top_message(priv, buf, len);
140 break;
141
142 default:
143 LOG_ERROR(priv, FW_MSG,
144 "Message from TOP with unknown category 0x%X\n",
145 msg->hdr.category);
146 break;
147 }
148}
149
150int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
151{
152 int ret;
153 u8 *buf;
154
155 LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
156
157 /* add padding to 256 for IWMC */
158 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
159
160 LOG_HEXDUMP(FW_MSG, cmd, len);
161
162 if (len > FW_HCMD_BLOCK_SIZE) {
163 LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
164 len, FW_HCMD_BLOCK_SIZE);
165 return -1;
166 }
167
168 buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
169 if (!buf) {
170 LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
171 FW_HCMD_BLOCK_SIZE);
172 return -1;
173 }
174
175 memcpy(buf, cmd, len);
176 ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
177
178 kfree(buf);
179 return ret;
180}
181
182
183static void iwmct_irq_read_worker(struct work_struct *ws)
184{
185 struct iwmct_priv *priv;
186 struct iwmct_work_struct *read_req;
187 __le32 *buf = NULL;
188 int ret;
189 int iosize;
190 u32 barker;
191 bool is_barker;
192
193 priv = container_of(ws, struct iwmct_priv, isr_worker);
194
195 LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
196
197 /* --------------------- Handshake with device -------------------- */
198 sdio_claim_host(priv->func);
199
200 /* all list manipulations have to be protected by
201 * sdio_claim_host/sdio_release_host */
202 if (list_empty(&priv->read_req_list)) {
203 LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
204 goto exit_release;
205 }
206
207 read_req = list_entry(priv->read_req_list.next,
208 struct iwmct_work_struct, list);
209
210 list_del(&read_req->list);
211 iosize = read_req->iosize;
212 kfree(read_req);
213
214 buf = kzalloc(iosize, GFP_KERNEL);
215 if (!buf) {
216 LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
217 goto exit_release;
218 }
219
220 LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
221 iosize, buf, priv->func->num);
222
223 /* read from device */
224 ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
225 if (ret) {
226 LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
227 goto exit_release;
228 }
229
230 LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
231
232 barker = le32_to_cpu(buf[0]);
233
234 /* Verify whether it's a barker and if not - treat as regular Rx */
235 if (barker == IWMC_BARKER_ACK ||
236 (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
237
238		/* A valid barker repeats the same value across the first 4 dwords */
239 is_barker = (buf[1] == buf[0]) &&
240 (buf[2] == buf[0]) &&
241 (buf[3] == buf[0]);
242
243 if (!is_barker) {
244 LOG_WARNING(priv, IRQ,
245 "Potentially inconsistent barker "
246 "%08X_%08X_%08X_%08X\n",
247 le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
248 le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
249 }
250 } else {
251 is_barker = false;
252 }
253
254 /* Handle Top CommHub message */
255 if (!is_barker) {
256 sdio_release_host(priv->func);
257 handle_top_message(priv, (u8 *)buf, iosize);
258 goto exit;
259 } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
260 if (atomic_read(&priv->dev_sync) == 0) {
261 LOG_ERROR(priv, IRQ,
262 "ACK barker arrived out-of-sync\n");
263 goto exit_release;
264 }
265
266		/* Continuing to FW download (after Sync is completed) */
267 atomic_set(&priv->dev_sync, 0);
268 LOG_INFO(priv, IRQ, "ACK barker arrived "
269 "- starting FW download\n");
270 } else { /* REBOOT barker */
271 LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
272 priv->barker = barker;
273
274 if (barker & BARKER_DNLOAD_SYNC_MSK) {
275 /* Send the same barker back */
276 ret = __iwmct_tx(priv, buf, iosize);
277 if (ret) {
278 LOG_ERROR(priv, IRQ,
279 "error %d echoing barker\n", ret);
280 goto exit_release;
281 }
282 LOG_INFO(priv, IRQ, "Echoing barker to device\n");
283 atomic_set(&priv->dev_sync, 1);
284 goto exit_release;
285 }
286
287 /* Continuing to FW download (without Sync) */
288 LOG_INFO(priv, IRQ, "No sync requested "
289 "- starting FW download\n");
290 }
291
292 sdio_release_host(priv->func);
293
294 if (priv->dbg.fw_download)
295 iwmct_fw_load(priv);
296 else
297 LOG_ERROR(priv, IRQ, "FW download not allowed\n");
298
299 goto exit;
300
301exit_release:
302 sdio_release_host(priv->func);
303exit:
304 kfree(buf);
305 LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
306}
307
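
The read worker above distinguishes three cases from the first 32-bit word of a transfer: an ACK barker (resume the firmware download), a REBOOT barker with the SYNC bit set (echo it back and wait for the ACK), or anything else (a regular TOP CommHub message). The sketch below reproduces only that dispatch decision in standalone form; it omits the "all four dwords equal" consistency check and reuses the barker constants from iwmc3200top.h, so treat it as illustrative only.

/* Standalone sketch of the barker/message dispatch in the read worker. */
#include <stdint.h>
#include <stdio.h>

#define BARKER_ACK		0xfeedbabeu		/* IWMC_BARKER_ACK */
#define BARKER_PATTERN_MSK	(0xffffffu << 8)
#define BARKER_REBOOT		(0xdeadbeu << 8)	/* IWMC_BARKER_REBOOT */
#define BARKER_SYNC_MSK		(1u << 5)

enum action { START_DOWNLOAD, ECHO_AND_WAIT, HANDLE_MESSAGE };

static enum action classify(uint32_t first_word)
{
	if (first_word == BARKER_ACK)
		return START_DOWNLOAD;
	if ((first_word & BARKER_PATTERN_MSK) == BARKER_REBOOT)
		return (first_word & BARKER_SYNC_MSK) ?
			ECHO_AND_WAIT : START_DOWNLOAD;
	return HANDLE_MESSAGE;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(BARKER_ACK),
	       classify(BARKER_REBOOT | BARKER_SYNC_MSK),
	       classify(0x12345678));	/* hypothetical message word */
	return 0;
}
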
308static void iwmct_irq(struct sdio_func *func)
309{
310 struct iwmct_priv *priv;
311 int val, ret;
312 int iosize;
313 int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
314 struct iwmct_work_struct *read_req;
315
316 priv = sdio_get_drvdata(func);
317
318 LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
319
320 /* read the function's status register */
321 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
322
323 LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
324
325 if (!val) {
326 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
327 goto exit_clear_intr;
328 }
329
330
331 /*
332 * read 2 bytes of the transaction size
333 * IMPORTANT: sdio transaction size has to be read before clearing
334 * sdio interrupt!!!
335 */
336 val = sdio_readb(priv->func, addr++, &ret);
337 iosize = val;
338 val = sdio_readb(priv->func, addr++, &ret);
339 iosize += val << 8;
340
341 LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
342
343 if (iosize == 0) {
344 LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
345 goto exit_clear_intr;
346 }
347
348 /* allocate a work structure to pass iosize to the worker */
349 read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
350 if (!read_req) {
351 LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
352 goto exit_clear_intr;
353 }
354
355 INIT_LIST_HEAD(&read_req->list);
356 read_req->iosize = iosize;
357
358	list_add_tail(&read_req->list, &priv->read_req_list);
359
360 /* clear the function's interrupt request bit (write 1 to clear) */
361 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
362
363 queue_work(priv->wq, &priv->isr_worker);
364
365 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
366
367 return;
368
369exit_clear_intr:
370 /* clear the function's interrupt request bit (write 1 to clear) */
371 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
372}
373
374
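
One detail worth noting in iwmct_irq() above: the 16-bit transaction size is assembled from two byte-wide SDIO reads, low byte first, and this must happen before the interrupt is cleared. A trivial userspace sketch of that composition, with hypothetical register values:

/* Sketch of the little-endian 16-bit size composition in iwmct_irq(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t lo = 0x40, hi = 0x01;	/* hypothetical SDIO byte reads */
	unsigned iosize = lo | (hi << 8);

	printf("transaction size = %u bytes\n", iosize);	/* 320 */
	return 0;
}
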
375static int blocks;
376module_param(blocks, int, 0604);
377MODULE_PARM_DESC(blocks, "max_blocks_to_send");
378
379static int dump;
380module_param(dump, bool, 0604);
381MODULE_PARM_DESC(dump, "dump_hex_content");
382
383static int jump = 1;
384module_param(jump, bool, 0604);
385
386static int direct = 1;
387module_param(direct, bool, 0604);
388
389static int checksum = 1;
390module_param(checksum, bool, 0604);
391
392static int fw_download = 1;
393module_param(fw_download, bool, 0604);
394
395static int block_size = IWMC_SDIO_BLK_SIZE;
396module_param(block_size, int, 0404);
397
398static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
399module_param(download_trans_blks, int, 0604);
400
401static int rubbish_barker;
402module_param(rubbish_barker, bool, 0604);
403
404#ifdef CONFIG_IWMC3200TOP_DEBUG
405static int log_level[LOG_SRC_MAX];
406static unsigned int log_level_argc;
407module_param_array(log_level, int, &log_level_argc, 0604);
408MODULE_PARM_DESC(log_level, "log_level");
409
410static int log_level_fw[FW_LOG_SRC_MAX];
411static unsigned int log_level_fw_argc;
412module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
413MODULE_PARM_DESC(log_level_fw, "log_level_fw");
414#endif
415
416void iwmct_dbg_init_params(struct iwmct_priv *priv)
417{
418#ifdef CONFIG_IWMC3200TOP_DEBUG
419 int i;
420
421 for (i = 0; i < log_level_argc; i++) {
422 dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
423 i, log_level[i]);
424 iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
425 log_level[i] & 0xFF);
426 }
427 for (i = 0; i < log_level_fw_argc; i++) {
428 dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
429 i, log_level_fw[i]);
430 iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
431 log_level_fw[i] & 0xFF);
432 }
433#endif
434
435 priv->dbg.blocks = blocks;
436 LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
437 priv->dbg.dump = (bool)dump;
438 LOG_INFO(priv, INIT, "dump=%d\n", dump);
439 priv->dbg.jump = (bool)jump;
440 LOG_INFO(priv, INIT, "jump=%d\n", jump);
441 priv->dbg.direct = (bool)direct;
442 LOG_INFO(priv, INIT, "direct=%d\n", direct);
443 priv->dbg.checksum = (bool)checksum;
444 LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
445 priv->dbg.fw_download = (bool)fw_download;
446 LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
447 priv->dbg.block_size = block_size;
448 LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
449 priv->dbg.download_trans_blks = download_trans_blks;
450 LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
451}
452
453/*****************************************************************************
454 *
455 * sysfs attributes
456 *
457 *****************************************************************************/
458static ssize_t show_iwmct_fw_version(struct device *d,
459 struct device_attribute *attr, char *buf)
460{
461 struct iwmct_priv *priv = dev_get_drvdata(d);
462 return sprintf(buf, "%s\n", priv->dbg.label_fw);
463}
464static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
465
466#ifdef CONFIG_IWMC3200TOP_DEBUG
467static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
468 show_iwmct_log_level, store_iwmct_log_level);
469static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
470 show_iwmct_log_level_fw, store_iwmct_log_level_fw);
471#endif
472
473static struct attribute *iwmct_sysfs_entries[] = {
474 &dev_attr_cc_label_fw.attr,
475#ifdef CONFIG_IWMC3200TOP_DEBUG
476 &dev_attr_log_level.attr,
477 &dev_attr_log_level_fw.attr,
478#endif
479 NULL
480};
481
482static struct attribute_group iwmct_attribute_group = {
483 .name = NULL, /* put in device directory */
484 .attrs = iwmct_sysfs_entries,
485};
486
487
488static int iwmct_probe(struct sdio_func *func,
489 const struct sdio_device_id *id)
490{
491 struct iwmct_priv *priv;
492 int ret;
493 int val = 1;
494 int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
495
496 dev_dbg(&func->dev, "enter iwmct_probe\n");
497
498	dev_dbg(&func->dev, "IRQ polling period is %u msecs, HZ is %d\n",
499 jiffies_to_msecs(2147483647), HZ);
500
501 priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
502 if (!priv) {
503 dev_err(&func->dev, "kzalloc error\n");
504 return -ENOMEM;
505 }
506 priv->func = func;
507 sdio_set_drvdata(func, priv);
508
509
510 /* create drivers work queue */
511 priv->wq = create_workqueue(DRV_NAME "_wq");
512 priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
513 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
514 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
515
516 init_waitqueue_head(&priv->wait_q);
517
518 sdio_claim_host(func);
519 /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
520 func->enable_timeout = 10;
521
522 /* In our HW, setting the block size also wakes up the boot rom. */
523 ret = sdio_set_block_size(func, priv->dbg.block_size);
524 if (ret) {
525 LOG_ERROR(priv, INIT,
526 "sdio_set_block_size() failure: %d\n", ret);
527 goto error_sdio_enable;
528 }
529
530 ret = sdio_enable_func(func);
531 if (ret) {
532 LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
533 goto error_sdio_enable;
534 }
535
536 /* init reset and dev_sync states */
537 atomic_set(&priv->reset, 0);
538 atomic_set(&priv->dev_sync, 0);
539
540 /* init read req queue */
541 INIT_LIST_HEAD(&priv->read_req_list);
542
543 /* process configurable parameters */
544 iwmct_dbg_init_params(priv);
545 ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
546 if (ret) {
547 LOG_ERROR(priv, INIT, "Failed to register attributes and "
548 "initialize module_params\n");
549 goto error_dev_attrs;
550 }
551
552 iwmct_dbgfs_register(priv, DRV_NAME);
553
554 if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
555 LOG_INFO(priv, INIT,
556 "Reducing transaction to 8 blocks = 2K (from %d)\n",
557 priv->dbg.download_trans_blks);
558 priv->dbg.download_trans_blks = 8;
559 }
560 priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
561 LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
562
563 ret = sdio_claim_irq(func, iwmct_irq);
564 if (ret) {
565 LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
566 goto error_claim_irq;
567 }
568
569
570 /* Enable function's interrupt */
571 sdio_writeb(priv->func, val, addr, &ret);
572 if (ret) {
573 LOG_ERROR(priv, INIT, "Failure writing to "
574 "Interrupt Enable Register (%d): %d\n", addr, ret);
575 goto error_enable_int;
576 }
577
578 sdio_release_host(func);
579
580 LOG_INFO(priv, INIT, "exit iwmct_probe\n");
581
582 return ret;
583
584error_enable_int:
585 sdio_release_irq(func);
586error_claim_irq:
587 sdio_disable_func(func);
588error_dev_attrs:
589 iwmct_dbgfs_unregister(priv->dbgfs);
590 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
591error_sdio_enable:
592 sdio_release_host(func);
593 return ret;
594}
595
596static void iwmct_remove(struct sdio_func *func)
597{
598 struct iwmct_work_struct *read_req;
599 struct iwmct_priv *priv = sdio_get_drvdata(func);
600
601 LOG_INFO(priv, INIT, "enter\n");
602
603 sdio_claim_host(func);
604 sdio_release_irq(func);
605 sdio_release_host(func);
606
607	/* Safely destroy the driver's workqueues */
608 destroy_workqueue(priv->bus_rescan_wq);
609 destroy_workqueue(priv->wq);
610
611 sdio_claim_host(func);
612 sdio_disable_func(func);
613 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
614 iwmct_dbgfs_unregister(priv->dbgfs);
615 sdio_release_host(func);
616
617 /* free read requests */
618 while (!list_empty(&priv->read_req_list)) {
619 read_req = list_entry(priv->read_req_list.next,
620 struct iwmct_work_struct, list);
621
622 list_del(&read_req->list);
623 kfree(read_req);
624 }
625
626 kfree(priv);
627}
628
629
630static const struct sdio_device_id iwmct_ids[] = {
631 /* Intel Wireless MultiCom 3200 Top Driver */
632 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
633 { }, /* Terminating entry */
634};
635
636MODULE_DEVICE_TABLE(sdio, iwmct_ids);
637
638static struct sdio_driver iwmct_driver = {
639 .probe = iwmct_probe,
640 .remove = iwmct_remove,
641 .name = DRV_NAME,
642 .id_table = iwmct_ids,
643};
644
645static int __init iwmct_init(void)
646{
647 int rc;
648
649 /* Default log filter settings */
650 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
651 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
652 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
653
654 rc = sdio_register_driver(&iwmct_driver);
655
656 return rc;
657}
658
659static void __exit iwmct_exit(void)
660{
661 sdio_unregister_driver(&iwmct_driver);
662}
663
664module_init(iwmct_init);
665module_exit(iwmct_exit);
666
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index e4ff50b95a5e..59c118c19a91 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -295,6 +295,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
295 /* On x86 a breakpoint stop requires it to be decremented */ 295 /* On x86 a breakpoint stop requires it to be decremented */
296 if (addr + 1 == kgdbts_regs.ip) 296 if (addr + 1 == kgdbts_regs.ip)
297 offset = -1; 297 offset = -1;
298#elif defined(CONFIG_SUPERH)
299 /* On SUPERH a breakpoint stop requires it to be decremented */
300 if (addr + 2 == kgdbts_regs.pc)
301 offset = -2;
298#endif 302#endif
299 if (strcmp(arg, "silent") && 303 if (strcmp(arg, "silent") &&
300 instruction_pointer(&kgdbts_regs) + offset != addr) { 304 instruction_pointer(&kgdbts_regs) + offset != addr) {
@@ -305,6 +309,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
305#ifdef CONFIG_X86 309#ifdef CONFIG_X86
306 /* On x86 adjust the instruction pointer if needed */ 310 /* On x86 adjust the instruction pointer if needed */
307 kgdbts_regs.ip += offset; 311 kgdbts_regs.ip += offset;
312#elif defined(CONFIG_SUPERH)
313 kgdbts_regs.pc += offset;
308#endif 314#endif
309 return 0; 315 return 0;
310} 316}
@@ -712,6 +718,12 @@ static int run_simple_test(int is_get_char, int chr)
712 718
713 /* End of packet == #XX so look for the '#' */ 719 /* End of packet == #XX so look for the '#' */
714 if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { 720 if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
721 if (put_buf_cnt >= BUFMAX) {
722 eprintk("kgdbts: ERROR: put buffer overflow on"
723 " '%s' line %i\n", ts.name, ts.idx);
724 put_buf_cnt = 0;
725 return 0;
726 }
715 put_buf[put_buf_cnt] = '\0'; 727 put_buf[put_buf_cnt] = '\0';
716 v2printk("put%i: %s\n", ts.idx, put_buf); 728 v2printk("put%i: %s\n", ts.idx, put_buf);
717 /* Trigger check here */ 729 /* Trigger check here */
@@ -885,16 +897,16 @@ static void kgdbts_run_tests(void)
885 int nmi_sleep = 0; 897 int nmi_sleep = 0;
886 int i; 898 int i;
887 899
888 ptr = strstr(config, "F"); 900 ptr = strchr(config, 'F');
889 if (ptr) 901 if (ptr)
890 fork_test = simple_strtol(ptr + 1, NULL, 10); 902 fork_test = simple_strtol(ptr + 1, NULL, 10);
891 ptr = strstr(config, "S"); 903 ptr = strchr(config, 'S');
892 if (ptr) 904 if (ptr)
893 do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); 905 do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
894 ptr = strstr(config, "N"); 906 ptr = strchr(config, 'N');
895 if (ptr) 907 if (ptr)
896 nmi_sleep = simple_strtol(ptr+1, NULL, 10); 908 nmi_sleep = simple_strtol(ptr+1, NULL, 10);
897 ptr = strstr(config, "I"); 909 ptr = strchr(config, 'I');
898 if (ptr) 910 if (ptr)
899 sstep_test = simple_strtol(ptr+1, NULL, 10); 911 sstep_test = simple_strtol(ptr+1, NULL, 10);
900 912
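
The hunk above switches the kgdbts config parsing from strstr() to strchr(), which is sufficient because each option is a single letter followed by a decimal number (see the "<A|V1|V2>[F#|S#][N#]" parameter description at the end of this file's diff). A userspace sketch of that parsing, with a hypothetical config string and strtol() standing in for simple_strtol():

/* Sketch of single-letter flag parsing as done in kgdbts_run_tests(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *config = "V1F100N5";	/* hypothetical kgdbts= value */
	long fork_test = 0, nmi_sleep = 0;
	const char *p;

	p = strchr(config, 'F');
	if (p)
		fork_test = strtol(p + 1, NULL, 10);
	p = strchr(config, 'N');
	if (p)
		nmi_sleep = strtol(p + 1, NULL, 10);

	printf("fork_test=%ld nmi_sleep=%ld\n", fork_test, nmi_sleep);
	return 0;
}
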
@@ -1032,12 +1044,6 @@ static int __init init_kgdbts(void)
1032 return configure_kgdbts(); 1044 return configure_kgdbts();
1033} 1045}
1034 1046
1035static void cleanup_kgdbts(void)
1036{
1037 if (configured == 1)
1038 kgdb_unregister_io_module(&kgdbts_io_ops);
1039}
1040
1041static int kgdbts_get_char(void) 1047static int kgdbts_get_char(void)
1042{ 1048{
1043 int val = 0; 1049 int val = 0;
@@ -1069,10 +1075,8 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
1069 return 0; 1075 return 0;
1070 } 1076 }
1071 1077
1072 if (kgdb_connected) { 1078 if (configured == 1) {
1073 printk(KERN_ERR 1079 printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n");
1074 "kgdbts: Cannot reconfigure while KGDB is connected.\n");
1075
1076 return -EBUSY; 1080 return -EBUSY;
1077 } 1081 }
1078 1082
@@ -1081,9 +1085,6 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
1081 if (config[len - 1] == '\n') 1085 if (config[len - 1] == '\n')
1082 config[len - 1] = '\0'; 1086 config[len - 1] = '\0';
1083 1087
1084 if (configured == 1)
1085 cleanup_kgdbts();
1086
1087 /* Go and configure with the new params. */ 1088 /* Go and configure with the new params. */
1088 return configure_kgdbts(); 1089 return configure_kgdbts();
1089} 1090}
@@ -1111,7 +1112,6 @@ static struct kgdb_io kgdbts_io_ops = {
1111}; 1112};
1112 1113
1113module_init(init_kgdbts); 1114module_init(init_kgdbts);
1114module_exit(cleanup_kgdbts);
1115module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); 1115module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
1116MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); 1116MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]");
1117MODULE_DESCRIPTION("KGDB Test Suite"); 1117MODULE_DESCRIPTION("KGDB Test Suite");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 1bfe5d16963b..81d7fa4ec0db 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -26,21 +26,9 @@
26 * It is adapted from the Linux Kernel Dump Test Tool by 26 * It is adapted from the Linux Kernel Dump Test Tool by
27 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> 27 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
28 * 28 *
29 * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<> 29 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
30 * [cpoint_count={>0}]
31 * 30 *
32 * recur_count : Recursion level for the stack overflow test. Default is 10. 31 * See Documentation/fault-injection/provoke-crashes.txt for instructions
33 *
34 * cpoint_name : Crash point where the kernel is to be crashed. It can be
35 * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
36 * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
37 * IDE_CORE_CP
38 *
39 * cpoint_type : Indicates the action to be taken on hitting the crash point.
40 * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW
41 *
42 * cpoint_count : Indicates the number of times the crash point is to be hit
43 * to trigger an action. The default is 10.
44 */ 32 */
45 33
46#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -52,36 +40,44 @@
52#include <linux/init.h> 40#include <linux/init.h>
53#include <linux/interrupt.h> 41#include <linux/interrupt.h>
54#include <linux/hrtimer.h> 42#include <linux/hrtimer.h>
43#include <linux/slab.h>
55#include <scsi/scsi_cmnd.h> 44#include <scsi/scsi_cmnd.h>
45#include <linux/debugfs.h>
56 46
57#ifdef CONFIG_IDE 47#ifdef CONFIG_IDE
58#include <linux/ide.h> 48#include <linux/ide.h>
59#endif 49#endif
60 50
61#define NUM_CPOINTS 8
62#define NUM_CPOINT_TYPES 5
63#define DEFAULT_COUNT 10 51#define DEFAULT_COUNT 10
64#define REC_NUM_DEFAULT 10 52#define REC_NUM_DEFAULT 10
65 53
66enum cname { 54enum cname {
67 INVALID, 55 CN_INVALID,
68 INT_HARDWARE_ENTRY, 56 CN_INT_HARDWARE_ENTRY,
69 INT_HW_IRQ_EN, 57 CN_INT_HW_IRQ_EN,
70 INT_TASKLET_ENTRY, 58 CN_INT_TASKLET_ENTRY,
71 FS_DEVRW, 59 CN_FS_DEVRW,
72 MEM_SWAPOUT, 60 CN_MEM_SWAPOUT,
73 TIMERADD, 61 CN_TIMERADD,
74 SCSI_DISPATCH_CMD, 62 CN_SCSI_DISPATCH_CMD,
75 IDE_CORE_CP 63 CN_IDE_CORE_CP,
64 CN_DIRECT,
76}; 65};
77 66
78enum ctype { 67enum ctype {
79 NONE, 68 CT_NONE,
80 PANIC, 69 CT_PANIC,
81 BUG, 70 CT_BUG,
82 EXCEPTION, 71 CT_EXCEPTION,
83 LOOP, 72 CT_LOOP,
84 OVERFLOW 73 CT_OVERFLOW,
74 CT_CORRUPT_STACK,
75 CT_UNALIGNED_LOAD_STORE_WRITE,
76 CT_OVERWRITE_ALLOCATION,
77 CT_WRITE_AFTER_FREE,
78 CT_SOFTLOCKUP,
79 CT_HARDLOCKUP,
80 CT_HUNG_TASK,
85}; 81};
86 82
87static char* cp_name[] = { 83static char* cp_name[] = {
@@ -92,7 +88,8 @@ static char* cp_name[] = {
92 "MEM_SWAPOUT", 88 "MEM_SWAPOUT",
93 "TIMERADD", 89 "TIMERADD",
94 "SCSI_DISPATCH_CMD", 90 "SCSI_DISPATCH_CMD",
95 "IDE_CORE_CP" 91 "IDE_CORE_CP",
92 "DIRECT",
96}; 93};
97 94
98static char* cp_type[] = { 95static char* cp_type[] = {
@@ -100,7 +97,14 @@ static char* cp_type[] = {
100 "BUG", 97 "BUG",
101 "EXCEPTION", 98 "EXCEPTION",
102 "LOOP", 99 "LOOP",
103 "OVERFLOW" 100 "OVERFLOW",
101 "CORRUPT_STACK",
102 "UNALIGNED_LOAD_STORE_WRITE",
103 "OVERWRITE_ALLOCATION",
104 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP",
106 "HARDLOCKUP",
107 "HUNG_TASK",
104}; 108};
105 109
106static struct jprobe lkdtm; 110static struct jprobe lkdtm;
@@ -113,16 +117,16 @@ static char* cpoint_type;
113static int cpoint_count = DEFAULT_COUNT; 117static int cpoint_count = DEFAULT_COUNT;
114static int recur_count = REC_NUM_DEFAULT; 118static int recur_count = REC_NUM_DEFAULT;
115 119
116static enum cname cpoint = INVALID; 120static enum cname cpoint = CN_INVALID;
117static enum ctype cptype = NONE; 121static enum ctype cptype = CT_NONE;
118static int count = DEFAULT_COUNT; 122static int count = DEFAULT_COUNT;
119 123
120module_param(recur_count, int, 0644); 124module_param(recur_count, int, 0644);
121MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ 125MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
122 "default is 10"); 126 "default is 10");
123module_param(cpoint_name, charp, 0644); 127module_param(cpoint_name, charp, 0444);
124MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed"); 128MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
125module_param(cpoint_type, charp, 0644); 129module_param(cpoint_type, charp, 0444);
126MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\ 130MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
127 "hitting the crash point"); 131 "hitting the crash point");
128module_param(cpoint_count, int, 0644); 132module_param(cpoint_count, int, 0644);
@@ -193,34 +197,66 @@ int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
193} 197}
194#endif 198#endif
195 199
200/* Return the crash type number or CT_NONE if the name is invalid */
201static enum ctype parse_cp_type(const char *what, size_t count)
202{
203 int i;
204
205 for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
206 if (!strcmp(what, cp_type[i]))
207 return i + 1;
208 }
209
210 return CT_NONE;
211}
212
213static const char *cp_type_to_str(enum ctype type)
214{
215 if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
216 return "None";
217
218 return cp_type[type - 1];
219}
220
221static const char *cp_name_to_str(enum cname name)
222{
223 if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
224 return "INVALID";
225
226 return cp_name[name - 1];
227}
228
229
196static int lkdtm_parse_commandline(void) 230static int lkdtm_parse_commandline(void)
197{ 231{
198 int i; 232 int i;
199 233
200 if (cpoint_name == NULL || cpoint_type == NULL || 234 if (cpoint_count < 1 || recur_count < 1)
201 cpoint_count < 1 || recur_count < 1)
202 return -EINVAL; 235 return -EINVAL;
203 236
204 for (i = 0; i < NUM_CPOINTS; ++i) { 237 count = cpoint_count;
238
239 /* No special parameters */
240 if (!cpoint_type && !cpoint_name)
241 return 0;
242
243 /* Neither or both of these need to be set */
244 if (!cpoint_type || !cpoint_name)
245 return -EINVAL;
246
247 cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
248 if (cptype == CT_NONE)
249 return -EINVAL;
250
251 for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
205 if (!strcmp(cpoint_name, cp_name[i])) { 252 if (!strcmp(cpoint_name, cp_name[i])) {
206 cpoint = i + 1; 253 cpoint = i + 1;
207 break; 254 return 0;
208 }
209 }
210
211 for (i = 0; i < NUM_CPOINT_TYPES; ++i) {
212 if (!strcmp(cpoint_type, cp_type[i])) {
213 cptype = i + 1;
214 break;
215 } 255 }
216 } 256 }
217 257
218 if (cpoint == INVALID || cptype == NONE) 258 /* Could not find a valid crash point */
219 return -EINVAL; 259 return -EINVAL;
220
221 count = cpoint_count;
222
223 return 0;
224} 260}
225 261
226static int recursive_loop(int a) 262static int recursive_loop(int a)
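
The parse_cp_type()/cp_type_to_str() helpers added above rely on a 1-based convention: a valid crash type is its index in cp_type[] plus one, so the value 0 (CT_NONE) is free to mean "none/invalid" and the reverse mapping subtracts one. A trimmed standalone sketch of that round trip (the table here is shortened and purely illustrative):

/* Standalone sketch of the 1-based crash-type lookup convention. */
#include <stdio.h>
#include <string.h>

static const char *cp_type[] = { "PANIC", "BUG", "EXCEPTION" }; /* trimmed */

static int parse_cp_type(const char *what)
{
	unsigned i;

	for (i = 0; i < sizeof(cp_type) / sizeof(cp_type[0]); i++)
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	return 0;				/* CT_NONE */
}

static const char *cp_type_to_str(int type)
{
	if (type <= 0 || type > (int)(sizeof(cp_type) / sizeof(cp_type[0])))
		return "None";
	return cp_type[type - 1];
}

int main(void)
{
	int t = parse_cp_type("BUG");

	printf("BUG   -> %d -> %s\n", t, cp_type_to_str(t));
	printf("bogus -> %s\n", cp_type_to_str(parse_cp_type("bogus")));
	return 0;
}
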
@@ -235,108 +271,410 @@ static int recursive_loop(int a)
235 return recursive_loop(a); 271 return recursive_loop(a);
236} 272}
237 273
238void lkdtm_handler(void) 274static void lkdtm_do_action(enum ctype which)
275{
276 switch (which) {
277 case CT_PANIC:
278 panic("dumptest");
279 break;
280 case CT_BUG:
281 BUG();
282 break;
283 case CT_EXCEPTION:
284 *((int *) 0) = 0;
285 break;
286 case CT_LOOP:
287 for (;;)
288 ;
289 break;
290 case CT_OVERFLOW:
291 (void) recursive_loop(0);
292 break;
293 case CT_CORRUPT_STACK: {
294 volatile u32 data[8];
295 volatile u32 *p = data;
296
297 p[12] = 0x12345678;
298 break;
299 }
300 case CT_UNALIGNED_LOAD_STORE_WRITE: {
301 static u8 data[5] __attribute__((aligned(4))) = {1, 2,
302 3, 4, 5};
303 u32 *p;
304 u32 val = 0x12345678;
305
306 p = (u32 *)(data + 1);
307 if (*p == 0)
308 val = 0x87654321;
309 *p = val;
310 break;
311 }
312 case CT_OVERWRITE_ALLOCATION: {
313 size_t len = 1020;
314 u32 *data = kmalloc(len, GFP_KERNEL);
315
316 data[1024 / sizeof(u32)] = 0x12345678;
317 kfree(data);
318 break;
319 }
320 case CT_WRITE_AFTER_FREE: {
321 size_t len = 1024;
322 u32 *data = kmalloc(len, GFP_KERNEL);
323
324 kfree(data);
325 schedule();
326 memset(data, 0x78, len);
327 break;
328 }
329 case CT_SOFTLOCKUP:
330 preempt_disable();
331 for (;;)
332 cpu_relax();
333 break;
334 case CT_HARDLOCKUP:
335 local_irq_disable();
336 for (;;)
337 cpu_relax();
338 break;
339 case CT_HUNG_TASK:
340 set_current_state(TASK_UNINTERRUPTIBLE);
341 schedule();
342 break;
343 case CT_NONE:
344 default:
345 break;
346 }
347
348}
349
+static void lkdtm_handler(void)
 {
-	printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n",
-		cpoint_name, cpoint_type);
-	--count;
+	count--;
+	printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
+			cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

 	if (count == 0) {
-		switch (cptype) {
-		case NONE:
-			break;
-		case PANIC:
-			printk(KERN_INFO "lkdtm : PANIC\n");
-			panic("dumptest");
-			break;
-		case BUG:
-			printk(KERN_INFO "lkdtm : BUG\n");
-			BUG();
-			break;
-		case EXCEPTION:
-			printk(KERN_INFO "lkdtm : EXCEPTION\n");
-			*((int *) 0) = 0;
-			break;
-		case LOOP:
-			printk(KERN_INFO "lkdtm : LOOP\n");
-			for (;;);
-			break;
-		case OVERFLOW:
-			printk(KERN_INFO "lkdtm : OVERFLOW\n");
-			(void) recursive_loop(0);
-			break;
-		default:
-			break;
-		}
+		lkdtm_do_action(cptype);
 		count = cpoint_count;
 	}
 }

-static int __init lkdtm_module_init(void)
+static int lkdtm_register_cpoint(enum cname which)
 {
 	int ret;

-	if (lkdtm_parse_commandline() == -EINVAL) {
-		printk(KERN_INFO "lkdtm : Invalid command\n");
-		return -EINVAL;
-	}
+	cpoint = CN_INVALID;
+	if (lkdtm.entry != NULL)
+		unregister_jprobe(&lkdtm);

-	switch (cpoint) {
-	case INT_HARDWARE_ENTRY:
-		lkdtm.kp.symbol_name = "__do_IRQ";
+	switch (which) {
+	case CN_DIRECT:
+		lkdtm_do_action(cptype);
+		return 0;
+	case CN_INT_HARDWARE_ENTRY:
+		lkdtm.kp.symbol_name = "do_IRQ";
 		lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
 		break;
-	case INT_HW_IRQ_EN:
+	case CN_INT_HW_IRQ_EN:
 		lkdtm.kp.symbol_name = "handle_IRQ_event";
 		lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
 		break;
-	case INT_TASKLET_ENTRY:
+	case CN_INT_TASKLET_ENTRY:
 		lkdtm.kp.symbol_name = "tasklet_action";
 		lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
 		break;
-	case FS_DEVRW:
+	case CN_FS_DEVRW:
 		lkdtm.kp.symbol_name = "ll_rw_block";
 		lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
 		break;
-	case MEM_SWAPOUT:
+	case CN_MEM_SWAPOUT:
 		lkdtm.kp.symbol_name = "shrink_inactive_list";
 		lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
 		break;
-	case TIMERADD:
+	case CN_TIMERADD:
 		lkdtm.kp.symbol_name = "hrtimer_start";
 		lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
 		break;
-	case SCSI_DISPATCH_CMD:
+	case CN_SCSI_DISPATCH_CMD:
 		lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
 		lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
 		break;
-	case IDE_CORE_CP:
+	case CN_IDE_CORE_CP:
 #ifdef CONFIG_IDE
 		lkdtm.kp.symbol_name = "generic_ide_ioctl";
 		lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
 #else
-		printk(KERN_INFO "lkdtm : Crash point not available\n");
+		printk(KERN_INFO "lkdtm: Crash point not available\n");
+		return -EINVAL;
 #endif
 		break;
 	default:
-		printk(KERN_INFO "lkdtm : Invalid Crash Point\n");
-		break;
+		printk(KERN_INFO "lkdtm: Invalid Crash Point\n");
+		return -EINVAL;
 	}

+	cpoint = which;
 	if ((ret = register_jprobe(&lkdtm)) < 0) {
-		printk(KERN_INFO "lkdtm : Couldn't register jprobe\n");
-		return ret;
+		printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
+		cpoint = CN_INVALID;
+	}
+
+	return ret;
+}
424
425static ssize_t do_register_entry(enum cname which, struct file *f,
426 const char __user *user_buf, size_t count, loff_t *off)
427{
428 char *buf;
429 int err;
430
431 if (count >= PAGE_SIZE)
432 return -EINVAL;
433
434 buf = (char *)__get_free_page(GFP_KERNEL);
435 if (!buf)
436 return -ENOMEM;
437 if (copy_from_user(buf, user_buf, count)) {
438 free_page((unsigned long) buf);
439 return -EFAULT;
440 }
441 /* NULL-terminate and remove enter */
442 buf[count] = '\0';
443 strim(buf);
444
445 cptype = parse_cp_type(buf, count);
446 free_page((unsigned long) buf);
447
448 if (cptype == CT_NONE)
449 return -EINVAL;
450
451 err = lkdtm_register_cpoint(which);
452 if (err < 0)
453 return err;
454
455 *off += count;
456
457 return count;
458}
459
460/* Generic read callback that just prints out the available crash types */
461static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
462 size_t count, loff_t *off)
463{
464 char *buf;
465 int i, n, out;
466
467 buf = (char *)__get_free_page(GFP_KERNEL);
468
469 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
470 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
471 n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
472 buf[n] = '\0';
473
474 out = simple_read_from_buffer(user_buf, count, off,
475 buf, n);
476 free_page((unsigned long) buf);
477
478 return out;
479}
480
481static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
482{
483 return 0;
484}
485
486
487static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
488 size_t count, loff_t *off)
489{
490 return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
491}
492
493static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
494 size_t count, loff_t *off)
495{
496 return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
497}
498
499static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
500 size_t count, loff_t *off)
501{
502 return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
503}
504
505static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
506 size_t count, loff_t *off)
507{
508 return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
509}
510
511static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
512 size_t count, loff_t *off)
513{
514 return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
515}
516
517static ssize_t timeradd_entry(struct file *f, const char __user *buf,
518 size_t count, loff_t *off)
519{
520 return do_register_entry(CN_TIMERADD, f, buf, count, off);
521}
522
523static ssize_t scsi_dispatch_cmd_entry(struct file *f,
524 const char __user *buf, size_t count, loff_t *off)
525{
526 return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
527}
528
529static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
530 size_t count, loff_t *off)
531{
532 return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
533}
534
535/* Special entry to just crash directly. Available without KPROBEs */
536static ssize_t direct_entry(struct file *f, const char __user *user_buf,
537 size_t count, loff_t *off)
538{
539 enum ctype type;
540 char *buf;
541
542 if (count >= PAGE_SIZE)
543 return -EINVAL;
544 if (count < 1)
545 return -EINVAL;
546
547 buf = (char *)__get_free_page(GFP_KERNEL);
548 if (!buf)
549 return -ENOMEM;
550 if (copy_from_user(buf, user_buf, count)) {
551 free_page((unsigned long) buf);
552 return -EFAULT;
553 }
554 /* NULL-terminate and remove enter */
555 buf[count] = '\0';
556 strim(buf);
557
558 type = parse_cp_type(buf, count);
559 free_page((unsigned long) buf);
560 if (type == CT_NONE)
561 return -EINVAL;
562
563 printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
564 cp_type_to_str(type));
565 lkdtm_do_action(type);
566 *off += count;
567
568 return count;
569}
570
571struct crash_entry {
572 const char *name;
573 const struct file_operations fops;
574};
575
576static const struct crash_entry crash_entries[] = {
577 {"DIRECT", {.read = lkdtm_debugfs_read,
578 .llseek = generic_file_llseek,
579 .open = lkdtm_debugfs_open,
580 .write = direct_entry} },
581 {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
582 .llseek = generic_file_llseek,
583 .open = lkdtm_debugfs_open,
584 .write = int_hardware_entry} },
585 {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
586 .llseek = generic_file_llseek,
587 .open = lkdtm_debugfs_open,
588 .write = int_hw_irq_en} },
589 {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
590 .llseek = generic_file_llseek,
591 .open = lkdtm_debugfs_open,
592 .write = int_tasklet_entry} },
593 {"FS_DEVRW", {.read = lkdtm_debugfs_read,
594 .llseek = generic_file_llseek,
595 .open = lkdtm_debugfs_open,
596 .write = fs_devrw_entry} },
597 {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
598 .llseek = generic_file_llseek,
599 .open = lkdtm_debugfs_open,
600 .write = mem_swapout_entry} },
601 {"TIMERADD", {.read = lkdtm_debugfs_read,
602 .llseek = generic_file_llseek,
603 .open = lkdtm_debugfs_open,
604 .write = timeradd_entry} },
605 {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
606 .llseek = generic_file_llseek,
607 .open = lkdtm_debugfs_open,
608 .write = scsi_dispatch_cmd_entry} },
609 {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
610 .llseek = generic_file_llseek,
611 .open = lkdtm_debugfs_open,
612 .write = ide_core_cp_entry} },
613};
614
615static struct dentry *lkdtm_debugfs_root;
616
617static int __init lkdtm_module_init(void)
618{
619 int ret = -EINVAL;
620 int n_debugfs_entries = 1; /* Assume only the direct entry */
621 int i;
622
623 /* Register debugfs interface */
624 lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
625 if (!lkdtm_debugfs_root) {
626 printk(KERN_ERR "lkdtm: creating root dir failed\n");
627 return -ENODEV;
628 }
629
630#ifdef CONFIG_KPROBES
631 n_debugfs_entries = ARRAY_SIZE(crash_entries);
632#endif
633
634 for (i = 0; i < n_debugfs_entries; i++) {
635 const struct crash_entry *cur = &crash_entries[i];
636 struct dentry *de;
637
638 de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
639 NULL, &cur->fops);
640 if (de == NULL) {
641 printk(KERN_ERR "lkdtm: could not create %s\n",
642 cur->name);
643 goto out_err;
644 }
645 }
646
647 if (lkdtm_parse_commandline() == -EINVAL) {
648 printk(KERN_INFO "lkdtm: Invalid command\n");
649 goto out_err;
650 }
651
652 if (cpoint != CN_INVALID && cptype != CT_NONE) {
653 ret = lkdtm_register_cpoint(cpoint);
654 if (ret < 0) {
655 printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
656 cpoint);
657 goto out_err;
658 }
659 printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n",
660 cpoint_name, cpoint_type);
661 } else {
662 printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n");
 	}

-	printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n",
-		cpoint_name, cpoint_type);
 	return 0;
+
+out_err:
+	debugfs_remove_recursive(lkdtm_debugfs_root);
+	return ret;
 }

 static void __exit lkdtm_module_exit(void)
 {
-	unregister_jprobe(&lkdtm);
-	printk(KERN_INFO "lkdtm : Crash point unregistered\n");
+	debugfs_remove_recursive(lkdtm_debugfs_root);
+
+	unregister_jprobe(&lkdtm);
+	printk(KERN_INFO "lkdtm: Crash point unregistered\n");
 }

 module_init(lkdtm_module_init);
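
As an aside, here is a minimal user-space sketch of how the debugfs interface introduced above might be exercised. It is not part of the patch; it assumes debugfs is mounted at /sys/kernel/debug and that "PANIC" is among the type strings the read callback lists.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* Crash-point files live under the "provoke-crash" debugfs directory. */
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reading any crash-point file prints the available crash types. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	/* Writing a type name triggers it; "PANIC" will crash the machine. */
	if (write(fd, "PANIC", strlen("PANIC")) < 0)
		perror("write");

	close(fd);
	return 0;
}
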
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
new file mode 100644
index 000000000000..744b804aca15
--- /dev/null
+++ b/drivers/misc/pch_phub.c
@@ -0,0 +1,717 @@
1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/types.h>
21#include <linux/fs.h>
22#include <linux/uaccess.h>
23#include <linux/string.h>
24#include <linux/pci.h>
25#include <linux/io.h>
26#include <linux/delay.h>
27#include <linux/mutex.h>
28#include <linux/if_ether.h>
29#include <linux/ctype.h>
30
31#define PHUB_STATUS 0x00 /* Status Register offset */
32#define PHUB_CONTROL 0x04 /* Control Register offset */
33#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */
34#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enabling for writing ROM */
35#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disabling for writing ROM */
36#define PCH_PHUB_ROM_START_ADDR 0x14 /* ROM data area start address offset */
37
38/* MAX number of INT_REDUCE_CONTROL registers */
39#define MAX_NUM_INT_REDUCE_CONTROL_REG 128
40#define PCI_DEVICE_ID_PCH1_PHUB 0x8801
41#define PCH_MINOR_NOS 1
42#define CLKCFG_CAN_50MHZ 0x12000000
43#define CLKCFG_CANCLK_MASK 0xFF000000
44
45/* SROM ACCESS Macro */
46#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
47
48/* Registers address offset */
49#define PCH_PHUB_ID_REG 0x0000
50#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004
51#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008
52#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C
53#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010
54#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014
55#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018
56#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020
57#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024
58#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028
59#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C
60#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040
61#define CLKCFG_REG_OFFSET 0x500
62
63#define PCH_PHUB_OROM_SIZE 15360
64
65/**
66 * struct pch_phub_reg - PHUB register structure
67 * @phub_id_reg: PHUB_ID register val
68 * @q_pri_val_reg: QUEUE_PRI_VAL register val
69 * @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val
70 * @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val
71 * @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val
72 * @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val
73 * @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val
74 * @intpin_reg_wpermit_reg0: INTPIN_REG_WPERMIT register 0 val
75 * @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val
76 * @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val
77 * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val
78 * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val
79 * @clkcfg_reg: CLK CFG register val
80 * @pch_phub_base_address: Register base address
81 * @pch_phub_extrom_base_address: external rom base address
82 */
83struct pch_phub_reg {
84 u32 phub_id_reg;
85 u32 q_pri_val_reg;
86 u32 rc_q_maxsize_reg;
87 u32 bri_q_maxsize_reg;
88 u32 comp_resp_timeout_reg;
89 u32 bus_slave_control_reg;
90 u32 deadlock_avoid_type_reg;
91 u32 intpin_reg_wpermit_reg0;
92 u32 intpin_reg_wpermit_reg1;
93 u32 intpin_reg_wpermit_reg2;
94 u32 intpin_reg_wpermit_reg3;
95 u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
96 u32 clkcfg_reg;
97 void __iomem *pch_phub_base_address;
98 void __iomem *pch_phub_extrom_base_address;
99};
100
101/* SROM SPEC for MAC address assignment offset */
102static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa};
103
104static DEFINE_MUTEX(pch_phub_mutex);
105
106/**
107 * pch_phub_read_modify_write_reg() - Reading modifying and writing register
108 * @reg_addr_offset: Register offset address value.
109 * @data: Writing value.
110 * @mask: Mask value.
111 */
112static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
113 unsigned int reg_addr_offset,
114 unsigned int data, unsigned int mask)
115{
116 void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset;
117 iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
118}
119
120/* pch_phub_save_reg_conf - saves register configuration */
121static void pch_phub_save_reg_conf(struct pci_dev *pdev)
122{
123 unsigned int i;
124 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
125
126 void __iomem *p = chip->pch_phub_base_address;
127
128 chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG);
129 chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG);
130 chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
131 chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
132 chip->comp_resp_timeout_reg =
133 ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
134 chip->bus_slave_control_reg =
135 ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
136 chip->deadlock_avoid_type_reg =
137 ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
138 chip->intpin_reg_wpermit_reg0 =
139 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
140 chip->intpin_reg_wpermit_reg1 =
141 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
142 chip->intpin_reg_wpermit_reg2 =
143 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
144 chip->intpin_reg_wpermit_reg3 =
145 ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
146 dev_dbg(&pdev->dev, "%s : "
147 "chip->phub_id_reg=%x, "
148 "chip->q_pri_val_reg=%x, "
149 "chip->rc_q_maxsize_reg=%x, "
150 "chip->bri_q_maxsize_reg=%x, "
151 "chip->comp_resp_timeout_reg=%x, "
152 "chip->bus_slave_control_reg=%x, "
153 "chip->deadlock_avoid_type_reg=%x, "
154 "chip->intpin_reg_wpermit_reg0=%x, "
155 "chip->intpin_reg_wpermit_reg1=%x, "
156 "chip->intpin_reg_wpermit_reg2=%x, "
157 "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
158 chip->phub_id_reg,
159 chip->q_pri_val_reg,
160 chip->rc_q_maxsize_reg,
161 chip->bri_q_maxsize_reg,
162 chip->comp_resp_timeout_reg,
163 chip->bus_slave_control_reg,
164 chip->deadlock_avoid_type_reg,
165 chip->intpin_reg_wpermit_reg0,
166 chip->intpin_reg_wpermit_reg1,
167 chip->intpin_reg_wpermit_reg2,
168 chip->intpin_reg_wpermit_reg3);
169 for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
170 chip->int_reduce_control_reg[i] =
171 ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
172 dev_dbg(&pdev->dev, "%s : "
173 "chip->int_reduce_control_reg[%d]=%x\n",
174 __func__, i, chip->int_reduce_control_reg[i]);
175 }
176 chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
177}
178
179/* pch_phub_restore_reg_conf - restore register configuration */
180static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
181{
182 unsigned int i;
183 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
184 void __iomem *p;
185 p = chip->pch_phub_base_address;
186
187 iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG);
188 iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG);
189 iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
190 iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
191 iowrite32(chip->comp_resp_timeout_reg,
192 p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
193 iowrite32(chip->bus_slave_control_reg,
194 p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
195 iowrite32(chip->deadlock_avoid_type_reg,
196 p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
197 iowrite32(chip->intpin_reg_wpermit_reg0,
198 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
199 iowrite32(chip->intpin_reg_wpermit_reg1,
200 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
201 iowrite32(chip->intpin_reg_wpermit_reg2,
202 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
203 iowrite32(chip->intpin_reg_wpermit_reg3,
204 p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
205 dev_dbg(&pdev->dev, "%s : "
206 "chip->phub_id_reg=%x, "
207 "chip->q_pri_val_reg=%x, "
208 "chip->rc_q_maxsize_reg=%x, "
209 "chip->bri_q_maxsize_reg=%x, "
210 "chip->comp_resp_timeout_reg=%x, "
211 "chip->bus_slave_control_reg=%x, "
212 "chip->deadlock_avoid_type_reg=%x, "
213 "chip->intpin_reg_wpermit_reg0=%x, "
214 "chip->intpin_reg_wpermit_reg1=%x, "
215 "chip->intpin_reg_wpermit_reg2=%x, "
216 "chip->intpin_reg_wpermit_reg3=%x\n", __func__,
217 chip->phub_id_reg,
218 chip->q_pri_val_reg,
219 chip->rc_q_maxsize_reg,
220 chip->bri_q_maxsize_reg,
221 chip->comp_resp_timeout_reg,
222 chip->bus_slave_control_reg,
223 chip->deadlock_avoid_type_reg,
224 chip->intpin_reg_wpermit_reg0,
225 chip->intpin_reg_wpermit_reg1,
226 chip->intpin_reg_wpermit_reg2,
227 chip->intpin_reg_wpermit_reg3);
228 for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
229 iowrite32(chip->int_reduce_control_reg[i],
230 p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
231 dev_dbg(&pdev->dev, "%s : "
232 "chip->int_reduce_control_reg[%d]=%x\n",
233 __func__, i, chip->int_reduce_control_reg[i]);
234 }
235
236 iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
237}
238
239/**
240 * pch_phub_read_serial_rom() - Reading Serial ROM
241 * @offset_address: Serial ROM offset address to read.
242 * @data: Read buffer for specified Serial ROM value.
243 */
244static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
245 unsigned int offset_address, u8 *data)
246{
247 void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
248 offset_address;
249
250 *data = ioread8(mem_addr);
251}
252
253/**
254 * pch_phub_write_serial_rom() - Writing Serial ROM
255 * @offset_address: Serial ROM offset address.
256 * @data: Serial ROM value to write.
257 */
258static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
259 unsigned int offset_address, u8 data)
260{
261 void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
262 (offset_address & PCH_WORD_ADDR_MASK);
263 int i;
264 unsigned int word_data;
265 unsigned int pos;
266 unsigned int mask;
267 pos = (offset_address % 4) * 8;
268 mask = ~(0xFF << pos);
269
270 iowrite32(PCH_PHUB_ROM_WRITE_ENABLE,
271 chip->pch_phub_extrom_base_address + PHUB_CONTROL);
272
273 word_data = ioread32(mem_addr);
274 iowrite32((word_data & mask) | (u32)data << pos, mem_addr);
275
276 i = 0;
277 while (ioread8(chip->pch_phub_extrom_base_address +
278 PHUB_STATUS) != 0x00) {
279 msleep(1);
280 if (i == PHUB_TIMEOUT)
281 return -ETIMEDOUT;
282 i++;
283 }
284
285 iowrite32(PCH_PHUB_ROM_WRITE_DISABLE,
286 chip->pch_phub_extrom_base_address + PHUB_CONTROL);
287
288 return 0;
289}
290
291/**
292 * pch_phub_read_serial_rom_val() - Read Serial ROM value
293 * @offset_address: Serial ROM address offset value.
294 * @data: Serial ROM value to read.
295 */
296static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
297 unsigned int offset_address, u8 *data)
298{
299 unsigned int mem_addr;
300
301 mem_addr = PCH_PHUB_ROM_START_ADDR +
302 pch_phub_mac_offset[offset_address];
303
304 pch_phub_read_serial_rom(chip, mem_addr, data);
305}
306
307/**
308 * pch_phub_write_serial_rom_val() - writing Serial ROM value
309 * @offset_address: Serial ROM address offset value.
310 * @data: Serial ROM value.
311 */
312static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip,
313 unsigned int offset_address, u8 data)
314{
315 int retval;
316 unsigned int mem_addr;
317
318 mem_addr = PCH_PHUB_ROM_START_ADDR +
319 pch_phub_mac_offset[offset_address];
320
321 retval = pch_phub_write_serial_rom(chip, mem_addr, data);
322
323 return retval;
324}
325
326/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration
327 * for Gigabit Ethernet MAC address
328 */
329static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip)
330{
331 int retval;
332
333 retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc);
334 retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10);
335 retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01);
336 retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02);
337
338 retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00);
339 retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00);
340 retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00);
341 retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80);
342
343 retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc);
344 retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10);
345 retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01);
346 retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18);
347
348 retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc);
349 retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10);
350 retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01);
351 retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19);
352
353 retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc);
354 retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10);
355 retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01);
356 retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a);
357
358 retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01);
359 retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00);
360 retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00);
361 retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00);
362
363 return retval;
364}
365
366/**
367 * pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
368 * @offset_address: Gigabit Ethernet MAC address offset value.
369 * @data: Buffer of the Gigabit Ethernet MAC address value.
370 */
371static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
372{
373 int i;
374 for (i = 0; i < ETH_ALEN; i++)
375 pch_phub_read_serial_rom_val(chip, i, &data[i]);
376}
377
378/**
379 * pch_phub_write_gbe_mac_addr() - Write MAC address
380 * @offset_address: Gigabit Ethernet MAC address offset value.
381 * @data: Gigabit Ethernet MAC address value.
382 */
383static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
384{
385 int retval;
386 int i;
387
388 retval = pch_phub_gbe_serial_rom_conf(chip);
389 if (retval)
390 return retval;
391
392 for (i = 0; i < ETH_ALEN; i++) {
393 retval = pch_phub_write_serial_rom_val(chip, i, data[i]);
394 if (retval)
395 return retval;
396 }
397
398 return retval;
399}
400
401static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
402 struct bin_attribute *attr, char *buf,
403 loff_t off, size_t count)
404{
405 unsigned int rom_signature;
406 unsigned char rom_length;
407 unsigned int tmp;
408 unsigned int addr_offset;
409 unsigned int orom_size;
410 int ret;
411 int err;
412
413 struct pch_phub_reg *chip =
414 dev_get_drvdata(container_of(kobj, struct device, kobj));
415
416 ret = mutex_lock_interruptible(&pch_phub_mutex);
417 if (ret) {
418 err = -ERESTARTSYS;
419 goto return_err_nomutex;
420 }
421
422 /* Get Rom signature */
423 pch_phub_read_serial_rom(chip, 0x80, (unsigned char *)&rom_signature);
424 rom_signature &= 0xff;
425 pch_phub_read_serial_rom(chip, 0x81, (unsigned char *)&tmp);
426 rom_signature |= (tmp & 0xff) << 8;
427 if (rom_signature == 0xAA55) {
428 pch_phub_read_serial_rom(chip, 0x82, &rom_length);
429 orom_size = rom_length * 512;
430 if (orom_size < off) {
431 addr_offset = 0;
432 goto return_ok;
433 }
434 if (orom_size < count) {
435 addr_offset = 0;
436 goto return_ok;
437 }
438
439 for (addr_offset = 0; addr_offset < count; addr_offset++) {
440 pch_phub_read_serial_rom(chip, 0x80 + addr_offset + off,
441 &buf[addr_offset]);
442 }
443 } else {
444 err = -ENODATA;
445 goto return_err;
446 }
447return_ok:
448 mutex_unlock(&pch_phub_mutex);
449 return addr_offset;
450
451return_err:
452 mutex_unlock(&pch_phub_mutex);
453return_err_nomutex:
454 return err;
455}
456
457static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
458 struct bin_attribute *attr,
459 char *buf, loff_t off, size_t count)
460{
461 int err;
462 unsigned int addr_offset;
463 int ret;
464 struct pch_phub_reg *chip =
465 dev_get_drvdata(container_of(kobj, struct device, kobj));
466
467 ret = mutex_lock_interruptible(&pch_phub_mutex);
468 if (ret)
469 return -ERESTARTSYS;
470
471 if (off > PCH_PHUB_OROM_SIZE) {
472 addr_offset = 0;
473 goto return_ok;
474 }
475 if (count > PCH_PHUB_OROM_SIZE) {
476 addr_offset = 0;
477 goto return_ok;
478 }
479
480 for (addr_offset = 0; addr_offset < count; addr_offset++) {
481 if (PCH_PHUB_OROM_SIZE < off + addr_offset)
482 goto return_ok;
483
484 ret = pch_phub_write_serial_rom(chip, 0x80 + addr_offset + off,
485 buf[addr_offset]);
486 if (ret) {
487 err = ret;
488 goto return_err;
489 }
490 }
491
492return_ok:
493 mutex_unlock(&pch_phub_mutex);
494 return addr_offset;
495
496return_err:
497 mutex_unlock(&pch_phub_mutex);
498 return err;
499}
500
501static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
502 char *buf)
503{
504 u8 mac[8];
505 struct pch_phub_reg *chip = dev_get_drvdata(dev);
506
507 pch_phub_read_gbe_mac_addr(chip, mac);
508
509 return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
510 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
511}
512
513static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
514 const char *buf, size_t count)
515{
516 u8 mac[6];
517 struct pch_phub_reg *chip = dev_get_drvdata(dev);
518
519 if (count != 18)
520 return -EINVAL;
521
522 sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
523 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
524 (u32 *)&mac[4], (u32 *)&mac[5]);
525
526 pch_phub_write_gbe_mac_addr(chip, mac);
527
528 return count;
529}
530
531static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
532
533static struct bin_attribute pch_bin_attr = {
534 .attr = {
535 .name = "pch_firmware",
536 .mode = S_IRUGO | S_IWUSR,
537 },
538 .size = PCH_PHUB_OROM_SIZE + 1,
539 .read = pch_phub_bin_read,
540 .write = pch_phub_bin_write,
541};
542
543static int __devinit pch_phub_probe(struct pci_dev *pdev,
544 const struct pci_device_id *id)
545{
546 int retval;
547
548 int ret;
549 ssize_t rom_size;
550 struct pch_phub_reg *chip;
551
552 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
553 if (chip == NULL)
554 return -ENOMEM;
555
556 ret = pci_enable_device(pdev);
557 if (ret) {
558 dev_err(&pdev->dev,
559 "%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
560 goto err_pci_enable_dev;
561 }
562 dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
563 ret);
564
565 ret = pci_request_regions(pdev, KBUILD_MODNAME);
566 if (ret) {
567 dev_err(&pdev->dev,
568 "%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
569 goto err_req_regions;
570 }
571 dev_dbg(&pdev->dev, "%s : "
572 "pci_request_regions returns %d\n", __func__, ret);
573
574 chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);
575
576
577 if (chip->pch_phub_base_address == 0) {
578 dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
579 ret = -ENOMEM;
580 goto err_pci_iomap;
581 }
582 dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
583 "in pch_phub_base_address variable is %p\n", __func__,
584 chip->pch_phub_base_address);
585 chip->pch_phub_extrom_base_address = pci_map_rom(pdev, &rom_size);
586
587 if (chip->pch_phub_extrom_base_address == 0) {
588 dev_err(&pdev->dev, "%s : pci_map_rom FAILED", __func__);
589 ret = -ENOMEM;
590 goto err_pci_map;
591 }
592 dev_dbg(&pdev->dev, "%s : "
593 "pci_map_rom SUCCESS and value in "
594 "pch_phub_extrom_base_address variable is %p\n", __func__,
595 chip->pch_phub_extrom_base_address);
596
597 pci_set_drvdata(pdev, chip);
598
599 retval = sysfs_create_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
600 if (retval)
601 goto err_sysfs_create;
602
603 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
604 if (retval)
605 goto exit_bin_attr;
606
607 pch_phub_read_modify_write_reg(chip, (unsigned int)CLKCFG_REG_OFFSET,
608 CLKCFG_CAN_50MHZ, CLKCFG_CANCLK_MASK);
609
610	/* set the prefetch value */
611 iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
612 /* set the interrupt delay value */
613 iowrite32(0x25, chip->pch_phub_base_address + 0x44);
614
615 return 0;
616exit_bin_attr:
617 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
618
619err_sysfs_create:
620 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
621err_pci_map:
622 pci_iounmap(pdev, chip->pch_phub_base_address);
623err_pci_iomap:
624 pci_release_regions(pdev);
625err_req_regions:
626 pci_disable_device(pdev);
627err_pci_enable_dev:
628 kfree(chip);
629 dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
630 return ret;
631}
632
633static void __devexit pch_phub_remove(struct pci_dev *pdev)
634{
635 struct pch_phub_reg *chip = pci_get_drvdata(pdev);
636
637 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
638 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
639 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
640 pci_iounmap(pdev, chip->pch_phub_base_address);
641 pci_release_regions(pdev);
642 pci_disable_device(pdev);
643 kfree(chip);
644}
645
646#ifdef CONFIG_PM
647
648static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
649{
650 int ret;
651
652 pch_phub_save_reg_conf(pdev);
653 ret = pci_save_state(pdev);
654 if (ret) {
655 dev_err(&pdev->dev,
656 " %s -pci_save_state returns %d\n", __func__, ret);
657 return ret;
658 }
659 pci_enable_wake(pdev, PCI_D3hot, 0);
660 pci_disable_device(pdev);
661 pci_set_power_state(pdev, pci_choose_state(pdev, state));
662
663 return 0;
664}
665
666static int pch_phub_resume(struct pci_dev *pdev)
667{
668 int ret;
669
670 pci_set_power_state(pdev, PCI_D0);
671 pci_restore_state(pdev);
672 ret = pci_enable_device(pdev);
673 if (ret) {
674 dev_err(&pdev->dev,
675 "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
676 return ret;
677 }
678
679 pci_enable_wake(pdev, PCI_D3hot, 0);
680 pch_phub_restore_reg_conf(pdev);
681
682 return 0;
683}
684#else
685#define pch_phub_suspend NULL
686#define pch_phub_resume NULL
687#endif /* CONFIG_PM */
688
689static struct pci_device_id pch_phub_pcidev_id[] = {
690 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH1_PHUB)},
691 {0,}
692};
693
694static struct pci_driver pch_phub_driver = {
695 .name = "pch_phub",
696 .id_table = pch_phub_pcidev_id,
697 .probe = pch_phub_probe,
698 .remove = __devexit_p(pch_phub_remove),
699 .suspend = pch_phub_suspend,
700 .resume = pch_phub_resume
701};
702
703static int __init pch_phub_pci_init(void)
704{
705 return pci_register_driver(&pch_phub_driver);
706}
707
708static void __exit pch_phub_pci_exit(void)
709{
710 pci_unregister_driver(&pch_phub_driver);
711}
712
713module_init(pch_phub_pci_init);
714module_exit(pch_phub_pci_exit);
715
716MODULE_DESCRIPTION("PCH Packet Hub PCI Driver");
717MODULE_LICENSE("GPL");
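
The serial-ROM write path above only has an aligned 32-bit window onto the option ROM, so pch_phub_write_serial_rom() folds a single byte into the containing word with a position/mask computation. Below is a standalone sketch of just that arithmetic; it is plain C with no hardware access, and the helper name is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the pos/mask logic used by pch_phub_write_serial_rom():
 * replace byte (offset % 4) inside the aligned 32-bit word. */
static uint32_t replace_byte_in_word(uint32_t word, unsigned int offset,
				     uint8_t data)
{
	unsigned int pos = (offset % 4) * 8;	/* bit position of the byte */
	uint32_t mask = ~(0xFFu << pos);	/* clear only that byte */

	return (word & mask) | ((uint32_t)data << pos);
}

int main(void)
{
	/* Writing 0xAB at offset 2 touches only bits 23..16 of the word. */
	printf("0x%08x\n", replace_byte_in_word(0x11223344, 2, 0xAB));
	/* Prints 0x11ab3344; the other three bytes are preserved. */
	return 0;
}
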
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index fa57b67593ae..b05db55c8c8e 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -21,8 +21,10 @@
 #include <linux/poll.h>
 #include <linux/interrupt.h>
 #include <linux/cdev.h>
+#include <linux/slab.h>
 #include <linux/phantom.h>
-#include <linux/smp_lock.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>

 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -36,6 +38,7 @@
 #define PHB_RUNNING	1
 #define PHB_NOT_OH	2

+static DEFINE_MUTEX(phantom_mutex);
 static struct class *phantom_class;
 static int phantom_major;

@@ -213,17 +216,17 @@ static int phantom_open(struct inode *inode, struct file *file)
 	struct phantom_device *dev = container_of(inode->i_cdev,
 			struct phantom_device, cdev);

-	lock_kernel();
+	mutex_lock(&phantom_mutex);
 	nonseekable_open(inode, file);

 	if (mutex_lock_interruptible(&dev->open_lock)) {
-		unlock_kernel();
+		mutex_unlock(&phantom_mutex);
 		return -ERESTARTSYS;
 	}

 	if (dev->opened) {
 		mutex_unlock(&dev->open_lock);
-		unlock_kernel();
+		mutex_unlock(&phantom_mutex);
 		return -EINVAL;
 	}

@@ -234,7 +237,7 @@ static int phantom_open(struct inode *inode, struct file *file)
 	atomic_set(&dev->counter, 0);
 	dev->opened++;
 	mutex_unlock(&dev->open_lock);
-	unlock_kernel();
+	mutex_unlock(&phantom_mutex);
 	return 0;
 }

@@ -271,12 +274,13 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
 	return mask;
 }

-static struct file_operations phantom_file_ops = {
+static const struct file_operations phantom_file_ops = {
 	.open = phantom_open,
 	.release = phantom_release,
 	.unlocked_ioctl = phantom_ioctl,
 	.compat_ioctl = phantom_compat_ioctl,
 	.poll = phantom_poll,
+	.llseek = no_llseek,
 };

 static irqreturn_t phantom_isr(int irq, void *data)
@@ -339,8 +343,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
 	int retval;

 	retval = pci_enable_device(pdev);
-	if (retval)
+	if (retval) {
+		dev_err(&pdev->dev, "pci_enable_device failed!\n");
 		goto err;
+	}

 	minor = phantom_get_free();
 	if (minor == PHANTOM_MAX_MINORS) {
@@ -352,8 +358,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
 	phantom_devices[minor] = 1;

 	retval = pci_request_regions(pdev, "phantom");
-	if (retval)
+	if (retval) {
+		dev_err(&pdev->dev, "pci_request_regions failed!\n");
 		goto err_null;
+	}

 	retval = -ENOMEM;
 	pht = kzalloc(sizeof(*pht), GFP_KERNEL);
@@ -496,12 +504,7 @@ static struct pci_driver phantom_pci_driver = {
 	.resume = phantom_resume
 };

-static ssize_t phantom_show_version(struct class *cls, char *buf)
-{
-	return sprintf(buf, PHANTOM_VERSION "\n");
-}
-
-static CLASS_ATTR(version, 0444, phantom_show_version, NULL);
+static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);

 static int __init phantom_init(void)
 {
@@ -514,7 +517,7 @@ static int __init phantom_init(void)
 		printk(KERN_ERR "phantom: can't register phantom class\n");
 		goto err;
 	}
-	retval = class_create_file(phantom_class, &class_attr_version);
+	retval = class_create_file(phantom_class, &class_attr_version.attr);
 	if (retval) {
 		printk(KERN_ERR "phantom: can't create sysfs version file\n");
 		goto err_class;
@@ -540,7 +543,7 @@ static int __init phantom_init(void)
 err_unchr:
 	unregister_chrdev_region(dev, PHANTOM_MAX_MINORS);
 err_attr:
-	class_remove_file(phantom_class, &class_attr_version);
+	class_remove_file(phantom_class, &class_attr_version.attr);
 err_class:
 	class_destroy(phantom_class);
 err:
@@ -553,7 +556,7 @@ static void __exit phantom_exit(void)

 	unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS);

-	class_remove_file(phantom_class, &class_attr_version);
+	class_remove_file(phantom_class, &class_attr_version.attr);
 	class_destroy(phantom_class);

 	pr_debug("phantom: module successfully removed\n");
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index f93f03a9e6e9..3ad76cd18b4b 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -53,6 +53,17 @@ struct gru_chiplet_info {
 	int		free_user_cbr;
 };

+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+	unsigned long	fmm_tlbmiss;
+	unsigned long	upm_tlbmiss;
+	unsigned long	tlbdropin;
+	unsigned long	context_stolen;
+	unsigned long	reserved[10];
+};
+
 /* Flags for GRU options on the gru_create_context() call */
 /* Select one of the follow 4 options to specify how TLB misses are handled */
 #define GRU_OPT_MISS_DEFAULT	0x0000	/* Use default mode */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3c9c06618e6a..d95587cc794c 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb);
34#include <asm/intrinsics.h> 34#include <asm/intrinsics.h>
35#define __flush_cache(p) ia64_fc((unsigned long)p) 35#define __flush_cache(p) ia64_fc((unsigned long)p)
36/* Use volatile on IA64 to ensure ordering via st4.rel */ 36/* Use volatile on IA64 to ensure ordering via st4.rel */
37#define gru_ordered_store_int(p, v) \ 37#define gru_ordered_store_ulong(p, v) \
38 do { \ 38 do { \
39 barrier(); \ 39 barrier(); \
40 *((volatile int *)(p)) = v; /* force st.rel */ \ 40 *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
41 } while (0) 41 } while (0)
42#elif defined(CONFIG_X86_64) 42#elif defined(CONFIG_X86_64)
43#define __flush_cache(p) clflush(p) 43#define __flush_cache(p) clflush(p)
44#define gru_ordered_store_int(p, v) \ 44#define gru_ordered_store_ulong(p, v) \
45 do { \ 45 do { \
46 barrier(); \ 46 barrier(); \
47 *(int *)p = v; \ 47 *(unsigned long *)p = v; \
48 } while (0) 48 } while (0)
49#else 49#else
50#error "Unsupported architecture" 50#error "Unsupported architecture"
@@ -129,8 +129,13 @@ struct gru_instruction_bits {
129 */ 129 */
130struct gru_instruction { 130struct gru_instruction {
131 /* DW 0 */ 131 /* DW 0 */
132 unsigned int op32; /* icmd,xtype,iaa0,ima,opc */ 132 union {
133 unsigned int tri0; 133 unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
134 struct {
135 unsigned int op32;
136 unsigned int tri0;
137 };
138 };
134 unsigned long tri1_bufsize; /* DW 1 */ 139 unsigned long tri1_bufsize; /* DW 1 */
135 unsigned long baddr0; /* DW 2 */ 140 unsigned long baddr0; /* DW 2 */
136 unsigned long nelem; /* DW 3 */ 141 unsigned long nelem; /* DW 3 */
@@ -140,7 +145,7 @@ struct gru_instruction {
140 unsigned long avalue; /* DW 7 */ 145 unsigned long avalue; /* DW 7 */
141}; 146};
142 147
143/* Some shifts and masks for the low 32 bits of a GRU command */ 148/* Some shifts and masks for the low 64 bits of a GRU command */
144#define GRU_CB_ICMD_SHFT 0 149#define GRU_CB_ICMD_SHFT 0
145#define GRU_CB_ICMD_MASK 0x1 150#define GRU_CB_ICMD_MASK 0x1
146#define GRU_CB_XTYPE_SHFT 8 151#define GRU_CB_XTYPE_SHFT 8
@@ -155,6 +160,10 @@ struct gru_instruction {
155#define GRU_CB_OPC_MASK 0xff 160#define GRU_CB_OPC_MASK 0xff
156#define GRU_CB_EXOPC_SHFT 24 161#define GRU_CB_EXOPC_SHFT 24
157#define GRU_CB_EXOPC_MASK 0xff 162#define GRU_CB_EXOPC_MASK 0xff
163#define GRU_IDEF2_SHFT 32
164#define GRU_IDEF2_MASK 0x3ffff
165#define GRU_ISTATUS_SHFT 56
166#define GRU_ISTATUS_MASK 0x3
158 167
159/* GRU instruction opcodes (opc field) */ 168/* GRU instruction opcodes (opc field) */
160#define OP_NOP 0x00 169#define OP_NOP 0x00
@@ -256,6 +265,7 @@ struct gru_instruction {
256#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) 265#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
257#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) 266#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
258#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) 267#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
268#define CBE_CAUSE_FORCED_ERROR (1 << 19)
259 269
260/* CBE cbrexecstatus bits */ 270/* CBE cbrexecstatus bits */
261#define CBR_EXS_ABORT_OCC_BIT 0 271#define CBR_EXS_ABORT_OCC_BIT 0
@@ -264,13 +274,15 @@ struct gru_instruction {
264#define CBR_EXS_QUEUED_BIT 3 274#define CBR_EXS_QUEUED_BIT 3
265#define CBR_EXS_TLB_INVAL_BIT 4 275#define CBR_EXS_TLB_INVAL_BIT 4
266#define CBR_EXS_EXCEPTION_BIT 5 276#define CBR_EXS_EXCEPTION_BIT 5
277#define CBR_EXS_CB_INT_PENDING_BIT 6
267 278
268#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) 279#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
269#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) 280#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
270#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) 281#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
271#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) 282#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
272#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) 283#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
273#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) 284#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
285#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
274 286
275/* 287/*
276 * Exceptions are retried for the following cases. If any OTHER bits are set 288 * Exceptions are retried for the following cases. If any OTHER bits are set
@@ -296,12 +308,14 @@ union gru_mesqhead {
296 308
297 309
298/* Generate the low word of a GRU instruction */ 310/* Generate the low word of a GRU instruction */
299static inline unsigned int 311static inline unsigned long
300__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype, 312__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
301 unsigned char iaa0, unsigned char iaa1, 313 unsigned char iaa0, unsigned char iaa1,
302 unsigned char ima) 314 unsigned long idef2, unsigned char ima)
303{ 315{
304 return (1 << GRU_CB_ICMD_SHFT) | 316 return (1 << GRU_CB_ICMD_SHFT) |
317 ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
318 (idef2<< GRU_IDEF2_SHFT) |
305 (iaa0 << GRU_CB_IAA0_SHFT) | 319 (iaa0 << GRU_CB_IAA0_SHFT) |
306 (iaa1 << GRU_CB_IAA1_SHFT) | 320 (iaa1 << GRU_CB_IAA1_SHFT) |
307 (ima << GRU_CB_IMA_SHFT) | 321 (ima << GRU_CB_IMA_SHFT) |
@@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p)
319} 333}
320 334
321/* 335/*
322 * Store the lower 32 bits of the command including the "start" bit. Then 336 * Store the lower 64 bits of the command including the "start" bit. Then
323 * start the instruction executing. 337 * start the instruction executing.
324 */ 338 */
325static inline void gru_start_instruction(struct gru_instruction *ins, int op32) 339static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
326{ 340{
327 gru_ordered_store_int(ins, op32); 341 gru_ordered_store_ulong(ins, op64);
342 mb();
328 gru_flush_cache(ins); 343 gru_flush_cache(ins);
329} 344}
330 345
@@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
340 * - nelem and stride are in elements 355 * - nelem and stride are in elements
341 * - tri0/tri1 is in bytes for the beginning of the data segment. 356 * - tri0/tri1 is in bytes for the beginning of the data segment.
342 */ 357 */
358static inline void gru_vload_phys(void *cb, unsigned long gpa,
359 unsigned int tri0, int iaa, unsigned long hints)
360{
361 struct gru_instruction *ins = (struct gru_instruction *)cb;
362
363 ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
364 ins->nelem = 1;
365 ins->op1_stride = 1;
366 gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
367 (unsigned long)tri0, CB_IMA(hints)));
368}
369
370static inline void gru_vstore_phys(void *cb, unsigned long gpa,
371 unsigned int tri0, int iaa, unsigned long hints)
372{
373 struct gru_instruction *ins = (struct gru_instruction *)cb;
374
375 ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
376 ins->nelem = 1;
377 ins->op1_stride = 1;
378 gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
379 (unsigned long)tri0, CB_IMA(hints)));
380}
381
343static inline void gru_vload(void *cb, unsigned long mem_addr, 382static inline void gru_vload(void *cb, unsigned long mem_addr,
344 unsigned int tri0, unsigned char xtype, unsigned long nelem, 383 unsigned int tri0, unsigned char xtype, unsigned long nelem,
345 unsigned long stride, unsigned long hints) 384 unsigned long stride, unsigned long hints)
@@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr,
348 387
349 ins->baddr0 = (long)mem_addr; 388 ins->baddr0 = (long)mem_addr;
350 ins->nelem = nelem; 389 ins->nelem = nelem;
351 ins->tri0 = tri0;
352 ins->op1_stride = stride; 390 ins->op1_stride = stride;
353 gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0, 391 gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
354 CB_IMA(hints))); 392 (unsigned long)tri0, CB_IMA(hints)));
355} 393}
356 394
357static inline void gru_vstore(void *cb, unsigned long mem_addr, 395static inline void gru_vstore(void *cb, unsigned long mem_addr,
@@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr,
362 400
363 ins->baddr0 = (long)mem_addr; 401 ins->baddr0 = (long)mem_addr;
364 ins->nelem = nelem; 402 ins->nelem = nelem;
365 ins->tri0 = tri0;
366 ins->op1_stride = stride; 403 ins->op1_stride = stride;
367 gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0, 404 gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
368 CB_IMA(hints))); 405 tri0, CB_IMA(hints)));
369} 406}
370 407
371static inline void gru_ivload(void *cb, unsigned long mem_addr, 408static inline void gru_ivload(void *cb, unsigned long mem_addr,
@@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr,
376 413
377 ins->baddr0 = (long)mem_addr; 414 ins->baddr0 = (long)mem_addr;
378 ins->nelem = nelem; 415 ins->nelem = nelem;
379 ins->tri0 = tri0;
380 ins->tri1_bufsize = tri1; 416 ins->tri1_bufsize = tri1;
381 gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, 417 gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
382 CB_IMA(hints))); 418 tri0, CB_IMA(hints)));
383} 419}
384 420
385static inline void gru_ivstore(void *cb, unsigned long mem_addr, 421static inline void gru_ivstore(void *cb, unsigned long mem_addr,
@@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr,
390 426
391 ins->baddr0 = (long)mem_addr; 427 ins->baddr0 = (long)mem_addr;
392 ins->nelem = nelem; 428 ins->nelem = nelem;
393 ins->tri0 = tri0;
394 ins->tri1_bufsize = tri1; 429 ins->tri1_bufsize = tri1;
395 gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, 430 gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
396 CB_IMA(hints))); 431 tri0, CB_IMA(hints)));
397} 432}
398 433
399static inline void gru_vset(void *cb, unsigned long mem_addr, 434static inline void gru_vset(void *cb, unsigned long mem_addr,
@@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr,
406 ins->op2_value_baddr1 = value; 441 ins->op2_value_baddr1 = value;
407 ins->nelem = nelem; 442 ins->nelem = nelem;
408 ins->op1_stride = stride; 443 ins->op1_stride = stride;
409 gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0, 444 gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
410 CB_IMA(hints))); 445 0, CB_IMA(hints)));
411} 446}
412 447
413static inline void gru_ivset(void *cb, unsigned long mem_addr, 448static inline void gru_ivset(void *cb, unsigned long mem_addr,
@@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr,
420 ins->op2_value_baddr1 = value; 455 ins->op2_value_baddr1 = value;
421 ins->nelem = nelem; 456 ins->nelem = nelem;
422 ins->tri1_bufsize = tri1; 457 ins->tri1_bufsize = tri1;
423 gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0, 458 gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
424 CB_IMA(hints))); 459 0, CB_IMA(hints)));
425} 460}
426 461
427static inline void gru_vflush(void *cb, unsigned long mem_addr, 462static inline void gru_vflush(void *cb, unsigned long mem_addr,
@@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr,
433 ins->baddr0 = (long)mem_addr; 468 ins->baddr0 = (long)mem_addr;
434 ins->op1_stride = stride; 469 ins->op1_stride = stride;
435 ins->nelem = nelem; 470 ins->nelem = nelem;
436 gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, 471 gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
437 CB_IMA(hints))); 472 0, CB_IMA(hints)));
438} 473}
439 474
440static inline void gru_nop(void *cb, int hints) 475static inline void gru_nop(void *cb, int hints)
441{ 476{
442 struct gru_instruction *ins = (void *)cb; 477 struct gru_instruction *ins = (void *)cb;
443 478
444 gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints))); 479 gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
445} 480}
446 481
447 482
@@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src,
455 ins->baddr0 = (long)src; 490 ins->baddr0 = (long)src;
456 ins->op2_value_baddr1 = (long)dest; 491 ins->op2_value_baddr1 = (long)dest;
457 ins->nelem = nelem; 492 ins->nelem = nelem;
458 ins->tri0 = tri0;
459 ins->tri1_bufsize = bufsize; 493 ins->tri1_bufsize = bufsize;
460 gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM, 494 gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
461 IAA_RAM, CB_IMA(hints))); 495 IAA_RAM, tri0, CB_IMA(hints)));
462} 496}
463 497
464static inline void gru_bstore(void *cb, const unsigned long src, 498static inline void gru_bstore(void *cb, const unsigned long src,
@@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src,
470 ins->baddr0 = (long)src; 504 ins->baddr0 = (long)src;
471 ins->op2_value_baddr1 = (long)dest; 505 ins->op2_value_baddr1 = (long)dest;
472 ins->nelem = nelem; 506 ins->nelem = nelem;
473 ins->tri0 = tri0; 507 gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
474 gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM, 508 tri0, CB_IMA(hints)));
475 CB_IMA(hints)));
476} 509}
477 510
478static inline void gru_gamir(void *cb, int exopc, unsigned long src, 511static inline void gru_gamir(void *cb, int exopc, unsigned long src,
@@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src,
481 struct gru_instruction *ins = (void *)cb; 514 struct gru_instruction *ins = (void *)cb;
482 515
483 ins->baddr0 = (long)src; 516 ins->baddr0 = (long)src;
484 gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, 517 gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
485 CB_IMA(hints))); 518 0, CB_IMA(hints)));
486} 519}
487 520
488static inline void gru_gamirr(void *cb, int exopc, unsigned long src, 521static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
@@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
491 struct gru_instruction *ins = (void *)cb; 524 struct gru_instruction *ins = (void *)cb;
492 525
493 ins->baddr0 = (long)src; 526 ins->baddr0 = (long)src;
494 gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, 527 gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
495 CB_IMA(hints))); 528 0, CB_IMA(hints)));
496} 529}
497 530
498static inline void gru_gamer(void *cb, int exopc, unsigned long src, 531static inline void gru_gamer(void *cb, int exopc, unsigned long src,
@@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src,
505 ins->baddr0 = (long)src; 538 ins->baddr0 = (long)src;
506 ins->op1_stride = operand1; 539 ins->op1_stride = operand1;
507 ins->op2_value_baddr1 = operand2; 540 ins->op2_value_baddr1 = operand2;
508 gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0, 541 gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
509 CB_IMA(hints))); 542 0, CB_IMA(hints)));
510} 543}
511 544
512static inline void gru_gamerr(void *cb, int exopc, unsigned long src, 545static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
@@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
518 ins->baddr0 = (long)src; 551 ins->baddr0 = (long)src;
519 ins->op1_stride = operand1; 552 ins->op1_stride = operand1;
520 ins->op2_value_baddr1 = operand2; 553 ins->op2_value_baddr1 = operand2;
521 gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, 554 gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
522 CB_IMA(hints))); 555 0, CB_IMA(hints)));
523} 556}
524 557
525static inline void gru_gamxr(void *cb, unsigned long src, 558static inline void gru_gamxr(void *cb, unsigned long src,
@@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src,
529 562
530 ins->baddr0 = (long)src; 563 ins->baddr0 = (long)src;
531 ins->nelem = 4; 564 ins->nelem = 4;
532 gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, 565 gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
533 IAA_RAM, 0, CB_IMA(hints))); 566 IAA_RAM, 0, 0, CB_IMA(hints)));
534} 567}
535 568
536static inline void gru_mesq(void *cb, unsigned long queue, 569static inline void gru_mesq(void *cb, unsigned long queue,
@@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue,
541 574
542 ins->baddr0 = (long)queue; 575 ins->baddr0 = (long)queue;
543 ins->nelem = nelem; 576 ins->nelem = nelem;
544 ins->tri0 = tri0; 577 gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
545 gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, 578 tri0, CB_IMA(hints)));
546 CB_IMA(hints)));
547} 579}
548 580
549static inline unsigned long gru_get_amo_value(void *cb) 581static inline unsigned long gru_get_amo_value(void *cb)
@@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb)
662 gru_wait_abort_proc(cb); 694 gru_wait_abort_proc(cb);
663} 695}
664 696
697/*
698 * Get a pointer to the start of a gseg
699 * p - Any valid pointer within the gseg
700 */
701static inline void *gru_get_gseg_pointer (void *p)
702{
703 return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
704}
665 705
666/* 706/*
667 * Get a pointer to a control block 707 * Get a pointer to a control block
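
The gru_instructions.h changes above widen the instruction's first doubleword to 64 bits, carrying tri0/idef2 at GRU_IDEF2_SHFT and the instruction status at GRU_ISTATUS_SHFT. Here is a standalone sketch of that packing and unpacking, reusing only the shift/mask constants the patch adds; CBS_ACTIVE's numeric value is not visible in this hunk, so a placeholder value is assumed.

#include <stdio.h>

/* Constants as added by the patch. */
#define GRU_IDEF2_SHFT		32
#define GRU_IDEF2_MASK		0x3ffffULL
#define GRU_ISTATUS_SHFT	56
#define GRU_ISTATUS_MASK	0x3ULL

/* The real CBS_ACTIVE is defined elsewhere; 3 is only a stand-in here. */
#define CBS_ACTIVE_PLACEHOLDER	3ULL

int main(void)
{
	unsigned long long tri0 = 0x1234;	/* example data-segment offset (idef2 field) */
	unsigned long long op64 = 0;

	/* Pack idef2 and istatus where __opdword() places them
	 * (masking of idef2 is added here only for safety). */
	op64 |= (tri0 & GRU_IDEF2_MASK) << GRU_IDEF2_SHFT;
	op64 |= CBS_ACTIVE_PLACEHOLDER << GRU_ISTATUS_SHFT;

	/* Unpack them again, as a status poll of the control block might. */
	printf("idef2   = 0x%llx\n", (op64 >> GRU_IDEF2_SHFT) & GRU_IDEF2_MASK);
	printf("istatus = %llu\n", (op64 >> GRU_ISTATUS_SHFT) & GRU_ISTATUS_MASK);
	return 0;
}
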
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 679e01778286..38657cdaf54d 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -40,6 +40,12 @@
 #include "gru_instructions.h"
 #include <asm/uv/uv_hub.h>

+/* Return codes for vtop functions */
+#define VTOP_SUCCESS	0
+#define VTOP_INVALID	-1
+#define VTOP_RETRY	-2
+
+
 /*
  * Test if a physical address is a valid GRU GSEG address
  */
@@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	struct gru_thread_state *gts = NULL;
+	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

 	down_write(&mm->mmap_sem);
 	vma = gru_find_vma(vaddr);
-	if (vma)
-		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
-	if (gts) {
-		mutex_lock(&gts->ts_ctxlock);
-		downgrade_write(&mm->mmap_sem);
-	} else {
-		up_write(&mm->mmap_sem);
-	}
+	if (!vma)
+		goto err;

+	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+	if (IS_ERR(gts))
+		goto err;
+	mutex_lock(&gts->ts_ctxlock);
+	downgrade_write(&mm->mmap_sem);
+	return gts;
+
+err:
+	up_write(&mm->mmap_sem);
 	return gts;
 }

@@ -122,39 +131,15 @@ static void gru_unlock_gts(struct gru_thread_state *gts)
  * is necessary to prevent the user from seeing a stale cb.istatus that will
  * change as soon as the TFH restart is complete. Races may cause an
  * occasional failure to clear the cb.istatus, but that is ok.
- *
- * If the cb address is not valid (should not happen, but...), nothing
- * bad will happen.. The get_user()/put_user() will fail but there
- * are no bad side-effects.
  */
-static void gru_cb_set_istatus_active(unsigned long __user *cb)
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
 {
-	union {
-		struct gru_instruction_bits bits;
-		unsigned long dw;
-	} u;
-
-	if (cb) {
-		get_user(u.dw, cb);
-		u.bits.istatus = CBS_ACTIVE;
-		put_user(u.dw, cb);
+	if (cbk) {
+		cbk->istatus = CBS_ACTIVE;
 	}
 }

 /*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
-	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
-/*
  * Read & clear a TFM
  *
  * The GRU has an array of fault maps. A map is private to a cpu
@@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 {
 	struct page *page;

-	/* ZZZ Need to handle HUGE pages */
-	if (is_vm_hugetlb_page(vma))
-		return -EFAULT;
+#ifdef CONFIG_HUGETLB_PAGE
+	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
 	*pageshift = PAGE_SHIFT;
+#endif
 	if (get_user_pages
 	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
 		return -EFAULT;
@@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
 	return 0;

 err:
-	local_irq_enable();
 	return 1;
 }

@@ -301,14 +286,69 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
301 paddr = paddr & ~((1UL << ps) - 1); 286 paddr = paddr & ~((1UL << ps) - 1);
302 *gpa = uv_soc_phys_ram_to_gpa(paddr); 287 *gpa = uv_soc_phys_ram_to_gpa(paddr);
303 *pageshift = ps; 288 *pageshift = ps;
304 return 0; 289 return VTOP_SUCCESS;
305 290
306inval: 291inval:
307 return -1; 292 return VTOP_INVALID;
308upm: 293upm:
309 return -2; 294 return VTOP_RETRY;
295}
296
297
298/*
299 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 300 * CBE cacheline so that the line will be written back to the home agent.
 301 * Otherwise the line may be silently dropped. This has no impact
302 * except on performance.
303 */
304static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
305{
306 if (unlikely(cbe)) {
307 cbe->cbrexecstatus = 0; /* make CL dirty */
308 gru_flush_cache(cbe);
309 }
310} 310}
311 311
312/*
313 * Preload the TLB with entries that may be required. Currently, preloading
314 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 315 * the end of the bcopy transfer, whichever is smaller.
316 */
317static void gru_preload_tlb(struct gru_state *gru,
318 struct gru_thread_state *gts, int atomic,
319 unsigned long fault_vaddr, int asid, int write,
320 unsigned char tlb_preload_count,
321 struct gru_tlb_fault_handle *tfh,
322 struct gru_control_block_extended *cbe)
323{
324 unsigned long vaddr = 0, gpa;
325 int ret, pageshift;
326
327 if (cbe->opccpy != OP_BCOPY)
328 return;
329
330 if (fault_vaddr == cbe->cbe_baddr0)
331 vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
332 else if (fault_vaddr == cbe->cbe_baddr1)
333 vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
334
335 fault_vaddr &= PAGE_MASK;
336 vaddr &= PAGE_MASK;
337 vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
338
339 while (vaddr > fault_vaddr) {
340 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
341 if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
342 GRU_PAGESIZE(pageshift)))
343 return;
344 gru_dbg(grudev,
345 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
346 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
347 vaddr, asid, write, pageshift, gpa);
348 vaddr -= PAGE_SIZE;
349 STAT(tlb_preload_page);
350 }
351}
312 352
313/* 353/*
314 * Drop a TLB entry into the GRU. The fault is described by info in an TFH. 354 * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
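The window that gru_preload_tlb() walks is the smaller of the remaining BCOPY transfer and <tlb_preload_count> pages, rounded to page boundaries. A worked example of that arithmetic; the constants (4 KB pages, 64-byte GRU cache lines, tlb_preload_count = 8) and the addresses are assumed for illustration:

	/*
	 * BCOPY source fault:
	 *   fault_vaddr = 0x10000000, cbe_src_cl = 1000 cache lines
	 *
	 *   end of transfer:  0x10000000 + 64 * 1000 - 1  = 0x1000f9ff
	 *   page aligned:     vaddr                       = 0x1000f000
	 *   preload limit:    fault_vaddr + 8 * 4096      = 0x10008000
	 *   vaddr = min(0x1000f000, 0x10008000)           = 0x10008000
	 *
	 * The loop then drops write-only TLB entries for 0x10008000 down to
	 * 0x10001000 (8 pages); the faulting page itself is handled by the
	 * normal tfh_write_restart() dropin, and preloading is only attempted
	 * for base-size (PAGE_SHIFT) pages.
	 */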
@@ -320,11 +360,14 @@ upm:
320 * < 0 = error code 360 * < 0 = error code
321 * 361 *
322 */ 362 */
323static int gru_try_dropin(struct gru_thread_state *gts, 363static int gru_try_dropin(struct gru_state *gru,
364 struct gru_thread_state *gts,
324 struct gru_tlb_fault_handle *tfh, 365 struct gru_tlb_fault_handle *tfh,
325 unsigned long __user *cb) 366 struct gru_instruction_bits *cbk)
326{ 367{
327 int pageshift = 0, asid, write, ret, atomic = !cb; 368 struct gru_control_block_extended *cbe = NULL;
369 unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
370 int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
328 unsigned long gpa = 0, vaddr = 0; 371 unsigned long gpa = 0, vaddr = 0;
329 372
330 /* 373 /*
@@ -335,24 +378,34 @@ static int gru_try_dropin(struct gru_thread_state *gts,
335 */ 378 */
336 379
337 /* 380 /*
381 * Prefetch the CBE if doing TLB preloading
382 */
383 if (unlikely(tlb_preload_count)) {
384 cbe = gru_tfh_to_cbe(tfh);
385 prefetchw(cbe);
386 }
387
388 /*
338 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. 389 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
339 * Might be a hardware race OR a stupid user. Ignore FMM because FMM 390 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
340 * is a transient state. 391 * is a transient state.
341 */ 392 */
342 if (tfh->status != TFHSTATUS_EXCEPTION) { 393 if (tfh->status != TFHSTATUS_EXCEPTION) {
343 gru_flush_cache(tfh); 394 gru_flush_cache(tfh);
395 sync_core();
344 if (tfh->status != TFHSTATUS_EXCEPTION) 396 if (tfh->status != TFHSTATUS_EXCEPTION)
345 goto failnoexception; 397 goto failnoexception;
346 STAT(tfh_stale_on_fault); 398 STAT(tfh_stale_on_fault);
347 } 399 }
348 if (tfh->state == TFHSTATE_IDLE) 400 if (tfh->state == TFHSTATE_IDLE)
349 goto failidle; 401 goto failidle;
350 if (tfh->state == TFHSTATE_MISS_FMM && cb) 402 if (tfh->state == TFHSTATE_MISS_FMM && cbk)
351 goto failfmm; 403 goto failfmm;
352 404
353 write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; 405 write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
354 vaddr = tfh->missvaddr; 406 vaddr = tfh->missvaddr;
355 asid = tfh->missasid; 407 asid = tfh->missasid;
408 indexway = tfh->indexway;
356 if (asid == 0) 409 if (asid == 0)
357 goto failnoasid; 410 goto failnoasid;
358 411
@@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts,
366 goto failactive; 419 goto failactive;
367 420
368 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); 421 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
369 if (ret == -1) 422 if (ret == VTOP_INVALID)
370 goto failinval; 423 goto failinval;
371 if (ret == -2) 424 if (ret == VTOP_RETRY)
372 goto failupm; 425 goto failupm;
373 426
374 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { 427 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
375 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); 428 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
376 if (atomic || !gru_update_cch(gts, 0)) { 429 if (atomic || !gru_update_cch(gts)) {
377 gts->ts_force_cch_reload = 1; 430 gts->ts_force_cch_reload = 1;
378 goto failupm; 431 goto failupm;
379 } 432 }
380 } 433 }
381 gru_cb_set_istatus_active(cb); 434
435 if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
436 gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
437 gru_flush_cache_cbe(cbe);
438 }
439
440 gru_cb_set_istatus_active(cbk);
441 gts->ustats.tlbdropin++;
382 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, 442 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
383 GRU_PAGESIZE(pageshift)); 443 GRU_PAGESIZE(pageshift));
384 STAT(tlb_dropin);
385 gru_dbg(grudev, 444 gru_dbg(grudev,
386 "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n", 445 "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
387 ret ? "non-atomic" : "atomic", tfh, vaddr, asid, 446 " rw %d, ps %d, gpa 0x%lx\n",
388 pageshift, gpa); 447 atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
448 indexway, write, pageshift, gpa);
449 STAT(tlb_dropin);
389 return 0; 450 return 0;
390 451
391failnoasid: 452failnoasid:
392 /* No asid (delayed unload). */ 453 /* No asid (delayed unload). */
393 STAT(tlb_dropin_fail_no_asid); 454 STAT(tlb_dropin_fail_no_asid);
394 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 455 gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
395 if (!cb) 456 if (!cbk)
396 tfh_user_polling_mode(tfh); 457 tfh_user_polling_mode(tfh);
397 else 458 else
398 gru_flush_cache(tfh); 459 gru_flush_cache(tfh);
460 gru_flush_cache_cbe(cbe);
399 return -EAGAIN; 461 return -EAGAIN;
400 462
401failupm: 463failupm:
402 /* Atomic failure switch CBR to UPM */ 464 /* Atomic failure switch CBR to UPM */
403 tfh_user_polling_mode(tfh); 465 tfh_user_polling_mode(tfh);
466 gru_flush_cache_cbe(cbe);
404 STAT(tlb_dropin_fail_upm); 467 STAT(tlb_dropin_fail_upm);
405 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 468 gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
406 return 1; 469 return 1;
@@ -408,6 +471,7 @@ failupm:
408failfmm: 471failfmm:
409 /* FMM state on UPM call */ 472 /* FMM state on UPM call */
410 gru_flush_cache(tfh); 473 gru_flush_cache(tfh);
474 gru_flush_cache_cbe(cbe);
411 STAT(tlb_dropin_fail_fmm); 475 STAT(tlb_dropin_fail_fmm);
412 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); 476 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
413 return 0; 477 return 0;
@@ -415,17 +479,20 @@ failfmm:
415failnoexception: 479failnoexception:
416 /* TFH status did not show exception pending */ 480 /* TFH status did not show exception pending */
417 gru_flush_cache(tfh); 481 gru_flush_cache(tfh);
418 if (cb) 482 gru_flush_cache_cbe(cbe);
419 gru_flush_cache(cb); 483 if (cbk)
484 gru_flush_cache(cbk);
420 STAT(tlb_dropin_fail_no_exception); 485 STAT(tlb_dropin_fail_no_exception);
421 gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state); 486 gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
487 tfh, tfh->status, tfh->state);
422 return 0; 488 return 0;
423 489
424failidle: 490failidle:
425 /* TFH state was idle - no miss pending */ 491 /* TFH state was idle - no miss pending */
426 gru_flush_cache(tfh); 492 gru_flush_cache(tfh);
427 if (cb) 493 gru_flush_cache_cbe(cbe);
428 gru_flush_cache(cb); 494 if (cbk)
495 gru_flush_cache(cbk);
429 STAT(tlb_dropin_fail_idle); 496 STAT(tlb_dropin_fail_idle);
430 gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); 497 gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
431 return 0; 498 return 0;
@@ -433,16 +500,18 @@ failidle:
433failinval: 500failinval:
434 /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ 501 /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
435 tfh_exception(tfh); 502 tfh_exception(tfh);
503 gru_flush_cache_cbe(cbe);
436 STAT(tlb_dropin_fail_invalid); 504 STAT(tlb_dropin_fail_invalid);
437 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); 505 gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
438 return -EFAULT; 506 return -EFAULT;
439 507
440failactive: 508failactive:
441 /* Range invalidate active. Switch to UPM iff atomic */ 509 /* Range invalidate active. Switch to UPM iff atomic */
442 if (!cb) 510 if (!cbk)
443 tfh_user_polling_mode(tfh); 511 tfh_user_polling_mode(tfh);
444 else 512 else
445 gru_flush_cache(tfh); 513 gru_flush_cache(tfh);
514 gru_flush_cache_cbe(cbe);
446 STAT(tlb_dropin_fail_range_active); 515 STAT(tlb_dropin_fail_range_active);
447 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", 516 gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
448 tfh, vaddr); 517 tfh, vaddr);
@@ -455,31 +524,41 @@ failactive:
455 * Note that this is the interrupt handler that is registered with linux 524 * Note that this is the interrupt handler that is registered with linux
456 * interrupt handlers. 525 * interrupt handlers.
457 */ 526 */
458irqreturn_t gru_intr(int irq, void *dev_id) 527static irqreturn_t gru_intr(int chiplet, int blade)
459{ 528{
460 struct gru_state *gru; 529 struct gru_state *gru;
461 struct gru_tlb_fault_map imap, dmap; 530 struct gru_tlb_fault_map imap, dmap;
462 struct gru_thread_state *gts; 531 struct gru_thread_state *gts;
463 struct gru_tlb_fault_handle *tfh = NULL; 532 struct gru_tlb_fault_handle *tfh = NULL;
533 struct completion *cmp;
464 int cbrnum, ctxnum; 534 int cbrnum, ctxnum;
465 535
466 STAT(intr); 536 STAT(intr);
467 537
468 gru = irq_to_gru(irq); 538 gru = &gru_base[blade]->bs_grus[chiplet];
469 if (!gru) { 539 if (!gru) {
470 dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n", 540 dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
471 raw_smp_processor_id(), irq); 541 raw_smp_processor_id(), chiplet);
472 return IRQ_NONE; 542 return IRQ_NONE;
473 } 543 }
474 get_clear_fault_map(gru, &imap, &dmap); 544 get_clear_fault_map(gru, &imap, &dmap);
545 gru_dbg(grudev,
546 "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
547 smp_processor_id(), chiplet, gru->gs_gid,
548 imap.fault_bits[0], imap.fault_bits[1],
549 dmap.fault_bits[0], dmap.fault_bits[1]);
475 550
476 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { 551 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
477 complete(gru->gs_blade->bs_async_wq); 552 STAT(intr_cbr);
553 cmp = gru->gs_blade->bs_async_wq;
554 if (cmp)
555 complete(cmp);
478 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", 556 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
479 gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done); 557 gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
480 } 558 }
481 559
482 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { 560 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
561 STAT(intr_tfh);
483 tfh = get_tfh_by_index(gru, cbrnum); 562 tfh = get_tfh_by_index(gru, cbrnum);
484 prefetchw(tfh); /* Helps on hdw, required for emulator */ 563 prefetchw(tfh); /* Helps on hdw, required for emulator */
485 564
@@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id)
492 ctxnum = tfh->ctxnum; 571 ctxnum = tfh->ctxnum;
493 gts = gru->gs_gts[ctxnum]; 572 gts = gru->gs_gts[ctxnum];
494 573
574 /* Spurious interrupts can cause this. Ignore. */
575 if (!gts) {
576 STAT(intr_spurious);
577 continue;
578 }
579
495 /* 580 /*
496 * This is running in interrupt context. Trylock the mmap_sem. 581 * This is running in interrupt context. Trylock the mmap_sem.
497 * If it fails, retry the fault in user context. 582 * If it fails, retry the fault in user context.
498 */ 583 */
584 gts->ustats.fmm_tlbmiss++;
499 if (!gts->ts_force_cch_reload && 585 if (!gts->ts_force_cch_reload &&
500 down_read_trylock(&gts->ts_mm->mmap_sem)) { 586 down_read_trylock(&gts->ts_mm->mmap_sem)) {
501 gts->ustats.fmm_tlbdropin++; 587 gru_try_dropin(gru, gts, tfh, NULL);
502 gru_try_dropin(gts, tfh, NULL);
503 up_read(&gts->ts_mm->mmap_sem); 588 up_read(&gts->ts_mm->mmap_sem);
504 } else { 589 } else {
505 tfh_user_polling_mode(tfh); 590 tfh_user_polling_mode(tfh);
@@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id)
509 return IRQ_HANDLED; 594 return IRQ_HANDLED;
510} 595}
511 596
597irqreturn_t gru0_intr(int irq, void *dev_id)
598{
599 return gru_intr(0, uv_numa_blade_id());
600}
601
602irqreturn_t gru1_intr(int irq, void *dev_id)
603{
604 return gru_intr(1, uv_numa_blade_id());
605}
606
607irqreturn_t gru_intr_mblade(int irq, void *dev_id)
608{
609 int blade;
610
611 for_each_possible_blade(blade) {
612 if (uv_blade_nr_possible_cpus(blade))
613 continue;
614 gru_intr(0, blade);
615 gru_intr(1, blade);
616 }
617 return IRQ_HANDLED;
618}
619
512 620
513static int gru_user_dropin(struct gru_thread_state *gts, 621static int gru_user_dropin(struct gru_thread_state *gts,
514 struct gru_tlb_fault_handle *tfh, 622 struct gru_tlb_fault_handle *tfh,
515 unsigned long __user *cb) 623 void *cb)
516{ 624{
517 struct gru_mm_struct *gms = gts->ts_gms; 625 struct gru_mm_struct *gms = gts->ts_gms;
518 int ret; 626 int ret;
519 627
520 gts->ustats.upm_tlbdropin++; 628 gts->ustats.upm_tlbmiss++;
521 while (1) { 629 while (1) {
522 wait_event(gms->ms_wait_queue, 630 wait_event(gms->ms_wait_queue,
523 atomic_read(&gms->ms_range_active) == 0); 631 atomic_read(&gms->ms_range_active) == 0);
524 prefetchw(tfh); /* Helps on hdw, required for emulator */ 632 prefetchw(tfh); /* Helps on hdw, required for emulator */
525 ret = gru_try_dropin(gts, tfh, cb); 633 ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
526 if (ret <= 0) 634 if (ret <= 0)
527 return ret; 635 return ret;
528 STAT(call_os_wait_queue); 636 STAT(call_os_wait_queue);
@@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb)
538{ 646{
539 struct gru_tlb_fault_handle *tfh; 647 struct gru_tlb_fault_handle *tfh;
540 struct gru_thread_state *gts; 648 struct gru_thread_state *gts;
541 unsigned long __user *cbp; 649 void *cbk;
542 int ucbnum, cbrnum, ret = -EINVAL; 650 int ucbnum, cbrnum, ret = -EINVAL;
543 651
544 STAT(call_os); 652 STAT(call_os);
545 gru_dbg(grudev, "address 0x%lx\n", cb);
546 653
547 /* sanity check the cb pointer */ 654 /* sanity check the cb pointer */
548 ucbnum = get_cb_number((void *)cb); 655 ucbnum = get_cb_number((void *)cb);
549 if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) 656 if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
550 return -EINVAL; 657 return -EINVAL;
551 cbp = (unsigned long *)cb;
552 658
553 gts = gru_find_lock_gts(cb); 659 gts = gru_find_lock_gts(cb);
554 if (!gts) 660 if (!gts)
555 return -EINVAL; 661 return -EINVAL;
662 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
556 663
557 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) 664 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
558 goto exit; 665 goto exit;
559 666
560 /* 667 gru_check_context_placement(gts);
561 * If force_unload is set, the UPM TLB fault is phony. The task
562 * has migrated to another node and the GSEG must be moved. Just
563 * unload the context. The task will page fault and assign a new
564 * context.
565 */
566 if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
567 gts->ts_blade != uv_numa_blade_id()) {
568 STAT(call_os_offnode_reference);
569 gts->ts_force_unload = 1;
570 }
571 668
572 /* 669 /*
573 * CCH may contain stale data if ts_force_cch_reload is set. 670 * CCH may contain stale data if ts_force_cch_reload is set.
574 */ 671 */
575 if (gts->ts_gru && gts->ts_force_cch_reload) { 672 if (gts->ts_gru && gts->ts_force_cch_reload) {
576 gts->ts_force_cch_reload = 0; 673 gts->ts_force_cch_reload = 0;
577 gru_update_cch(gts, 0); 674 gru_update_cch(gts);
578 } 675 }
579 676
580 ret = -EAGAIN; 677 ret = -EAGAIN;
581 cbrnum = thread_cbr_number(gts, ucbnum); 678 cbrnum = thread_cbr_number(gts, ucbnum);
582 if (gts->ts_force_unload) { 679 if (gts->ts_gru) {
583 gru_unload_context(gts, 1);
584 } else if (gts->ts_gru) {
585 tfh = get_tfh_by_index(gts->ts_gru, cbrnum); 680 tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
586 ret = gru_user_dropin(gts, tfh, cbp); 681 cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
682 gts->ts_ctxnum, ucbnum);
683 ret = gru_user_dropin(gts, tfh, cbk);
587 } 684 }
588exit: 685exit:
589 gru_unlock_gts(gts); 686 gru_unlock_gts(gts);
@@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg)
605 if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) 702 if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
606 return -EFAULT; 703 return -EFAULT;
607 704
608 gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
609 gts = gru_find_lock_gts(excdet.cb); 705 gts = gru_find_lock_gts(excdet.cb);
610 if (!gts) 706 if (!gts)
611 return -EINVAL; 707 return -EINVAL;
612 708
709 gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
613 ucbnum = get_cb_number((void *)excdet.cb); 710 ucbnum = get_cb_number((void *)excdet.cb);
614 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { 711 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
615 ret = -EINVAL; 712 ret = -EINVAL;
@@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg)
617 cbrnum = thread_cbr_number(gts, ucbnum); 714 cbrnum = thread_cbr_number(gts, ucbnum);
618 cbe = get_cbe_by_index(gts->ts_gru, cbrnum); 715 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
619 gru_flush_cache(cbe); /* CBE not coherent */ 716 gru_flush_cache(cbe); /* CBE not coherent */
 717 sync_core(); /* make sure we have current data */
620 excdet.opc = cbe->opccpy; 718 excdet.opc = cbe->opccpy;
621 excdet.exopc = cbe->exopccpy; 719 excdet.exopc = cbe->exopccpy;
622 excdet.ecause = cbe->ecause; 720 excdet.ecause = cbe->ecause;
@@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg)
624 excdet.exceptdet1 = cbe->idef3upd; 722 excdet.exceptdet1 = cbe->idef3upd;
625 excdet.cbrstate = cbe->cbrstate; 723 excdet.cbrstate = cbe->cbrstate;
626 excdet.cbrexecstatus = cbe->cbrexecstatus; 724 excdet.cbrexecstatus = cbe->cbrexecstatus;
627 gru_flush_cache(cbe); 725 gru_flush_cache_cbe(cbe);
628 ret = 0; 726 ret = 0;
629 } else { 727 } else {
630 ret = -EAGAIN; 728 ret = -EAGAIN;
@@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg)
733 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 831 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
734 return -EFAULT; 832 return -EFAULT;
735 833
834 /*
835 * The library creates arrays of contexts for threaded programs.
836 * If no gts exists in the array, the context has never been used & all
837 * statistics are implicitly 0.
838 */
736 gts = gru_find_lock_gts(req.gseg); 839 gts = gru_find_lock_gts(req.gseg);
737 if (gts) { 840 if (gts) {
738 memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats)); 841 memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
@@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg)
762 return -EFAULT; 865 return -EFAULT;
763 gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); 866 gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
764 867
765 gts = gru_alloc_locked_gts(req.gseg); 868 gts = gru_find_lock_gts(req.gseg);
766 if (!gts) 869 if (!gts) {
767 return -EINVAL; 870 gts = gru_alloc_locked_gts(req.gseg);
871 if (IS_ERR(gts))
872 return PTR_ERR(gts);
873 }
768 874
769 switch (req.op) { 875 switch (req.op) {
876 case sco_blade_chiplet:
877 /* Select blade/chiplet for GRU context */
878 if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] ||
879 req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
880 ret = -EINVAL;
881 } else {
882 gts->ts_user_blade_id = req.val1;
883 gts->ts_user_chiplet_id = req.val0;
884 gru_check_context_placement(gts);
885 }
886 break;
770 case sco_gseg_owner: 887 case sco_gseg_owner:
771 /* Register the current task as the GSEG owner */ 888 /* Register the current task as the GSEG owner */
772 gts->ts_tgid_owner = current->tgid; 889 gts->ts_tgid_owner = current->tgid;
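The new sco_blade_chiplet option lets a user library pin a context to a particular blade/chiplet, with -1 in either field meaning "no preference". A hedged sketch of the request a caller might build; the struct and ioctl names are assumptions, only the op/val0/val1 semantics come from the hunk above:

	/* hypothetical user-side request; field names mirror req.* above */
	struct gru_set_context_option_req req = {
		.gseg = (unsigned long)gseg,	/* any address within the target gseg */
		.op   = sco_blade_chiplet,
		.val1 = 3,			/* blade id, or -1 for any blade      */
		.val0 = 1,			/* chiplet id (0/1), or -1 for any    */
	};
	/* ioctl(gru_fd, GRU_SET_CONTEXT_OPTION, &req);  -- assumed ioctl name */

On success the driver records the preference in ts_user_blade_id/ts_user_chiplet_id and calls gru_check_context_placement() to re-evaluate the current placement.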
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index fa2d93a9fb8d..28852dfa310d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -29,13 +29,15 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/smp_lock.h>
33#include <linux/spinlock.h> 32#include <linux/spinlock.h>
34#include <linux/device.h> 33#include <linux/device.h>
35#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
36#include <linux/interrupt.h> 35#include <linux/interrupt.h>
37#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
38#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#ifdef CONFIG_X86_64
39#include <asm/uv/uv_irq.h>
40#endif
39#include <asm/uv/uv.h> 41#include <asm/uv/uv.h>
40#include "gru.h" 42#include "gru.h"
41#include "grulib.h" 43#include "grulib.h"
@@ -54,7 +56,6 @@ struct gru_stats_s gru_stats;
54/* Guaranteed user available resources on each node */ 56/* Guaranteed user available resources on each node */
55static int max_user_cbrs, max_user_dsr_bytes; 57static int max_user_cbrs, max_user_dsr_bytes;
56 58
57static struct file_operations gru_fops;
58static struct miscdevice gru_miscdev; 59static struct miscdevice gru_miscdev;
59 60
60 61
@@ -94,7 +95,7 @@ static void gru_vma_close(struct vm_area_struct *vma)
94/* 95/*
95 * gru_file_mmap 96 * gru_file_mmap
96 * 97 *
97 * Called when mmaping the device. Initializes the vma with a fault handler 98 * Called when mmapping the device. Initializes the vma with a fault handler
98 * and private data structure necessary to allocate, track, and free the 99 * and private data structure necessary to allocate, track, and free the
99 * underlying pages. 100 * underlying pages.
100 */ 101 */
@@ -132,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
132 struct gru_vma_data *vdata; 133 struct gru_vma_data *vdata;
133 int ret = -EINVAL; 134 int ret = -EINVAL;
134 135
135
136 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 136 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
137 return -EFAULT; 137 return -EFAULT;
138 138
@@ -152,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
152 vdata->vd_dsr_au_count = 152 vdata->vd_dsr_au_count =
153 GRU_DS_BYTES_TO_AU(req.data_segment_bytes); 153 GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
154 vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); 154 vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
155 vdata->vd_tlb_preload_count = req.tlb_preload_count;
155 ret = 0; 156 ret = 0;
156 } 157 }
157 up_write(&current->mm->mmap_sem); 158 up_write(&current->mm->mmap_sem);
@@ -192,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
192{ 193{
193 int err = -EBADRQC; 194 int err = -EBADRQC;
194 195
195 gru_dbg(grudev, "file %p\n", file); 196 gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
196 197
197 switch (req) { 198 switch (req) {
198 case GRU_CREATE_CONTEXT: 199 case GRU_CREATE_CONTEXT:
@@ -234,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
234 * system. 235 * system.
235 */ 236 */
236static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, 237static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
237 void *vaddr, int nid, int bid, int grunum) 238 void *vaddr, int blade_id, int chiplet_id)
238{ 239{
239 spin_lock_init(&gru->gs_lock); 240 spin_lock_init(&gru->gs_lock);
240 spin_lock_init(&gru->gs_asid_lock); 241 spin_lock_init(&gru->gs_asid_lock);
241 gru->gs_gru_base_paddr = paddr; 242 gru->gs_gru_base_paddr = paddr;
242 gru->gs_gru_base_vaddr = vaddr; 243 gru->gs_gru_base_vaddr = vaddr;
243 gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum; 244 gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
244 gru->gs_blade = gru_base[bid]; 245 gru->gs_blade = gru_base[blade_id];
245 gru->gs_blade_id = bid; 246 gru->gs_blade_id = blade_id;
247 gru->gs_chiplet_id = chiplet_id;
246 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; 248 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
247 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; 249 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
248 gru->gs_asid_limit = MAX_ASID; 250 gru->gs_asid_limit = MAX_ASID;
249 gru_tgh_flush_init(gru); 251 gru_tgh_flush_init(gru);
250 if (gru->gs_gid >= gru_max_gids) 252 if (gru->gs_gid >= gru_max_gids)
251 gru_max_gids = gru->gs_gid + 1; 253 gru_max_gids = gru->gs_gid + 1;
252 gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", 254 gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
253 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, 255 blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
254 gru->gs_gru_base_paddr); 256 gru->gs_gru_base_paddr);
255} 257}
256 258
@@ -266,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
266 268
267 max_user_cbrs = GRU_NUM_CB; 269 max_user_cbrs = GRU_NUM_CB;
268 max_user_dsr_bytes = GRU_NUM_DSR_BYTES; 270 max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
269 for_each_online_node(nid) { 271 for_each_possible_blade(bid) {
270 bid = uv_node_to_blade_id(nid); 272 pnode = uv_blade_to_pnode(bid);
271 pnode = uv_node_to_pnode(nid); 273 nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
272 if (bid < 0 || gru_base[bid]) 274 page = alloc_pages_node(nid, GFP_KERNEL, order);
273 continue;
274 page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
275 if (!page) 275 if (!page)
276 goto fail; 276 goto fail;
277 gru_base[bid] = page_address(page); 277 gru_base[bid] = page_address(page);
@@ -287,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
287 chip++, gru++) { 287 chip++, gru++) {
288 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); 288 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
289 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); 289 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
290 gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); 290 gru_init_chiplet(gru, paddr, vaddr, bid, chip);
291 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; 291 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
292 cbrs = max(cbrs, n); 292 cbrs = max(cbrs, n);
293 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; 293 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -300,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
300 return 0; 300 return 0;
301 301
302fail: 302fail:
303 for (nid--; nid >= 0; nid--) 303 for (bid--; bid >= 0; bid--)
304 free_pages((unsigned long)gru_base[nid], order); 304 free_pages((unsigned long)gru_base[bid], order);
305 return -ENOMEM; 305 return -ENOMEM;
306} 306}
307 307
308#ifdef CONFIG_IA64 308static void gru_free_tables(void)
309{
310 int bid;
311 int order = get_order(sizeof(struct gru_state) *
312 GRU_CHIPLETS_PER_BLADE);
309 313
310static int get_base_irq(void) 314 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
315 free_pages((unsigned long)gru_base[bid], order);
316}
317
318static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
311{ 319{
312 return IRQ_GRU; 320 unsigned long mmr = 0;
321 int core;
322
323 /*
324 * We target the cores of a blade and not the hyperthreads themselves.
325 * There is a max of 8 cores per socket and 2 sockets per blade,
326 * making for a max total of 16 cores (i.e., 16 CPUs without
327 * hyperthreading and 32 CPUs with hyperthreading).
328 */
329 core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
330 if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
331 return 0;
332
333 if (chiplet == 0) {
334 mmr = UVH_GR0_TLB_INT0_CONFIG +
335 core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
336 } else if (chiplet == 1) {
337 mmr = UVH_GR1_TLB_INT0_CONFIG +
338 core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
339 } else {
340 BUG();
341 }
342
343 *corep = core;
344 return mmr;
313} 345}
314 346
315#elif defined CONFIG_X86_64 347#ifdef CONFIG_IA64
316 348
317static void noop(unsigned int irq) 349static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
350
351static void gru_noop(unsigned int irq)
318{ 352{
319} 353}
320 354
321static struct irq_chip gru_chip = { 355static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
322 .name = "gru", 356 [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
323 .mask = noop, 357 .mask = gru_noop,
324 .unmask = noop, 358 .unmask = gru_noop,
325 .ack = noop, 359 .ack = gru_noop
360 }
326}; 361};
327 362
328static int get_base_irq(void) 363static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
364 irq_handler_t irq_handler, int cpu, int blade)
365{
366 unsigned long mmr;
367 int irq = IRQ_GRU + chiplet;
368 int ret, core;
369
370 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
371 if (mmr == 0)
372 return 0;
373
374 if (gru_irq_count[chiplet] == 0) {
375 gru_chip[chiplet].name = irq_name;
376 ret = set_irq_chip(irq, &gru_chip[chiplet]);
377 if (ret) {
378 printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
379 GRU_DRIVER_ID_STR, -ret);
380 return ret;
381 }
382
383 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
384 if (ret) {
385 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
386 GRU_DRIVER_ID_STR, -ret);
387 return ret;
388 }
389 }
390 gru_irq_count[chiplet]++;
391
392 return 0;
393}
394
395static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
396{
397 unsigned long mmr;
398 int core, irq = IRQ_GRU + chiplet;
399
400 if (gru_irq_count[chiplet] == 0)
401 return;
402
403 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
404 if (mmr == 0)
405 return;
406
407 if (--gru_irq_count[chiplet] == 0)
408 free_irq(irq, NULL);
409}
410
411#elif defined CONFIG_X86_64
412
413static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
414 irq_handler_t irq_handler, int cpu, int blade)
415{
416 unsigned long mmr;
417 int irq, core;
418 int ret;
419
420 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
421 if (mmr == 0)
422 return 0;
423
424 irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
425 if (irq < 0) {
426 printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
427 GRU_DRIVER_ID_STR, -irq);
428 return irq;
429 }
430
431 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
432 if (ret) {
433 uv_teardown_irq(irq);
434 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
435 GRU_DRIVER_ID_STR, -ret);
436 return ret;
437 }
438 gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
439 return 0;
440}
441
442static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
329{ 443{
330 set_irq_chip(IRQ_GRU, &gru_chip); 444 int irq, core;
331 set_irq_chip(IRQ_GRU + 1, &gru_chip); 445 unsigned long mmr;
332 return IRQ_GRU; 446
447 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
448 if (mmr) {
449 irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
450 if (irq) {
451 free_irq(irq, NULL);
452 uv_teardown_irq(irq);
453 }
454 }
333} 455}
456
334#endif 457#endif
335 458
459static void gru_teardown_tlb_irqs(void)
460{
461 int blade;
462 int cpu;
463
464 for_each_online_cpu(cpu) {
465 blade = uv_cpu_to_blade_id(cpu);
466 gru_chiplet_teardown_tlb_irq(0, cpu, blade);
467 gru_chiplet_teardown_tlb_irq(1, cpu, blade);
468 }
469 for_each_possible_blade(blade) {
470 if (uv_blade_nr_possible_cpus(blade))
471 continue;
472 gru_chiplet_teardown_tlb_irq(0, 0, blade);
473 gru_chiplet_teardown_tlb_irq(1, 0, blade);
474 }
475}
476
477static int gru_setup_tlb_irqs(void)
478{
479 int blade;
480 int cpu;
481 int ret;
482
483 for_each_online_cpu(cpu) {
484 blade = uv_cpu_to_blade_id(cpu);
485 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
486 if (ret != 0)
487 goto exit1;
488
489 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
490 if (ret != 0)
491 goto exit1;
492 }
493 for_each_possible_blade(blade) {
494 if (uv_blade_nr_possible_cpus(blade))
495 continue;
496 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
497 if (ret != 0)
498 goto exit1;
499
500 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
501 if (ret != 0)
502 goto exit1;
503 }
504
505 return 0;
506
507exit1:
508 gru_teardown_tlb_irqs();
509 return ret;
510}
511
336/* 512/*
337 * gru_init 513 * gru_init
338 * 514 *
@@ -340,8 +516,7 @@ static int get_base_irq(void)
340 */ 516 */
341static int __init gru_init(void) 517static int __init gru_init(void)
342{ 518{
343 int ret, irq, chip; 519 int ret;
344 char id[10];
345 520
346 if (!is_uv_system()) 521 if (!is_uv_system())
347 return 0; 522 return 0;
@@ -356,41 +531,29 @@ static int __init gru_init(void)
356 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; 531 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
357 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", 532 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
358 gru_start_paddr, gru_end_paddr); 533 gru_start_paddr, gru_end_paddr);
359 irq = get_base_irq();
360 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
361 ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
362 /* TODO: fix irq handling on x86. For now ignore failure because
363 * interrupts are not required & not yet fully supported */
364 if (ret) {
365 printk(KERN_WARNING
366 "!!!WARNING: GRU ignoring request failure!!!\n");
367 ret = 0;
368 }
369 if (ret) {
370 printk(KERN_ERR "%s: request_irq failed\n",
371 GRU_DRIVER_ID_STR);
372 goto exit1;
373 }
374 }
375
376 ret = misc_register(&gru_miscdev); 534 ret = misc_register(&gru_miscdev);
377 if (ret) { 535 if (ret) {
378 printk(KERN_ERR "%s: misc_register failed\n", 536 printk(KERN_ERR "%s: misc_register failed\n",
379 GRU_DRIVER_ID_STR); 537 GRU_DRIVER_ID_STR);
380 goto exit1; 538 goto exit0;
381 } 539 }
382 540
383 ret = gru_proc_init(); 541 ret = gru_proc_init();
384 if (ret) { 542 if (ret) {
385 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); 543 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
386 goto exit2; 544 goto exit1;
387 } 545 }
388 546
389 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); 547 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
390 if (ret) { 548 if (ret) {
391 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); 549 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
392 goto exit3; 550 goto exit2;
393 } 551 }
552
553 ret = gru_setup_tlb_irqs();
554 if (ret != 0)
555 goto exit3;
556
394 gru_kservices_init(); 557 gru_kservices_init();
395 558
396 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, 559 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -398,39 +561,33 @@ static int __init gru_init(void)
398 return 0; 561 return 0;
399 562
400exit3: 563exit3:
401 gru_proc_exit(); 564 gru_free_tables();
402exit2: 565exit2:
403 misc_deregister(&gru_miscdev); 566 gru_proc_exit();
404exit1: 567exit1:
405 for (--chip; chip >= 0; chip--) 568 misc_deregister(&gru_miscdev);
406 free_irq(irq + chip, NULL); 569exit0:
407 return ret; 570 return ret;
408 571
409} 572}
410 573
411static void __exit gru_exit(void) 574static void __exit gru_exit(void)
412{ 575{
413 int i, bid;
414 int order = get_order(sizeof(struct gru_state) *
415 GRU_CHIPLETS_PER_BLADE);
416
417 if (!is_uv_system()) 576 if (!is_uv_system())
418 return; 577 return;
419 578
420 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) 579 gru_teardown_tlb_irqs();
421 free_irq(IRQ_GRU + i, NULL);
422 gru_kservices_exit(); 580 gru_kservices_exit();
423 for (bid = 0; bid < GRU_MAX_BLADES; bid++) 581 gru_free_tables();
424 free_pages((unsigned long)gru_base[bid], order);
425
426 misc_deregister(&gru_miscdev); 582 misc_deregister(&gru_miscdev);
427 gru_proc_exit(); 583 gru_proc_exit();
428} 584}
429 585
430static struct file_operations gru_fops = { 586static const struct file_operations gru_fops = {
431 .owner = THIS_MODULE, 587 .owner = THIS_MODULE,
432 .unlocked_ioctl = gru_file_unlocked_ioctl, 588 .unlocked_ioctl = gru_file_unlocked_ioctl,
433 .mmap = gru_file_mmap, 589 .mmap = gru_file_mmap,
590 .llseek = noop_llseek,
434}; 591};
435 592
436static struct miscdevice gru_miscdev = { 593static struct miscdevice gru_miscdev = {
@@ -439,7 +596,7 @@ static struct miscdevice gru_miscdev = {
439 .fops = &gru_fops, 596 .fops = &gru_fops,
440}; 597};
441 598
442struct vm_operations_struct gru_vm_ops = { 599const struct vm_operations_struct gru_vm_ops = {
443 .close = gru_vma_close, 600 .close = gru_vma_close,
444 .fault = gru_fault, 601 .fault = gru_fault,
445}; 602};
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 37e7cfc53b9c..2f30badc6ffd 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -27,9 +27,11 @@
27#ifdef CONFIG_IA64 27#ifdef CONFIG_IA64
28#include <asm/processor.h> 28#include <asm/processor.h>
29#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) 29#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
30#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
30#else 31#else
31#include <asm/tsc.h> 32#include <asm/tsc.h>
32#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) 33#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
34#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
33#endif 35#endif
34 36
35/* Extract the status field from a kernel handle */ 37/* Extract the status field from a kernel handle */
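The new CLKS2NSEC() macros convert raw cycle counts to nanoseconds before they are folded into mcs_op_statistics, so the accumulated totals are in a fixed unit rather than machine-dependent cycles. A quick sanity check of the x86 form, with an assumed 2.4 GHz TSC:

	/* tsc_khz = 2400000 (2.4 GHz, assumed for the example)        */
	/* CLKS2NSEC(4800) = 4800 * 1000000 / 2400000 = 2000 ns        */
	/* i.e. 4800 cycles at 2.4 GHz is 2 us, as expected.           */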
@@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39 41
40static void update_mcs_stats(enum mcs_op op, unsigned long clks) 42static void update_mcs_stats(enum mcs_op op, unsigned long clks)
41{ 43{
44 unsigned long nsec;
45
46 nsec = CLKS2NSEC(clks);
42 atomic_long_inc(&mcs_op_statistics[op].count); 47 atomic_long_inc(&mcs_op_statistics[op].count);
43 atomic_long_add(clks, &mcs_op_statistics[op].total); 48 atomic_long_add(nsec, &mcs_op_statistics[op].total);
44 if (mcs_op_statistics[op].max < clks) 49 if (mcs_op_statistics[op].max < nsec)
45 mcs_op_statistics[op].max = clks; 50 mcs_op_statistics[op].max = nsec;
46} 51}
47 52
48static void start_instruction(void *h) 53static void start_instruction(void *h)
49{ 54{
50 unsigned long *w0 = h; 55 unsigned long *w0 = h;
51 56
52 wmb(); /* setting CMD bit must be last */ 57 wmb(); /* setting CMD/STATUS bits must be last */
53 *w0 = *w0 | 1; 58 *w0 = *w0 | 0x20001;
54 gru_flush_cache(h); 59 gru_flush_cache(h);
55} 60}
56 61
62static void report_instruction_timeout(void *h)
63{
64 unsigned long goff = GSEGPOFF((unsigned long)h);
65 char *id = "???";
66
67 if (TYPE_IS(CCH, goff))
68 id = "CCH";
69 else if (TYPE_IS(TGH, goff))
70 id = "TGH";
71 else if (TYPE_IS(TFH, goff))
72 id = "TFH";
73
74 panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
75}
76
57static int wait_instruction_complete(void *h, enum mcs_op opc) 77static int wait_instruction_complete(void *h, enum mcs_op opc)
58{ 78{
59 int status; 79 int status;
@@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
64 status = GET_MSEG_HANDLE_STATUS(h); 84 status = GET_MSEG_HANDLE_STATUS(h);
65 if (status != CCHSTATUS_ACTIVE) 85 if (status != CCHSTATUS_ACTIVE)
66 break; 86 break;
67 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) 87 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
68 panic("GRU %p is malfunctioning: start %ld, end %ld\n", 88 report_instruction_timeout(h);
69 h, start_time, (unsigned long)get_cycles()); 89 start_time = get_cycles();
90 }
70 } 91 }
71 if (gru_options & OPT_STATS) 92 if (gru_options & OPT_STATS)
72 update_mcs_stats(opc, get_cycles() - start_time); 93 update_mcs_stats(opc, get_cycles() - start_time);
@@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
75 96
76int cch_allocate(struct gru_context_configuration_handle *cch) 97int cch_allocate(struct gru_context_configuration_handle *cch)
77{ 98{
99 int ret;
100
78 cch->opc = CCHOP_ALLOCATE; 101 cch->opc = CCHOP_ALLOCATE;
79 start_instruction(cch); 102 start_instruction(cch);
80 return wait_instruction_complete(cch, cchop_allocate); 103 ret = wait_instruction_complete(cch, cchop_allocate);
104
105 /*
106 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
107 * The GSEG memory does not exist until the ALLOCATE completes.
108 */
109 sync_core();
110 return ret;
81} 111}
82 112
83int cch_start(struct gru_context_configuration_handle *cch) 113int cch_start(struct gru_context_configuration_handle *cch)
@@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch)
96 126
97int cch_deallocate(struct gru_context_configuration_handle *cch) 127int cch_deallocate(struct gru_context_configuration_handle *cch)
98{ 128{
129 int ret;
130
99 cch->opc = CCHOP_DEALLOCATE; 131 cch->opc = CCHOP_DEALLOCATE;
100 start_instruction(cch); 132 start_instruction(cch);
101 return wait_instruction_complete(cch, cchop_deallocate); 133 ret = wait_instruction_complete(cch, cchop_deallocate);
134
135 /*
136 * Stop speculation into the GSEG being unmapped by the previous
137 * DEALLOCATE.
138 */
139 sync_core();
140 return ret;
102} 141}
103 142
104int cch_interrupt_sync(struct gru_context_configuration_handle 143int cch_interrupt_sync(struct gru_context_configuration_handle
@@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
126 return wait_instruction_complete(tgh, tghop_invalidate); 165 return wait_instruction_complete(tgh, tghop_invalidate);
127} 166}
128 167
129void tfh_write_only(struct gru_tlb_fault_handle *tfh, 168int tfh_write_only(struct gru_tlb_fault_handle *tfh,
130 unsigned long pfn, unsigned long vaddr, 169 unsigned long paddr, int gaa,
131 int asid, int dirty, int pagesize) 170 unsigned long vaddr, int asid, int dirty,
171 int pagesize)
132{ 172{
133 tfh->fillasid = asid; 173 tfh->fillasid = asid;
134 tfh->fillvaddr = vaddr; 174 tfh->fillvaddr = vaddr;
135 tfh->pfn = pfn; 175 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
176 tfh->gaa = gaa;
136 tfh->dirty = dirty; 177 tfh->dirty = dirty;
137 tfh->pagesize = pagesize; 178 tfh->pagesize = pagesize;
138 tfh->opc = TFHOP_WRITE_ONLY; 179 tfh->opc = TFHOP_WRITE_ONLY;
139 start_instruction(tfh); 180 start_instruction(tfh);
181 return wait_instruction_complete(tfh, tfhop_write_only);
140} 182}
141 183
142void tfh_write_restart(struct gru_tlb_fault_handle *tfh, 184void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index f44112242d00..3f998b924d8f 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -91,6 +91,12 @@
91/* Convert an arbitrary handle address to the beginning of the GRU segment */ 91/* Convert an arbitrary handle address to the beginning of the GRU segment */
92#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) 92#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
93 93
94/* Test a valid handle address to determine the type */
95#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
96 GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
97 (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
98
99
94/* General addressing macros. */ 100/* General addressing macros. */
95static inline void *get_gseg_base_address(void *base, int ctxnum) 101static inline void *get_gseg_base_address(void *base, int ctxnum)
96{ 102{
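TYPE_IS() tests whether a handle offset falls inside the region reserved for a given handle type and is GRU_HANDLE_STRIDE aligned. A worked check with made-up layout values (the real base/stride constants are not shown here):

	/*
	 * Hypothetical layout: GRU_TFH_BASE = 0x3000, GRU_NUM_TFH = 128,
	 * GRU_HANDLE_STRIDE = 0x100.
	 *
	 *   TYPE_IS(TFH, 0x3200):  0x3000 <= 0x3200 < 0x3000 + 128 * 0x100
	 *                          and (0x3200 & 0xff) == 0   -> true (TFH #2)
	 *   TYPE_IS(TFH, 0x3210):  not stride aligned         -> false
	 *
	 * report_instruction_timeout() in gruhandles.c uses this to name the
	 * handle type (CCH/TGH/TFH) in the panic message.
	 */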
@@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
158 return vaddr + GRU_SIZE * (2 * pnode + chiplet); 164 return vaddr + GRU_SIZE * (2 * pnode + chiplet);
159} 165}
160 166
167static inline struct gru_control_block_extended *gru_tfh_to_cbe(
168 struct gru_tlb_fault_handle *tfh)
169{
170 unsigned long cbe;
171
172 cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
173 return (struct gru_control_block_extended*)cbe;
174}
175
176
161 177
162 178
163/* 179/*
@@ -236,6 +252,17 @@ enum gru_tgh_state {
236 TGHSTATE_RESTART_CTX, 252 TGHSTATE_RESTART_CTX,
237}; 253};
238 254
255enum gru_tgh_cause {
256 TGHCAUSE_RR_ECC,
257 TGHCAUSE_TLB_ECC,
258 TGHCAUSE_LRU_ECC,
259 TGHCAUSE_PS_ECC,
260 TGHCAUSE_MUL_ERR,
261 TGHCAUSE_DATA_ERR,
262 TGHCAUSE_SW_FORCE
263};
264
265
239/* 266/*
240 * TFH - TLB Global Handle 267 * TFH - TLB Global Handle
241 * Used for TLB dropins into the GRU TLB. 268 * Used for TLB dropins into the GRU TLB.
@@ -440,6 +467,12 @@ struct gru_control_block_extended {
440 unsigned int cbrexecstatus:8; 467 unsigned int cbrexecstatus:8;
441}; 468};
442 469
470/* CBE fields for active BCOPY instructions */
471#define cbe_baddr0 idef1upd
472#define cbe_baddr1 idef3upd
473#define cbe_src_cl idef6cpy
474#define cbe_nelemcur idef5upd
475
443enum gru_cbr_state { 476enum gru_cbr_state {
444 CBRSTATE_INACTIVE, 477 CBRSTATE_INACTIVE,
445 CBRSTATE_IDLE, 478 CBRSTATE_IDLE,
@@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
487int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, 520int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
488 unsigned long vaddrmask, int asid, int pagesize, int global, int n, 521 unsigned long vaddrmask, int asid, int pagesize, int global, int n,
489 unsigned short ctxbitmap); 522 unsigned short ctxbitmap);
490void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn, 523int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
491 unsigned long vaddr, int asid, int dirty, int pagesize); 524 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
492void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, 525void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
493 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); 526 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
494void tfh_restart(struct gru_tlb_fault_handle *tfh); 527void tfh_restart(struct gru_tlb_fault_handle *tfh);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 55eabfa85585..9b2062d17327 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s)
44 44
45static int gru_dump_context_data(void *grubase, 45static int gru_dump_context_data(void *grubase,
46 struct gru_context_configuration_handle *cch, 46 struct gru_context_configuration_handle *cch,
47 void __user *ubuf, int ctxnum, int dsrcnt) 47 void __user *ubuf, int ctxnum, int dsrcnt,
48 int flush_cbrs)
48{ 49{
49 void *cb, *cbe, *tfh, *gseg; 50 void *cb, *cbe, *tfh, *gseg;
50 int i, scr; 51 int i, scr;
@@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase,
55 tfh = grubase + GRU_TFH_BASE; 56 tfh = grubase + GRU_TFH_BASE;
56 57
57 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { 58 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
59 if (flush_cbrs)
60 gru_flush_cache(cb);
58 if (gru_user_copy_handle(&ubuf, cb)) 61 if (gru_user_copy_handle(&ubuf, cb))
59 goto fail; 62 goto fail;
60 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) 63 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
@@ -115,7 +118,7 @@ fail:
115 118
116static int gru_dump_context(struct gru_state *gru, int ctxnum, 119static int gru_dump_context(struct gru_state *gru, int ctxnum,
117 void __user *ubuf, void __user *ubufend, char data_opt, 120 void __user *ubuf, void __user *ubufend, char data_opt,
118 char lock_cch) 121 char lock_cch, char flush_cbrs)
119{ 122{
120 struct gru_dump_context_header hdr; 123 struct gru_dump_context_header hdr;
121 struct gru_dump_context_header __user *uhdr = ubuf; 124 struct gru_dump_context_header __user *uhdr = ubuf;
@@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
159 ret = -EFBIG; 162 ret = -EFBIG;
160 else 163 else
161 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, 164 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
162 dsrcnt); 165 dsrcnt, flush_cbrs);
163
164 } 166 }
165 if (cch_locked) 167 if (cch_locked)
166 unlock_cch_handle(cch); 168 unlock_cch_handle(cch);
@@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg)
215 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { 217 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
216 if (req.ctxnum == ctxnum || req.ctxnum < 0) { 218 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
217 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, 219 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
218 req.data_opt, req.lock_cch); 220 req.data_opt, req.lock_cch,
221 req.flush_cbrs);
219 if (ret < 0) 222 if (ret < 0)
220 goto fail; 223 goto fail;
221 ubuf += ret; 224 ubuf += ret;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index eedbf9c32760..34749ee88dfa 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -24,7 +24,6 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/smp_lock.h>
28#include <linux/spinlock.h> 27#include <linux/spinlock.h>
29#include <linux/device.h> 28#include <linux/device.h>
30#include <linux/miscdevice.h> 29#include <linux/miscdevice.h>
@@ -32,6 +31,7 @@
32#include <linux/interrupt.h> 31#include <linux/interrupt.h>
33#include <linux/uaccess.h> 32#include <linux/uaccess.h>
34#include <linux/delay.h> 33#include <linux/delay.h>
34#include <asm/io_apic.h>
35#include "gru.h" 35#include "gru.h"
36#include "grulib.h" 36#include "grulib.h"
37#include "grutables.h" 37#include "grutables.h"
@@ -98,9 +98,6 @@
98#define ASYNC_HAN_TO_BID(h) ((h) - 1) 98#define ASYNC_HAN_TO_BID(h) ((h) - 1)
99#define ASYNC_BID_TO_HAN(b) ((b) + 1) 99#define ASYNC_BID_TO_HAN(b) ((b) + 1)
100#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] 100#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
101#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
102 (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
103#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
104 101
105#define GRU_NUM_KERNEL_CBR 1 102#define GRU_NUM_KERNEL_CBR 1
106#define GRU_NUM_KERNEL_DSR_BYTES 256 103#define GRU_NUM_KERNEL_DSR_BYTES 256
@@ -161,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
161 up_read(&bs->bs_kgts_sema); 158 up_read(&bs->bs_kgts_sema);
162 down_write(&bs->bs_kgts_sema); 159 down_write(&bs->bs_kgts_sema);
163 160
164 if (!bs->bs_kgts) 161 if (!bs->bs_kgts) {
165 bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0); 162 bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
163 bs->bs_kgts->ts_user_blade_id = blade_id;
164 }
166 kgts = bs->bs_kgts; 165 kgts = bs->bs_kgts;
167 166
168 if (!kgts->ts_gru) { 167 if (!kgts->ts_gru) {
@@ -173,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
173 kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( 172 kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
174 GRU_NUM_KERNEL_DSR_BYTES * ncpus + 173 GRU_NUM_KERNEL_DSR_BYTES * ncpus +
175 bs->bs_async_dsr_bytes); 174 bs->bs_async_dsr_bytes);
176 while (!gru_assign_gru_context(kgts, blade_id)) { 175 while (!gru_assign_gru_context(kgts)) {
177 msleep(1); 176 msleep(1);
178 gru_steal_context(kgts, blade_id); 177 gru_steal_context(kgts);
179 } 178 }
180 gru_load_context(kgts); 179 gru_load_context(kgts);
181 gru = bs->bs_kgts->ts_gru; 180 gru = bs->bs_kgts->ts_gru;
@@ -201,13 +200,15 @@ static int gru_free_kernel_contexts(void)
 		bs = gru_base[bid];
 		if (!bs)
 			continue;
+
+		/* Ignore busy contexts. Don't want to block here. */
 		if (down_write_trylock(&bs->bs_kgts_sema)) {
 			kgts = bs->bs_kgts;
 			if (kgts && kgts->ts_gru)
 				gru_unload_context(kgts, 0);
-			kfree(kgts);
 			bs->bs_kgts = NULL;
 			up_write(&bs->bs_kgts_sema);
+			kfree(kgts);
 		} else {
 			ret++;
 		}
@@ -221,13 +222,21 @@ static int gru_free_kernel_contexts(void)
 static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
 {
 	struct gru_blade_state *bs;
+	int bid;
 
 	STAT(lock_kernel_context);
-	bs = gru_base[blade_id];
+again:
+	bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+	bs = gru_base[bid];
 
+	/* Handle the case where migration occurred while waiting for the sema */
 	down_read(&bs->bs_kgts_sema);
+	if (blade_id < 0 && bid != uv_numa_blade_id()) {
+		up_read(&bs->bs_kgts_sema);
+		goto again;
+	}
 	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
-		gru_load_kernel_context(bs, blade_id);
+		gru_load_kernel_context(bs, bid);
 	return bs;
 
 }
@@ -256,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
256 265
257 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); 266 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
258 preempt_disable(); 267 preempt_disable();
259 bs = gru_lock_kernel_context(uv_numa_blade_id()); 268 bs = gru_lock_kernel_context(-1);
260 lcpu = uv_blade_processor_id(); 269 lcpu = uv_blade_processor_id();
261 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; 270 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
262 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; 271 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@@ -385,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
 		struct control_block_extended_exc_detail *excdet)
 {
 	struct gru_control_block_extended *cbe;
-	struct gru_blade_state *bs;
-	int cbrnum;
-
-	bs = KCB_TO_BS(cb);
-	cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+	struct gru_thread_state *kgts = NULL;
+	unsigned long off;
+	int cbrnum, bid;
+
+	/*
+	 * Locate kgts for cb. This algorithm is SLOW but
+	 * this function is rarely called (i.e., almost never).
+	 * Performance does not matter.
+	 */
+	for_each_possible_blade(bid) {
+		if (!gru_base[bid])
+			break;
+		kgts = gru_base[bid]->bs_kgts;
+		if (!kgts || !kgts->ts_gru)
+			continue;
+		off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+		if (off < GRU_SIZE)
+			break;
+		kgts = NULL;
+	}
+	BUG_ON(!kgts);
+	cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
 	cbe = get_cbe(GRUBASE(cb), cbrnum);
 	gru_flush_cache(cbe);	/* CBE not coherent */
+	sync_core();
 	excdet->opc = cbe->opccpy;
 	excdet->exopc = cbe->exopccpy;
 	excdet->ecause = cbe->ecause;
@@ -410,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
 	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
 		gru_get_cb_exception_detail(cb, &excdet);
 		snprintf(buf, size,
-			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
-			"excdet0 0x%lx, excdet1 0x%x",
+			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
+			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
 			gen, excdet.opc, excdet.exopc, excdet.ecause,
 			excdet.exceptdet0, excdet.exceptdet1);
 	} else {
@@ -458,9 +485,10 @@ int gru_check_status_proc(void *cb)
 	int ret;
 
 	ret = gen->istatus;
-	if (ret != CBS_EXCEPTION)
-		return ret;
-	return gru_retry_exception(cb);
+	if (ret == CBS_EXCEPTION)
+		ret = gru_retry_exception(cb);
+	rmb();
+	return ret;
 
 }
 
@@ -472,7 +500,7 @@ int gru_wait_proc(void *cb)
 	ret = gru_wait_idle_or_exception(gen);
 	if (ret == CBS_EXCEPTION)
 		ret = gru_retry_exception(cb);
-
+	rmb();
 	return ret;
 }
 
@@ -539,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
539 mqd->mq = mq; 567 mqd->mq = mq;
540 mqd->mq_gpa = uv_gpa(mq); 568 mqd->mq_gpa = uv_gpa(mq);
541 mqd->qlines = qlines; 569 mqd->qlines = qlines;
542 mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid); 570 mqd->interrupt_pnode = nasid >> 1;
543 mqd->interrupt_vector = vector; 571 mqd->interrupt_vector = vector;
544 mqd->interrupt_apicid = apicid; 572 mqd->interrupt_apicid = apicid;
545 return 0; 573 return 0;
@@ -599,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
599 ret = MQE_UNEXPECTED_CB_ERR; 627 ret = MQE_UNEXPECTED_CB_ERR;
600 break; 628 break;
601 case CBSS_PAGE_OVERFLOW: 629 case CBSS_PAGE_OVERFLOW:
630 STAT(mesq_noop_page_overflow);
631 /* fallthru */
602 default: 632 default:
603 BUG(); 633 BUG();
604 } 634 }
@@ -674,18 +704,6 @@ cberr:
674} 704}
675 705
676/* 706/*
677 * Send a cross-partition interrupt to the SSI that contains the target
678 * message queue. Normally, the interrupt is automatically delivered by hardware
679 * but some error conditions require explicit delivery.
680 */
681static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
682{
683 if (mqd->interrupt_vector)
684 uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
685 mqd->interrupt_vector);
686}
687
688/*
689 * Handle a PUT failure. Note: if message was a 2-line message, one of the 707 * Handle a PUT failure. Note: if message was a 2-line message, one of the
690 * lines might have successfully have been written. Before sending the 708 * lines might have successfully have been written. Before sending the
691 * message, "present" must be cleared in BOTH lines to prevent the receiver 709 * message, "present" must be cleared in BOTH lines to prevent the receiver
@@ -694,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
694static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, 712static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
695 void *mesg, int lines) 713 void *mesg, int lines)
696{ 714{
697 unsigned long m; 715 unsigned long m, *val = mesg, gpa, save;
716 int ret;
698 717
699 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); 718 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
700 if (lines == 2) { 719 if (lines == 2) {
@@ -705,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
705 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); 724 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
706 if (gru_wait(cb) != CBS_IDLE) 725 if (gru_wait(cb) != CBS_IDLE)
707 return MQE_UNEXPECTED_CB_ERR; 726 return MQE_UNEXPECTED_CB_ERR;
708 send_message_queue_interrupt(mqd); 727
728 if (!mqd->interrupt_vector)
729 return MQE_OK;
730
731 /*
732 * Send a cross-partition interrupt to the SSI that contains the target
733 * message queue. Normally, the interrupt is automatically delivered by
734 * hardware but some error conditions require explicit delivery.
735 * Use the GRU to deliver the interrupt. Otherwise partition failures
736 * could cause unrecovered errors.
737 */
738 gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
739 save = *val;
740 *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
741 dest_Fixed);
742 gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
743 ret = gru_wait(cb);
744 *val = save;
745 if (ret != CBS_IDLE)
746 return MQE_UNEXPECTED_CB_ERR;
709 return MQE_OK; 747 return MQE_OK;
710} 748}
711 749
@@ -740,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
740 STAT(mesq_send_put_nacked); 778 STAT(mesq_send_put_nacked);
741 ret = send_message_put_nacked(cb, mqd, mesg, lines); 779 ret = send_message_put_nacked(cb, mqd, mesg, lines);
742 break; 780 break;
781 case CBSS_PAGE_OVERFLOW:
782 STAT(mesq_page_overflow);
783 /* fallthru */
743 default: 784 default:
744 BUG(); 785 BUG();
745 } 786 }
@@ -832,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
832 int present = mhdr->present; 873 int present = mhdr->present;
833 874
834 /* skip NOOP messages */ 875 /* skip NOOP messages */
835 STAT(mesq_receive);
836 while (present == MQS_NOOP) { 876 while (present == MQS_NOOP) {
837 gru_free_message(mqd, mhdr); 877 gru_free_message(mqd, mhdr);
838 mhdr = mq->next; 878 mhdr = mq->next;
@@ -852,6 +892,7 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
852 if (mhdr->lines == 2) 892 if (mhdr->lines == 2)
853 restore_present2(mhdr, mhdr->present2); 893 restore_present2(mhdr, mhdr->present2);
854 894
895 STAT(mesq_receive);
855 return mhdr; 896 return mhdr;
856} 897}
857EXPORT_SYMBOL_GPL(gru_get_next_message); 898EXPORT_SYMBOL_GPL(gru_get_next_message);
@@ -859,6 +900,29 @@ EXPORT_SYMBOL_GPL(gru_get_next_message);
859/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ 900/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
860 901
861/* 902/*
903 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
904 */
905int gru_read_gpa(unsigned long *value, unsigned long gpa)
906{
907 void *cb;
908 void *dsr;
909 int ret, iaa;
910
911 STAT(read_gpa);
912 if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
913 return MQE_BUG_NO_RESOURCES;
914 iaa = gpa >> 62;
915 gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
916 ret = gru_wait(cb);
917 if (ret == CBS_IDLE)
918 *value = *(unsigned long *)dsr;
919 gru_free_cpu_resources(cb, dsr);
920 return ret;
921}
922EXPORT_SYMBOL_GPL(gru_read_gpa);
923
924
925/*
862 * Copy a block of data using the GRU resources 926 * Copy a block of data using the GRU resources
863 */ 927 */
864int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, 928int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
@@ -899,24 +963,24 @@ static int quicktest0(unsigned long arg)
 
 	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE) {
-		printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
 		goto done;
 	}
 
 	if (*p != MAGIC) {
-		printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
 		goto done;
 	}
 	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE) {
-		printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
 		goto done;
 	}
 
 	if (word0 != word1 || word1 != MAGIC) {
 		printk(KERN_DEBUG
-			"GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
-			word1, MAGIC);
+			"GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+			smp_processor_id(), word1, MAGIC);
 		goto done;
 	}
 	ret = 0;
@@ -938,6 +1002,8 @@ static int quicktest1(unsigned long arg)
938 1002
939 /* Need 1K cacheline aligned that does not cross page boundary */ 1003 /* Need 1K cacheline aligned that does not cross page boundary */
940 p = kmalloc(4096, 0); 1004 p = kmalloc(4096, 0);
1005 if (p == NULL)
1006 return -ENOMEM;
941 mq = ALIGNUP(p, 1024); 1007 mq = ALIGNUP(p, 1024);
942 memset(mes, 0xee, sizeof(mes)); 1008 memset(mes, 0xee, sizeof(mes));
943 dw = mq; 1009 dw = mq;
@@ -951,8 +1017,11 @@ static int quicktest1(unsigned long arg)
 		if (ret)
 			break;
 	}
-	if (ret != MQE_QUEUE_FULL || i != 4)
+	if (ret != MQE_QUEUE_FULL || i != 4) {
+		printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
+			smp_processor_id(), ret, i);
 		goto done;
+	}
 
 	for (i = 0; i < 6; i++) {
 		m = gru_get_next_message(&mqd);
@@ -960,7 +1029,12 @@ static int quicktest1(unsigned long arg)
 			break;
 		gru_free_message(&mqd, m);
 	}
-	ret = (i == 4) ? 0 : -EIO;
+	if (i != 4) {
+		printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
+			smp_processor_id(), i, m, m ? m[8] : -1);
+		goto done;
+	}
+	ret = 0;
 
 done:
 	kfree(p);
@@ -976,6 +1050,7 @@ static int quicktest2(unsigned long arg)
976 int ret = 0; 1050 int ret = 0;
977 unsigned long *buf; 1051 unsigned long *buf;
978 void *cb0, *cb; 1052 void *cb0, *cb;
1053 struct gru_control_block_status *gen;
979 int i, k, istatus, bytes; 1054 int i, k, istatus, bytes;
980 1055
981 bytes = numcb * 4 * 8; 1056 bytes = numcb * 4 * 8;
@@ -995,20 +1070,30 @@ static int quicktest2(unsigned long arg)
 			XTYPE_DW, 4, 1, IMA_INTERRUPT);
 
 	ret = 0;
-	for (k = 0; k < numcb; k++) {
+	k = numcb;
+	do {
 		gru_wait_async_cbr(han);
 		for (i = 0; i < numcb; i++) {
 			cb = cb0 + i * GRU_HANDLE_STRIDE;
 			istatus = gru_check_status(cb);
-			if (istatus == CBS_ACTIVE)
-				continue;
-			if (istatus == CBS_EXCEPTION)
-				ret = -EFAULT;
-			else if (buf[i] || buf[i + 1] || buf[i + 2] ||
-					buf[i + 3])
-				ret = -EIO;
+			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+				break;
 		}
-	}
+		if (i == numcb)
+			continue;
+		if (istatus != CBS_IDLE) {
+			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+			ret = -EFAULT;
+		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+				buf[4 * i + 3]) {
+			printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+				smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+			ret = -EIO;
+		}
+		k--;
+		gen = cb;
+		gen->istatus = CBS_CALL_OS;	/* don't handle this CBR again */
+	} while (k);
 	BUG_ON(cmp.done);
 
 	gru_unlock_async_resource(han);
@@ -1018,6 +1103,22 @@ done:
1018 return ret; 1103 return ret;
1019} 1104}
1020 1105
1106#define BUFSIZE 200
1107static int quicktest3(unsigned long arg)
1108{
1109 char buf1[BUFSIZE], buf2[BUFSIZE];
1110 int ret = 0;
1111
1112 memset(buf2, 0, sizeof(buf2));
1113 memset(buf1, get_cycles() & 255, sizeof(buf1));
1114 gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
1115 if (memcmp(buf1, buf2, BUFSIZE)) {
1116 printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
1117 ret = -EIO;
1118 }
1119 return ret;
1120}
1121
1021/* 1122/*
1022 * Debugging only. User hook for various kernel tests 1123 * Debugging only. User hook for various kernel tests
1023 * of driver & gru. 1124 * of driver & gru.
@@ -1036,6 +1137,9 @@ int gru_ktest(unsigned long arg)
1036 case 2: 1137 case 2:
1037 ret = quicktest2(arg); 1138 ret = quicktest2(arg);
1038 break; 1139 break;
1140 case 3:
1141 ret = quicktest3(arg);
1142 break;
1039 case 99: 1143 case 99:
1040 ret = gru_free_kernel_contexts(); 1144 ret = gru_free_kernel_contexts();
1041 break; 1145 break;
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index d60d34bca44d..02aa94d8484a 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -131,6 +131,20 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
131 131
132 132
133/* 133/*
134 * Read a GRU global GPA. Source can be located in a remote partition.
135 *
136 * Input:
137 * value memory address where MMR value is returned
138 * gpa source numalink physical address of GPA
139 *
140 * Output:
141 * 0 OK
142 * >0 error
143 */
144int gru_read_gpa(unsigned long *value, unsigned long gpa);
145
146
147/*
134 * Copy data using the GRU. Source or destination can be located in a remote 148 * Copy data using the GRU. Source or destination can be located in a remote
135 * partition. 149 * partition.
136 * 150 *
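
As a usage illustration for the interface documented above, here is a sketch under the stated 0-on-success convention; the helper name and the -EIO mapping are assumptions for the example, not part of this patch.

/* Hypothetical caller of gru_read_gpa() -- illustration only. */
static int read_remote_word(unsigned long gpa, unsigned long *out)
{
	int ret;

	ret = gru_read_gpa(out, gpa);	/* 0 = OK, >0 = GRU CB error code */
	return ret ? -EIO : 0;
}
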
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index 889bc442a3e8..e77d1b1f9d05 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -63,18 +63,9 @@
63#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) 63#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
64#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) 64#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
65 65
66/*
67 * Statictics kept on a per-GTS basis.
68 */
69struct gts_statistics {
70 unsigned long fmm_tlbdropin;
71 unsigned long upm_tlbdropin;
72 unsigned long context_stolen;
73};
74
75struct gru_get_gseg_statistics_req { 66struct gru_get_gseg_statistics_req {
76 unsigned long gseg; 67 unsigned long gseg;
77 struct gts_statistics stats; 68 struct gru_gseg_statistics stats;
78}; 69};
79 70
80/* 71/*
@@ -86,6 +77,7 @@ struct gru_create_context_req {
86 unsigned int control_blocks; 77 unsigned int control_blocks;
87 unsigned int maximum_thread_count; 78 unsigned int maximum_thread_count;
88 unsigned int options; 79 unsigned int options;
80 unsigned char tlb_preload_count;
89}; 81};
90 82
91/* 83/*
@@ -98,11 +90,12 @@ struct gru_unload_context_req {
98/* 90/*
99 * Structure used to set context options 91 * Structure used to set context options
100 */ 92 */
101enum {sco_gseg_owner, sco_cch_req_slice}; 93enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
102struct gru_set_context_option_req { 94struct gru_set_context_option_req {
103 unsigned long gseg; 95 unsigned long gseg;
104 int op; 96 int op;
105 unsigned long val1; 97 int val0;
98 long val1;
106}; 99};
107 100
108/* 101/*
@@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req {
124 int ctxnum; 117 int ctxnum;
125 char data_opt; 118 char data_opt;
126 char lock_cch; 119 char lock_cch;
120 char flush_cbrs;
121 char fill[10];
127 pid_t pid; 122 pid_t pid;
128 void *buf; 123 void *buf;
129 size_t buflen; 124 size_t buflen;
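
The grulib.h changes above add a tlb_preload_count to context creation, a sco_blade_chiplet context option with split val0/val1 fields, and a flush_cbrs flag (plus explicit padding) in the dump request. The sketch below only shows how the new option request might be filled; it is illustrative, the meaning assigned to val0/val1 for sco_blade_chiplet is an assumption, and the ioctl that carries the request is not shown in this diff.

/* Hypothetical use of the new context option -- illustration only. */
struct gru_set_context_option_req req = {
	.gseg = gseg_addr,		/* user GSEG address (assumed) */
	.op   = sco_blade_chiplet,	/* new option added above */
	.val0 = chiplet_id,		/* assumed: chiplet id, or -1 for any */
	.val1 = blade_id,		/* assumed: blade id, or -1 for local */
};
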
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3bc643dad606..f8538bbd0bfa 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -27,6 +27,7 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/err.h>
30#include <asm/uv/uv_hub.h> 31#include <asm/uv/uv_hub.h>
31#include "gru.h" 32#include "gru.h"
32#include "grutables.h" 33#include "grutables.h"
@@ -48,12 +49,20 @@ struct device *grudev = &gru_device;
48/* 49/*
49 * Select a gru fault map to be used by the current cpu. Note that 50 * Select a gru fault map to be used by the current cpu. Note that
50 * multiple cpus may be using the same map. 51 * multiple cpus may be using the same map.
51 * ZZZ should "shift" be used?? Depends on HT cpu numbering
52 * ZZZ should be inline but did not work on emulator 52 * ZZZ should be inline but did not work on emulator
53 */ 53 */
54int gru_cpu_fault_map_id(void) 54int gru_cpu_fault_map_id(void)
55{ 55{
56#ifdef CONFIG_IA64
56 return uv_blade_processor_id() % GRU_NUM_TFM; 57 return uv_blade_processor_id() % GRU_NUM_TFM;
58#else
59 int cpu = smp_processor_id();
60 int id, core;
61
62 core = uv_cpu_core_number(cpu);
63 id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
64 return id;
65#endif
57} 66}
58 67
59/*--------- ASID Management ------------------------------------------- 68/*--------- ASID Management -------------------------------------------
@@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
286void gts_drop(struct gru_thread_state *gts) 295void gts_drop(struct gru_thread_state *gts)
287{ 296{
288 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) { 297 if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
289 gru_drop_mmu_notifier(gts->ts_gms); 298 if (gts->ts_gms)
299 gru_drop_mmu_notifier(gts->ts_gms);
290 kfree(gts); 300 kfree(gts);
291 STAT(gts_free); 301 STAT(gts_free);
292 } 302 }
@@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
310 * Allocate a thread state structure. 320 * Allocate a thread state structure.
311 */ 321 */
312struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, 322struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
313 int cbr_au_count, int dsr_au_count, int options, int tsid) 323 int cbr_au_count, int dsr_au_count,
324 unsigned char tlb_preload_count, int options, int tsid)
314{ 325{
315 struct gru_thread_state *gts; 326 struct gru_thread_state *gts;
327 struct gru_mm_struct *gms;
316 int bytes; 328 int bytes;
317 329
318 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); 330 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
319 bytes += sizeof(struct gru_thread_state); 331 bytes += sizeof(struct gru_thread_state);
320 gts = kmalloc(bytes, GFP_KERNEL); 332 gts = kmalloc(bytes, GFP_KERNEL);
321 if (!gts) 333 if (!gts)
322 return NULL; 334 return ERR_PTR(-ENOMEM);
323 335
324 STAT(gts_alloc); 336 STAT(gts_alloc);
325 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ 337 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
@@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
327 mutex_init(&gts->ts_ctxlock); 339 mutex_init(&gts->ts_ctxlock);
328 gts->ts_cbr_au_count = cbr_au_count; 340 gts->ts_cbr_au_count = cbr_au_count;
329 gts->ts_dsr_au_count = dsr_au_count; 341 gts->ts_dsr_au_count = dsr_au_count;
342 gts->ts_tlb_preload_count = tlb_preload_count;
330 gts->ts_user_options = options; 343 gts->ts_user_options = options;
344 gts->ts_user_blade_id = -1;
345 gts->ts_user_chiplet_id = -1;
331 gts->ts_tsid = tsid; 346 gts->ts_tsid = tsid;
332 gts->ts_ctxnum = NULLCTX; 347 gts->ts_ctxnum = NULLCTX;
333 gts->ts_tlb_int_select = -1; 348 gts->ts_tlb_int_select = -1;
@@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
336 if (vma) { 351 if (vma) {
337 gts->ts_mm = current->mm; 352 gts->ts_mm = current->mm;
338 gts->ts_vma = vma; 353 gts->ts_vma = vma;
339 gts->ts_gms = gru_register_mmu_notifier(); 354 gms = gru_register_mmu_notifier();
340 if (!gts->ts_gms) 355 if (IS_ERR(gms))
341 goto err; 356 goto err;
357 gts->ts_gms = gms;
342 } 358 }
343 359
344 gru_dbg(grudev, "alloc gts %p\n", gts); 360 gru_dbg(grudev, "alloc gts %p\n", gts);
@@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
346 362
347err: 363err:
348 gts_drop(gts); 364 gts_drop(gts);
349 return NULL; 365 return ERR_CAST(gms);
350} 366}
351 367
352/* 368/*
@@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
360 if (!vdata) 376 if (!vdata)
361 return NULL; 377 return NULL;
362 378
379 STAT(vdata_alloc);
363 INIT_LIST_HEAD(&vdata->vd_head); 380 INIT_LIST_HEAD(&vdata->vd_head);
364 spin_lock_init(&vdata->vd_lock); 381 spin_lock_init(&vdata->vd_lock);
365 gru_dbg(grudev, "alloc vdata %p\n", vdata); 382 gru_dbg(grudev, "alloc vdata %p\n", vdata);
@@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
392 struct gru_vma_data *vdata = vma->vm_private_data; 409 struct gru_vma_data *vdata = vma->vm_private_data;
393 struct gru_thread_state *gts, *ngts; 410 struct gru_thread_state *gts, *ngts;
394 411
395 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count, 412 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
413 vdata->vd_dsr_au_count,
414 vdata->vd_tlb_preload_count,
396 vdata->vd_user_options, tsid); 415 vdata->vd_user_options, tsid);
397 if (!gts) 416 if (IS_ERR(gts))
398 return NULL; 417 return gts;
399 418
400 spin_lock(&vdata->vd_lock); 419 spin_lock(&vdata->vd_lock);
401 ngts = gru_find_current_gts_nolock(vdata, tsid); 420 ngts = gru_find_current_gts_nolock(vdata, tsid);
@@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
493 memset(cbe + i * GRU_HANDLE_STRIDE, 0, 512 memset(cbe + i * GRU_HANDLE_STRIDE, 0,
494 GRU_CACHE_LINE_BYTES); 513 GRU_CACHE_LINE_BYTES);
495 } 514 }
515 /* Flush CBE to hide race in context restart */
516 mb();
517 gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
496 cb += GRU_HANDLE_STRIDE; 518 cb += GRU_HANDLE_STRIDE;
497 } 519 }
498 520
@@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
513 cb = gseg + GRU_CB_BASE; 535 cb = gseg + GRU_CB_BASE;
514 cbe = grubase + GRU_CBE_BASE; 536 cbe = grubase + GRU_CBE_BASE;
515 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; 537 length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
538
539 /* CBEs may not be coherent. Flush them from cache */
540 for_each_cbr_in_allocation_map(i, &cbrmap, scr)
541 gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
542 mb(); /* Let the CL flush complete */
543
516 gru_prefetch_context(gseg, cb, cbe, cbrmap, length); 544 gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
517 545
518 for_each_cbr_in_allocation_map(i, &cbrmap, scr) { 546 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
@@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
533 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); 561 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
534 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 562 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
535 563
536 gru_dbg(grudev, "gts %p\n", gts); 564 gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
565 gts, gts->ts_cbr_map, gts->ts_dsr_map);
537 lock_cch_handle(cch); 566 lock_cch_handle(cch);
538 if (cch_interrupt_sync(cch)) 567 if (cch_interrupt_sync(cch))
539 BUG(); 568 BUG();
@@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
549 578
550 if (cch_deallocate(cch)) 579 if (cch_deallocate(cch))
551 BUG(); 580 BUG();
552 gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
553 unlock_cch_handle(cch); 581 unlock_cch_handle(cch);
554 582
555 gru_free_gru_context(gts); 583 gru_free_gru_context(gts);
@@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts)
565 struct gru_context_configuration_handle *cch; 593 struct gru_context_configuration_handle *cch;
566 int i, err, asid, ctxnum = gts->ts_ctxnum; 594 int i, err, asid, ctxnum = gts->ts_ctxnum;
567 595
568 gru_dbg(grudev, "gts %p\n", gts);
569 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 596 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
570
571 lock_cch_handle(cch); 597 lock_cch_handle(cch);
572 cch->tfm_fault_bit_enable = 598 cch->tfm_fault_bit_enable =
573 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 599 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
@@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts)
591 cch->unmap_enable = 1; 617 cch->unmap_enable = 1;
592 cch->tfm_done_bit_enable = 1; 618 cch->tfm_done_bit_enable = 1;
593 cch->cb_int_enable = 1; 619 cch->cb_int_enable = 1;
620 cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
594 } else { 621 } else {
595 cch->unmap_enable = 0; 622 cch->unmap_enable = 0;
596 cch->tfm_done_bit_enable = 0; 623 cch->tfm_done_bit_enable = 0;
@@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts)
616 if (cch_start(cch)) 643 if (cch_start(cch))
617 BUG(); 644 BUG();
618 unlock_cch_handle(cch); 645 unlock_cch_handle(cch);
646
647 gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
648 gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
649 (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
619} 650}
620 651
621/* 652/*
622 * Update fields in an active CCH: 653 * Update fields in an active CCH:
623 * - retarget interrupts on local blade 654 * - retarget interrupts on local blade
624 * - update sizeavail mask 655 * - update sizeavail mask
625 * - force a delayed context unload by clearing the CCH asids. This
626 * forces TLB misses for new GRU instructions. The context is unloaded
627 * when the next TLB miss occurs.
628 */ 656 */
629int gru_update_cch(struct gru_thread_state *gts, int force_unload) 657int gru_update_cch(struct gru_thread_state *gts)
630{ 658{
631 struct gru_context_configuration_handle *cch; 659 struct gru_context_configuration_handle *cch;
632 struct gru_state *gru = gts->ts_gru; 660 struct gru_state *gru = gts->ts_gru;
@@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 		goto exit;
 	if (cch_interrupt(cch))
 		BUG();
-	if (!force_unload) {
-		for (i = 0; i < 8; i++)
-			cch->sizeavail[i] = gts->ts_sizeavail;
-		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
-		cch->tlb_int_select = gru_cpu_fault_map_id();
-		cch->tfm_fault_bit_enable =
-		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
-		    || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
-	} else {
-		for (i = 0; i < 8; i++)
-			cch->asid[i] = 0;
-		cch->tfm_fault_bit_enable = 0;
-		cch->tlb_int_enable = 0;
-		gts->ts_force_unload = 1;
-	}
+	for (i = 0; i < 8; i++)
+		cch->sizeavail[i] = gts->ts_sizeavail;
+	gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+	cch->tlb_int_select = gru_cpu_fault_map_id();
+	cch->tfm_fault_bit_enable =
+	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+	    || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
 	if (cch_start(cch))
 		BUG();
 	ret = 1;
@@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
679 699
680 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, 700 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
681 gru_cpu_fault_map_id()); 701 gru_cpu_fault_map_id());
682 return gru_update_cch(gts, 0); 702 return gru_update_cch(gts);
703}
704
705/*
706 * Check if a GRU context is allowed to use a specific chiplet. By default
707 * a context is assigned to any blade-local chiplet. However, users can
708 * override this.
709 * Returns 1 if assignment allowed, 0 otherwise
710 */
711static int gru_check_chiplet_assignment(struct gru_state *gru,
712 struct gru_thread_state *gts)
713{
714 int blade_id;
715 int chiplet_id;
716
717 blade_id = gts->ts_user_blade_id;
718 if (blade_id < 0)
719 blade_id = uv_numa_blade_id();
720
721 chiplet_id = gts->ts_user_chiplet_id;
722 return gru->gs_blade_id == blade_id &&
723 (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
724}
725
726/*
727 * Unload the gru context if it is not assigned to the correct blade or
728 * chiplet. Misassignment can occur if the process migrates to a different
729 * blade or if the user changes the selected blade/chiplet.
730 */
731void gru_check_context_placement(struct gru_thread_state *gts)
732{
733 struct gru_state *gru;
734
735 /*
736 * If the current task is the context owner, verify that the
737 * context is correctly placed. This test is skipped for non-owner
738 * references. Pthread apps use non-owner references to the CBRs.
739 */
740 gru = gts->ts_gru;
741 if (!gru || gts->ts_tgid_owner != current->tgid)
742 return;
743
744 if (!gru_check_chiplet_assignment(gru, gts)) {
745 STAT(check_context_unload);
746 gru_unload_context(gts, 1);
747 } else if (gru_retarget_intr(gts)) {
748 STAT(check_context_retarget_intr);
749 }
683} 750}
684 751
685 752
@@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts,
712 } 779 }
713} 780}
714 781
715void gru_steal_context(struct gru_thread_state *gts, int blade_id) 782void gru_steal_context(struct gru_thread_state *gts)
716{ 783{
717 struct gru_blade_state *blade; 784 struct gru_blade_state *blade;
718 struct gru_state *gru, *gru0; 785 struct gru_state *gru, *gru0;
719 struct gru_thread_state *ngts = NULL; 786 struct gru_thread_state *ngts = NULL;
720 int ctxnum, ctxnum0, flag = 0, cbr, dsr; 787 int ctxnum, ctxnum0, flag = 0, cbr, dsr;
788 int blade_id;
721 789
790 blade_id = gts->ts_user_blade_id;
791 if (blade_id < 0)
792 blade_id = uv_numa_blade_id();
722 cbr = gts->ts_cbr_au_count; 793 cbr = gts->ts_cbr_au_count;
723 dsr = gts->ts_dsr_au_count; 794 dsr = gts->ts_dsr_au_count;
724 795
@@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 	gru = blade->bs_lru_gru;
 	if (ctxnum == 0)
 		gru = next_gru(blade, gru);
+	blade->bs_lru_gru = gru;
+	blade->bs_lru_ctxnum = ctxnum;
 	ctxnum0 = ctxnum;
 	gru0 = gru;
 	while (1) {
-		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
-			break;
-		spin_lock(&gru->gs_lock);
-		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
-			if (flag && gru == gru0 && ctxnum == ctxnum0)
-				break;
-			ngts = gru->gs_gts[ctxnum];
-			/*
-			 * We are grabbing locks out of order, so trylock is
-			 * needed. GTSs are usually not locked, so the odds of
-			 * success are high. If trylock fails, try to steal a
-			 * different GSEG.
-			 */
-			if (ngts && is_gts_stealable(ngts, blade))
-				break;
-			ngts = NULL;
-			flag = 1;
-		}
-		spin_unlock(&gru->gs_lock);
-		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
-			break;
+		if (gru_check_chiplet_assignment(gru, gts)) {
+			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
+				break;
+			spin_lock(&gru->gs_lock);
+			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+				if (flag && gru == gru0 && ctxnum == ctxnum0)
+					break;
+				ngts = gru->gs_gts[ctxnum];
+				/*
+				 * We are grabbing locks out of order, so trylock is
+				 * needed. GTSs are usually not locked, so the odds of
+				 * success are high. If trylock fails, try to steal a
+				 * different GSEG.
+				 */
+				if (ngts && is_gts_stealable(ngts, blade))
+					break;
+				ngts = NULL;
+			}
+			spin_unlock(&gru->gs_lock);
+			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+				break;
+		}
+		if (flag && gru == gru0)
+			break;
+		flag = 1;
 		ctxnum = 0;
 		gru = next_gru(blade, gru);
 	}
-	blade->bs_lru_gru = gru;
-	blade->bs_lru_ctxnum = ctxnum;
 	spin_unlock(&blade->bs_lock);
 
 	if (ngts) {
761 spin_unlock(&blade->bs_lock); 836 spin_unlock(&blade->bs_lock);
762 837
763 if (ngts) { 838 if (ngts) {
@@ -776,19 +851,34 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
776} 851}
777 852
778/* 853/*
854 * Assign a gru context.
855 */
856static int gru_assign_context_number(struct gru_state *gru)
857{
858 int ctxnum;
859
860 ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
861 __set_bit(ctxnum, &gru->gs_context_map);
862 return ctxnum;
863}
864
865/*
779 * Scan the GRUs on the local blade & assign a GRU context. 866 * Scan the GRUs on the local blade & assign a GRU context.
780 */ 867 */
781struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, 868struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
782 int blade)
783{ 869{
784 struct gru_state *gru, *grux; 870 struct gru_state *gru, *grux;
785 int i, max_active_contexts; 871 int i, max_active_contexts;
872 int blade_id = gts->ts_user_blade_id;
786 873
787 874 if (blade_id < 0)
875 blade_id = uv_numa_blade_id();
788again: 876again:
789 gru = NULL; 877 gru = NULL;
790 max_active_contexts = GRU_NUM_CCH; 878 max_active_contexts = GRU_NUM_CCH;
791 for_each_gru_on_blade(grux, blade, i) { 879 for_each_gru_on_blade(grux, blade_id, i) {
880 if (!gru_check_chiplet_assignment(grux, gts))
881 continue;
792 if (check_gru_resources(grux, gts->ts_cbr_au_count, 882 if (check_gru_resources(grux, gts->ts_cbr_au_count,
793 gts->ts_dsr_au_count, 883 gts->ts_dsr_au_count,
794 max_active_contexts)) { 884 max_active_contexts)) {
@@ -809,12 +899,9 @@ again:
809 reserve_gru_resources(gru, gts); 899 reserve_gru_resources(gru, gts);
810 gts->ts_gru = gru; 900 gts->ts_gru = gru;
811 gts->ts_blade = gru->gs_blade_id; 901 gts->ts_blade = gru->gs_blade_id;
812 gts->ts_ctxnum = 902 gts->ts_ctxnum = gru_assign_context_number(gru);
813 find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
814 BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
815 atomic_inc(&gts->ts_refcnt); 903 atomic_inc(&gts->ts_refcnt);
816 gru->gs_gts[gts->ts_ctxnum] = gts; 904 gru->gs_gts[gts->ts_ctxnum] = gts;
817 __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
818 spin_unlock(&gru->gs_lock); 905 spin_unlock(&gru->gs_lock);
819 906
820 STAT(assign_context); 907 STAT(assign_context);
@@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
842{ 929{
843 struct gru_thread_state *gts; 930 struct gru_thread_state *gts;
844 unsigned long paddr, vaddr; 931 unsigned long paddr, vaddr;
845 int blade_id;
846 932
847 vaddr = (unsigned long)vmf->virtual_address; 933 vaddr = (unsigned long)vmf->virtual_address;
848 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", 934 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
857again: 943again:
858 mutex_lock(&gts->ts_ctxlock); 944 mutex_lock(&gts->ts_ctxlock);
859 preempt_disable(); 945 preempt_disable();
860 blade_id = uv_numa_blade_id();
861 946
862 if (gts->ts_gru) { 947 gru_check_context_placement(gts);
863 if (gts->ts_gru->gs_blade_id != blade_id) {
864 STAT(migrated_nopfn_unload);
865 gru_unload_context(gts, 1);
866 } else {
867 if (gru_retarget_intr(gts))
868 STAT(migrated_nopfn_retarget);
869 }
870 }
871 948
872 if (!gts->ts_gru) { 949 if (!gts->ts_gru) {
873 STAT(load_user_context); 950 STAT(load_user_context);
874 if (!gru_assign_gru_context(gts, blade_id)) { 951 if (!gru_assign_gru_context(gts)) {
875 preempt_enable(); 952 preempt_enable();
876 mutex_unlock(&gts->ts_ctxlock); 953 mutex_unlock(&gts->ts_ctxlock);
877 set_current_state(TASK_INTERRUPTIBLE); 954 set_current_state(TASK_INTERRUPTIBLE);
878 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ 955 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
879 blade_id = uv_numa_blade_id();
880 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) 956 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
881 gru_steal_context(gts, blade_id); 957 gru_steal_context(gts);
882 goto again; 958 goto again;
883 } 959 }
884 gru_load_context(gts); 960 gru_load_context(gts);
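
For readers skimming the grumain.c changes: the new placement logic reduces to a small predicate. The context may run on a GRU whose blade matches the user-selected blade (or the local blade when none is selected) and whose chiplet matches the user-selected chiplet (or any chiplet when none is selected). A standalone sketch of that predicate, with simplified names, follows; it mirrors gru_check_chiplet_assignment() but is not the driver's code.

/* Simplified sketch of the placement predicate -- illustration only. */
static int chiplet_allowed(int gru_blade, int gru_chiplet,
			   int want_blade, int want_chiplet, int local_blade)
{
	if (want_blade < 0)
		want_blade = local_blade;	/* default: caller's blade */
	return gru_blade == want_blade &&
	       (want_chiplet < 0 || want_chiplet == gru_chiplet);
}
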
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 9cbf95bedce6..7768b87d995b 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36{ 36{
37 unsigned long val = atomic_long_read(v); 37 unsigned long val = atomic_long_read(v);
38 38
39 if (val) 39 seq_printf(s, "%16lu %s\n", val, id);
40 seq_printf(s, "%16lu %s\n", val, id);
41} 40}
42 41
43static int statistics_show(struct seq_file *s, void *p) 42static int statistics_show(struct seq_file *s, void *p)
@@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p)
46 printstat(s, vdata_free); 45 printstat(s, vdata_free);
47 printstat(s, gts_alloc); 46 printstat(s, gts_alloc);
48 printstat(s, gts_free); 47 printstat(s, gts_free);
49 printstat(s, vdata_double_alloc); 48 printstat(s, gms_alloc);
49 printstat(s, gms_free);
50 printstat(s, gts_double_allocate); 50 printstat(s, gts_double_allocate);
51 printstat(s, assign_context); 51 printstat(s, assign_context);
52 printstat(s, assign_context_failed); 52 printstat(s, assign_context_failed);
@@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p)
59 printstat(s, steal_kernel_context); 59 printstat(s, steal_kernel_context);
60 printstat(s, steal_context_failed); 60 printstat(s, steal_context_failed);
61 printstat(s, nopfn); 61 printstat(s, nopfn);
62 printstat(s, break_cow);
63 printstat(s, asid_new); 62 printstat(s, asid_new);
64 printstat(s, asid_next); 63 printstat(s, asid_next);
65 printstat(s, asid_wrap); 64 printstat(s, asid_wrap);
66 printstat(s, asid_reuse); 65 printstat(s, asid_reuse);
67 printstat(s, intr); 66 printstat(s, intr);
67 printstat(s, intr_cbr);
68 printstat(s, intr_tfh);
69 printstat(s, intr_spurious);
68 printstat(s, intr_mm_lock_failed); 70 printstat(s, intr_mm_lock_failed);
69 printstat(s, call_os); 71 printstat(s, call_os);
70 printstat(s, call_os_offnode_reference);
71 printstat(s, call_os_check_for_bug);
72 printstat(s, call_os_wait_queue); 72 printstat(s, call_os_wait_queue);
73 printstat(s, user_flush_tlb); 73 printstat(s, user_flush_tlb);
74 printstat(s, user_unload_context); 74 printstat(s, user_unload_context);
75 printstat(s, user_exception); 75 printstat(s, user_exception);
76 printstat(s, set_context_option); 76 printstat(s, set_context_option);
77 printstat(s, migrate_check); 77 printstat(s, check_context_retarget_intr);
78 printstat(s, migrated_retarget); 78 printstat(s, check_context_unload);
79 printstat(s, migrated_unload);
80 printstat(s, migrated_unload_delay);
81 printstat(s, migrated_nopfn_retarget);
82 printstat(s, migrated_nopfn_unload);
83 printstat(s, tlb_dropin); 79 printstat(s, tlb_dropin);
80 printstat(s, tlb_preload_page);
84 printstat(s, tlb_dropin_fail_no_asid); 81 printstat(s, tlb_dropin_fail_no_asid);
85 printstat(s, tlb_dropin_fail_upm); 82 printstat(s, tlb_dropin_fail_upm);
86 printstat(s, tlb_dropin_fail_invalid); 83 printstat(s, tlb_dropin_fail_invalid);
@@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p)
88 printstat(s, tlb_dropin_fail_idle); 85 printstat(s, tlb_dropin_fail_idle);
89 printstat(s, tlb_dropin_fail_fmm); 86 printstat(s, tlb_dropin_fail_fmm);
90 printstat(s, tlb_dropin_fail_no_exception); 87 printstat(s, tlb_dropin_fail_no_exception);
91 printstat(s, tlb_dropin_fail_no_exception_war);
92 printstat(s, tfh_stale_on_fault); 88 printstat(s, tfh_stale_on_fault);
93 printstat(s, mmu_invalidate_range); 89 printstat(s, mmu_invalidate_range);
94 printstat(s, mmu_invalidate_page); 90 printstat(s, mmu_invalidate_page);
95 printstat(s, mmu_clear_flush_young);
96 printstat(s, flush_tlb); 91 printstat(s, flush_tlb);
97 printstat(s, flush_tlb_gru); 92 printstat(s, flush_tlb_gru);
98 printstat(s, flush_tlb_gru_tgh); 93 printstat(s, flush_tlb_gru_tgh);
99 printstat(s, flush_tlb_gru_zero_asid); 94 printstat(s, flush_tlb_gru_zero_asid);
100 printstat(s, copy_gpa); 95 printstat(s, copy_gpa);
96 printstat(s, read_gpa);
101 printstat(s, mesq_receive); 97 printstat(s, mesq_receive);
102 printstat(s, mesq_receive_none); 98 printstat(s, mesq_receive_none);
103 printstat(s, mesq_send); 99 printstat(s, mesq_send);
@@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p)
108 printstat(s, mesq_send_qlimit_reached); 104 printstat(s, mesq_send_qlimit_reached);
109 printstat(s, mesq_send_amo_nacked); 105 printstat(s, mesq_send_amo_nacked);
110 printstat(s, mesq_send_put_nacked); 106 printstat(s, mesq_send_put_nacked);
111 printstat(s, mesq_qf_not_full);
112 printstat(s, mesq_qf_locked); 107 printstat(s, mesq_qf_locked);
113 printstat(s, mesq_qf_noop_not_full); 108 printstat(s, mesq_qf_noop_not_full);
114 printstat(s, mesq_qf_switch_head_failed); 109 printstat(s, mesq_qf_switch_head_failed);
@@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p)
118 printstat(s, mesq_noop_qlimit_reached); 113 printstat(s, mesq_noop_qlimit_reached);
119 printstat(s, mesq_noop_amo_nacked); 114 printstat(s, mesq_noop_amo_nacked);
120 printstat(s, mesq_noop_put_nacked); 115 printstat(s, mesq_noop_put_nacked);
116 printstat(s, mesq_noop_page_overflow);
121 return 0; 117 return 0;
122} 118}
123 119
@@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
133 int op; 129 int op;
134 unsigned long total, count, max; 130 unsigned long total, count, max;
135 static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", 131 static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
136 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; 132 "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
133 "tfh_write_restart", "tgh_invalidate"};
137 134
135 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
138 for (op = 0; op < mcsop_last; op++) { 136 for (op = 0; op < mcsop_last; op++) {
139 count = atomic_long_read(&mcs_op_statistics[op].count); 137 count = atomic_long_read(&mcs_op_statistics[op].count);
140 total = atomic_long_read(&mcs_op_statistics[op].total); 138 total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file,
154 152
155static int options_show(struct seq_file *s, void *p) 153static int options_show(struct seq_file *s, void *p)
156{ 154{
155 seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
157 seq_printf(s, "0x%lx\n", gru_options); 156 seq_printf(s, "0x%lx\n", gru_options);
158 return 0; 157 return 0;
159} 158}
@@ -161,14 +160,15 @@ static int options_show(struct seq_file *s, void *p)
 static ssize_t options_write(struct file *file, const char __user *userbuf,
 			     size_t count, loff_t *data)
 {
-	unsigned long val;
-	char buf[80];
+	char buf[20];
 
-	if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0)
+	if (count >= sizeof(buf))
+		return -EINVAL;
+	if (copy_from_user(buf, userbuf, count))
 		return -EFAULT;
-	buf[count - 1] = '\0';
-	if (!strict_strtoul(buf, 10, &val))
-		gru_options = val;
+	buf[count] = '\0';
+	if (strict_strtoul(buf, 0, &gru_options))
+		return -EINVAL;
 
 	return count;
 }
@@ -182,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data)
182 const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" }; 182 const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
183 183
184 if (gid == 0) 184 if (gid == 0)
185 seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid", 185 seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
186 "ctx#", "pid", "cbrs", "dsbytes", "mode"); 186 "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
187 if (gru) 187 if (gru)
188 for (i = 0; i < GRU_NUM_CCH; i++) { 188 for (i = 0; i < GRU_NUM_CCH; i++) {
189 ts = gru->gs_gts[i]; 189 ts = gru->gs_gts[i];
190 if (!ts) 190 if (!ts)
191 continue; 191 continue;
192 seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n", 192 seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
193 gru->gs_gid, gru->gs_blade_id, i, 193 gru->gs_gid, gru->gs_blade_id, i,
194 ts->ts_tgid_owner, 194 is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
195 is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
195 ts->ts_cbr_au_count * GRU_CBR_AU_SIZE, 196 ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
196 ts->ts_cbr_au_count * GRU_DSR_AU_BYTES, 197 ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
197 mode[ts->ts_user_options & 198 mode[ts->ts_user_options &
@@ -340,10 +341,9 @@ static struct proc_dir_entry *proc_gru __read_mostly;
340 341
341static int create_proc_file(struct proc_entry *p) 342static int create_proc_file(struct proc_entry *p)
342{ 343{
343 p->entry = create_proc_entry(p->name, p->mode, proc_gru); 344 p->entry = proc_create(p->name, p->mode, proc_gru, p->fops);
344 if (!p->entry) 345 if (!p->entry)
345 return -1; 346 return -1;
346 p->entry->proc_fops = p->fops;
347 return 0; 347 return 0;
348} 348}
349 349
@@ -355,7 +355,7 @@ static void delete_proc_files(void)
355 for (p = proc_files; p->name; p++) 355 for (p = proc_files; p->name; p++)
356 if (p->entry) 356 if (p->entry)
357 remove_proc_entry(p->name, proc_gru); 357 remove_proc_entry(p->name, proc_gru);
358 remove_proc_entry("gru", NULL); 358 remove_proc_entry("gru", proc_gru->parent);
359 } 359 }
360} 360}
361 361
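
The gruprocfs.c hunk above replaces create_proc_entry() plus a manual proc_fops assignment with a single proc_create() call. A generic sketch of that pattern is shown below; example_open/example_show/example_fops are placeholder names for the illustration, not this driver's symbols, and the snippet assumes <linux/proc_fs.h> and <linux/seq_file.h>.

/* Generic proc_create() pattern -- illustration only. */
static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);	/* example_show: assumed */
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* entry = proc_create("example", 0444, parent_dir, &example_fops); */
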
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 34ab3d453919..7a8b9068ea03 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -161,7 +161,7 @@ extern unsigned int gru_max_gids;
161#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) 161#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
162 162
163#define GRU_DRIVER_ID_STR "SGI GRU Device Driver" 163#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
164#define GRU_DRIVER_VERSION_STR "0.80" 164#define GRU_DRIVER_VERSION_STR "0.85"
165 165
166/* 166/*
167 * GRU statistics. 167 * GRU statistics.
@@ -171,7 +171,8 @@ struct gru_stats_s {
171 atomic_long_t vdata_free; 171 atomic_long_t vdata_free;
172 atomic_long_t gts_alloc; 172 atomic_long_t gts_alloc;
173 atomic_long_t gts_free; 173 atomic_long_t gts_free;
174 atomic_long_t vdata_double_alloc; 174 atomic_long_t gms_alloc;
175 atomic_long_t gms_free;
175 atomic_long_t gts_double_allocate; 176 atomic_long_t gts_double_allocate;
176 atomic_long_t assign_context; 177 atomic_long_t assign_context;
177 atomic_long_t assign_context_failed; 178 atomic_long_t assign_context_failed;
@@ -184,28 +185,25 @@ struct gru_stats_s {
184 atomic_long_t steal_kernel_context; 185 atomic_long_t steal_kernel_context;
185 atomic_long_t steal_context_failed; 186 atomic_long_t steal_context_failed;
186 atomic_long_t nopfn; 187 atomic_long_t nopfn;
187 atomic_long_t break_cow;
188 atomic_long_t asid_new; 188 atomic_long_t asid_new;
189 atomic_long_t asid_next; 189 atomic_long_t asid_next;
190 atomic_long_t asid_wrap; 190 atomic_long_t asid_wrap;
191 atomic_long_t asid_reuse; 191 atomic_long_t asid_reuse;
192 atomic_long_t intr; 192 atomic_long_t intr;
193 atomic_long_t intr_cbr;
194 atomic_long_t intr_tfh;
195 atomic_long_t intr_spurious;
193 atomic_long_t intr_mm_lock_failed; 196 atomic_long_t intr_mm_lock_failed;
194 atomic_long_t call_os; 197 atomic_long_t call_os;
195 atomic_long_t call_os_offnode_reference;
196 atomic_long_t call_os_check_for_bug;
197 atomic_long_t call_os_wait_queue; 198 atomic_long_t call_os_wait_queue;
198 atomic_long_t user_flush_tlb; 199 atomic_long_t user_flush_tlb;
199 atomic_long_t user_unload_context; 200 atomic_long_t user_unload_context;
200 atomic_long_t user_exception; 201 atomic_long_t user_exception;
201 atomic_long_t set_context_option; 202 atomic_long_t set_context_option;
202 atomic_long_t migrate_check; 203 atomic_long_t check_context_retarget_intr;
203 atomic_long_t migrated_retarget; 204 atomic_long_t check_context_unload;
204 atomic_long_t migrated_unload;
205 atomic_long_t migrated_unload_delay;
206 atomic_long_t migrated_nopfn_retarget;
207 atomic_long_t migrated_nopfn_unload;
208 atomic_long_t tlb_dropin; 205 atomic_long_t tlb_dropin;
206 atomic_long_t tlb_preload_page;
209 atomic_long_t tlb_dropin_fail_no_asid; 207 atomic_long_t tlb_dropin_fail_no_asid;
210 atomic_long_t tlb_dropin_fail_upm; 208 atomic_long_t tlb_dropin_fail_upm;
211 atomic_long_t tlb_dropin_fail_invalid; 209 atomic_long_t tlb_dropin_fail_invalid;
@@ -213,17 +211,16 @@ struct gru_stats_s {
213 atomic_long_t tlb_dropin_fail_idle; 211 atomic_long_t tlb_dropin_fail_idle;
214 atomic_long_t tlb_dropin_fail_fmm; 212 atomic_long_t tlb_dropin_fail_fmm;
215 atomic_long_t tlb_dropin_fail_no_exception; 213 atomic_long_t tlb_dropin_fail_no_exception;
216 atomic_long_t tlb_dropin_fail_no_exception_war;
217 atomic_long_t tfh_stale_on_fault; 214 atomic_long_t tfh_stale_on_fault;
218 atomic_long_t mmu_invalidate_range; 215 atomic_long_t mmu_invalidate_range;
219 atomic_long_t mmu_invalidate_page; 216 atomic_long_t mmu_invalidate_page;
220 atomic_long_t mmu_clear_flush_young;
221 atomic_long_t flush_tlb; 217 atomic_long_t flush_tlb;
222 atomic_long_t flush_tlb_gru; 218 atomic_long_t flush_tlb_gru;
223 atomic_long_t flush_tlb_gru_tgh; 219 atomic_long_t flush_tlb_gru_tgh;
224 atomic_long_t flush_tlb_gru_zero_asid; 220 atomic_long_t flush_tlb_gru_zero_asid;
225 221
226 atomic_long_t copy_gpa; 222 atomic_long_t copy_gpa;
223 atomic_long_t read_gpa;
227 224
228 atomic_long_t mesq_receive; 225 atomic_long_t mesq_receive;
229 atomic_long_t mesq_receive_none; 226 atomic_long_t mesq_receive_none;
@@ -235,7 +232,7 @@ struct gru_stats_s {
235 atomic_long_t mesq_send_qlimit_reached; 232 atomic_long_t mesq_send_qlimit_reached;
236 atomic_long_t mesq_send_amo_nacked; 233 atomic_long_t mesq_send_amo_nacked;
237 atomic_long_t mesq_send_put_nacked; 234 atomic_long_t mesq_send_put_nacked;
238 atomic_long_t mesq_qf_not_full; 235 atomic_long_t mesq_page_overflow;
239 atomic_long_t mesq_qf_locked; 236 atomic_long_t mesq_qf_locked;
240 atomic_long_t mesq_qf_noop_not_full; 237 atomic_long_t mesq_qf_noop_not_full;
241 atomic_long_t mesq_qf_switch_head_failed; 238 atomic_long_t mesq_qf_switch_head_failed;
@@ -245,11 +242,13 @@ struct gru_stats_s {
245 atomic_long_t mesq_noop_qlimit_reached; 242 atomic_long_t mesq_noop_qlimit_reached;
246 atomic_long_t mesq_noop_amo_nacked; 243 atomic_long_t mesq_noop_amo_nacked;
247 atomic_long_t mesq_noop_put_nacked; 244 atomic_long_t mesq_noop_put_nacked;
245 atomic_long_t mesq_noop_page_overflow;
248 246
249}; 247};
250 248
251enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, 249enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
252 cchop_deallocate, tghop_invalidate, mcsop_last}; 250 cchop_deallocate, tfhop_write_only, tfhop_write_restart,
251 tghop_invalidate, mcsop_last};
253 252
254struct mcs_op_statistic { 253struct mcs_op_statistic {
255 atomic_long_t count; 254 atomic_long_t count;
@@ -259,8 +258,8 @@ struct mcs_op_statistic {
259 258
260extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; 259extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
261 260
262#define OPT_DPRINT 1 261#define OPT_DPRINT 1
263#define OPT_STATS 2 262#define OPT_STATS 2
264 263
265 264
266#define IRQ_GRU 110 /* Starting IRQ number for interrupts */ 265#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
283#define gru_dbg(dev, fmt, x...) \ 282#define gru_dbg(dev, fmt, x...) \
284 do { \ 283 do { \
285 if (gru_options & OPT_DPRINT) \ 284 if (gru_options & OPT_DPRINT) \
286 dev_dbg(dev, "%s: " fmt, __func__, x); \ 285 printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
287 } while (0) 286 } while (0)
288#else 287#else
289#define gru_dbg(x...) 288#define gru_dbg(x...)
@@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
297#define ASID_INC 8 /* number of regions */ 296#define ASID_INC 8 /* number of regions */
298 297
299/* Generate a GRU asid value from a GRU base asid & a virtual address. */ 298/* Generate a GRU asid value from a GRU base asid & a virtual address. */
300#if defined CONFIG_IA64
301#define VADDR_HI_BIT 64 299#define VADDR_HI_BIT 64
302#elif defined CONFIG_X86_64
303#define VADDR_HI_BIT 48
304#else
305#error "Unsupported architecture"
306#endif
307#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) 300#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
308#define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) 301#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
309 302
@@ -345,6 +338,7 @@ struct gru_vma_data {
345 long vd_user_options;/* misc user option flags */ 338 long vd_user_options;/* misc user option flags */
346 int vd_cbr_au_count; 339 int vd_cbr_au_count;
347 int vd_dsr_au_count; 340 int vd_dsr_au_count;
341 unsigned char vd_tlb_preload_count;
348}; 342};
349 343
350/* 344/*
@@ -360,6 +354,7 @@ struct gru_thread_state {
360 struct gru_state *ts_gru; /* GRU where the context is 354 struct gru_state *ts_gru; /* GRU where the context is
361 loaded */ 355 loaded */
362 struct gru_mm_struct *ts_gms; /* asid & ioproc struct */ 356 struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
357 unsigned char ts_tlb_preload_count; /* TLB preload pages */
363 unsigned long ts_cbr_map; /* map of allocated CBRs */ 358 unsigned long ts_cbr_map; /* map of allocated CBRs */
364 unsigned long ts_dsr_map; /* map of allocated DATA 359 unsigned long ts_dsr_map; /* map of allocated DATA
365 resources */ 360 resources */
@@ -368,6 +363,8 @@ struct gru_thread_state {
368 long ts_user_options;/* misc user option flags */ 363 long ts_user_options;/* misc user option flags */
369 pid_t ts_tgid_owner; /* task that is using the 364 pid_t ts_tgid_owner; /* task that is using the
370 context - for migration */ 365 context - for migration */
366 short ts_user_blade_id;/* user selected blade */
367 char ts_user_chiplet_id;/* user selected chiplet */
371 unsigned short ts_sizeavail; /* Pagesizes in use */ 368 unsigned short ts_sizeavail; /* Pagesizes in use */
372 int ts_tsid; /* thread that owns the 369 int ts_tsid; /* thread that owns the
373 structure */ 370 structure */
@@ -384,13 +381,11 @@ struct gru_thread_state {
384 char ts_blade; /* If >= 0, migrate context if 381 char ts_blade; /* If >= 0, migrate context if
385                             ref from different blade */     382                             ref from different blade */
386 char ts_force_cch_reload; 383 char ts_force_cch_reload;
387 char ts_force_unload;/* force context to be unloaded
388 after migration */
389 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each 384 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
390 allocated CB */ 385 allocated CB */
391 int ts_data_valid; /* Indicates if ts_gdata has 386 int ts_data_valid; /* Indicates if ts_gdata has
392 valid data */ 387 valid data */
393 struct gts_statistics ustats; /* User statistics */ 388 struct gru_gseg_statistics ustats; /* User statistics */
394 unsigned long ts_gdata[0]; /* save area for GRU data (CB, 389 unsigned long ts_gdata[0]; /* save area for GRU data (CB,
395 DS, CBE) */ 390 DS, CBE) */
396}; 391};
@@ -422,6 +417,7 @@ struct gru_state {
422 gru segments (64) */ 417 gru segments (64) */
423 unsigned short gs_gid; /* unique GRU number */ 418 unsigned short gs_gid; /* unique GRU number */
424 unsigned short gs_blade_id; /* blade of GRU */ 419 unsigned short gs_blade_id; /* blade of GRU */
420 unsigned char gs_chiplet_id; /* blade chiplet of GRU */
425 unsigned char gs_tgh_local_shift; /* used to pick TGH for 421 unsigned char gs_tgh_local_shift; /* used to pick TGH for
426 local flush */ 422 local flush */
427 unsigned char gs_tgh_first_remote; /* starting TGH# for 423 unsigned char gs_tgh_first_remote; /* starting TGH# for
@@ -453,6 +449,7 @@ struct gru_state {
453 in use */ 449 in use */
454 struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using 450 struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
455 the context */ 451 the context */
452 int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
456}; 453};
457 454
458/* 455/*
@@ -519,8 +516,7 @@ struct gru_blade_state {
519 516
520/* Scan all active GRUs in a GRU bitmap */ 517/* Scan all active GRUs in a GRU bitmap */
521#define for_each_gru_in_bitmap(gid, map) \ 518#define for_each_gru_in_bitmap(gid, map) \
522 for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\ 519 for_each_set_bit((gid), (map), GRU_MAX_GRUS)
523 (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid)))
524 520
525/* Scan all active GRUs on a specific blade */ 521/* Scan all active GRUs on a specific blade */
526#define for_each_gru_on_blade(gru, nid, i) \ 522#define for_each_gru_on_blade(gru, nid, i) \
@@ -539,23 +535,17 @@ struct gru_blade_state {
539 535
540/* Scan each CBR whose bit is set in a TFM (or copy of) */ 536/* Scan each CBR whose bit is set in a TFM (or copy of) */
541#define for_each_cbr_in_tfm(i, map) \ 537#define for_each_cbr_in_tfm(i, map) \
542 for ((i) = find_first_bit(map, GRU_NUM_CBE); \ 538 for_each_set_bit((i), (map), GRU_NUM_CBE)
543 (i) < GRU_NUM_CBE; \
544 (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i))
545 539
546/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ 540/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
547#define for_each_cbr_in_allocation_map(i, map, k) \ 541#define for_each_cbr_in_allocation_map(i, map, k) \
548 for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \ 542 for_each_set_bit((k), (map), GRU_CBR_AU) \
549 (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \
550 for ((i) = (k)*GRU_CBR_AU_SIZE; \ 543 for ((i) = (k)*GRU_CBR_AU_SIZE; \
551 (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) 544 (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
552 545
553/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */ 546/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
554#define for_each_dsr_in_allocation_map(i, map, k) \ 547#define for_each_dsr_in_allocation_map(i, map, k) \
555 for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\ 548 for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \
556 (k) < GRU_DSR_AU; \
557 (k) = find_next_bit((const unsigned long *)map, \
558 GRU_DSR_AU, (k) + 1)) \
559 for ((i) = (k) * GRU_DSR_AU_CL; \ 549 for ((i) = (k) * GRU_DSR_AU_CL; \
560 (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) 550 (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
561 551
@@ -619,12 +609,21 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
619 return !gts->ts_mm; 609 return !gts->ts_mm;
620} 610}
621 611
612/*
613 * The following are for Nehalem-EX. A more general scheme is needed for
614 * future processors.
615 */
616#define UV_MAX_INT_CORES 8
617#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
618#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
619#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
620 ((cpu_physical_id(p) >> 1) & 3))
622/*----------------------------------------------------------------------------- 621/*-----------------------------------------------------------------------------
623 * Function prototypes & externs 622 * Function prototypes & externs
624 */ 623 */
625struct gru_unload_context_req; 624struct gru_unload_context_req;
626 625
627extern struct vm_operations_struct gru_vm_ops; 626extern const struct vm_operations_struct gru_vm_ops;
628extern struct device *grudev; 627extern struct device *grudev;
629 628
630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, 629extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
@@ -633,24 +632,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
633 *vma, int tsid); 632 *vma, int tsid);
634extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct 633extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
635 *vma, int tsid); 634 *vma, int tsid);
636extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, 635extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
637 int blade);
638extern void gru_load_context(struct gru_thread_state *gts); 636extern void gru_load_context(struct gru_thread_state *gts);
639extern void gru_steal_context(struct gru_thread_state *gts, int blade_id); 637extern void gru_steal_context(struct gru_thread_state *gts);
640extern void gru_unload_context(struct gru_thread_state *gts, int savestate); 638extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
641extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); 639extern int gru_update_cch(struct gru_thread_state *gts);
642extern void gts_drop(struct gru_thread_state *gts); 640extern void gts_drop(struct gru_thread_state *gts);
643extern void gru_tgh_flush_init(struct gru_state *gru); 641extern void gru_tgh_flush_init(struct gru_state *gru);
644extern int gru_kservices_init(void); 642extern int gru_kservices_init(void);
645extern void gru_kservices_exit(void); 643extern void gru_kservices_exit(void);
644extern irqreturn_t gru0_intr(int irq, void *dev_id);
645extern irqreturn_t gru1_intr(int irq, void *dev_id);
646extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
646extern int gru_dump_chiplet_request(unsigned long arg); 647extern int gru_dump_chiplet_request(unsigned long arg);
647extern long gru_get_gseg_statistics(unsigned long arg); 648extern long gru_get_gseg_statistics(unsigned long arg);
648extern irqreturn_t gru_intr(int irq, void *dev_id);
649extern int gru_handle_user_call_os(unsigned long address); 649extern int gru_handle_user_call_os(unsigned long address);
650extern int gru_user_flush_tlb(unsigned long arg); 650extern int gru_user_flush_tlb(unsigned long arg);
651extern int gru_user_unload_context(unsigned long arg); 651extern int gru_user_unload_context(unsigned long arg);
652extern int gru_get_exception_detail(unsigned long arg); 652extern int gru_get_exception_detail(unsigned long arg);
653extern int gru_set_context_option(unsigned long address); 653extern int gru_set_context_option(unsigned long address);
654extern void gru_check_context_placement(struct gru_thread_state *gts);
654extern int gru_cpu_fault_map_id(void); 655extern int gru_cpu_fault_map_id(void);
655extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); 656extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
656extern void gru_flush_all_tlb(struct gru_state *gru); 657extern void gru_flush_all_tlb(struct gru_state *gru);
@@ -658,7 +659,8 @@ extern int gru_proc_init(void);
658extern void gru_proc_exit(void); 659extern void gru_proc_exit(void);
659 660
660extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, 661extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
661 int cbr_au_count, int dsr_au_count, int options, int tsid); 662 int cbr_au_count, int dsr_au_count,
663 unsigned char tlb_preload_count, int options, int tsid);
662extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, 664extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
663 int cbr_au_count, char *cbmap); 665 int cbr_au_count, char *cbmap);
664extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, 666extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
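The grutables.h hunks above fold several open-coded find_first_bit()/find_next_bit() loops into for_each_set_bit(). Below is a standalone userspace sketch of the same iteration pattern, not part of the patch; the next_set_bit() helper, the BITS constant and the test bitmap are invented for illustration and only mimic the semantics of the kernel helpers.

#include <stdio.h>

#define BITS 16

/* crude stand-in for find_next_bit(): first set bit at or after 'start' */
static int next_set_bit(unsigned long map, int start)
{
        int i;

        for (i = start; i < BITS; i++)
                if (map & (1UL << i))
                        return i;
        return BITS;                    /* "not found", as the kernel helpers do */
}

/* same shape as the kernel's for_each_set_bit(bit, &map, BITS) */
#define for_each_set_bit_demo(bit, map) \
        for ((bit) = next_set_bit((map), 0); (bit) < BITS; \
             (bit) = next_set_bit((map), (bit) + 1))

int main(void)
{
        unsigned long map = 0xa5;       /* bits 0, 2, 5 and 7 set */
        int bit;

        for_each_set_bit_demo(bit, map)
                printf("bit %d is set\n", bit);
        return 0;
}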
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 1d125091f5e7..240a6d361665 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
184 STAT(flush_tlb_gru_tgh); 184 STAT(flush_tlb_gru_tgh);
185 asid = GRUASID(asid, start); 185 asid = GRUASID(asid, start);
186 gru_dbg(grudev, 186 gru_dbg(grudev,
187 " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", 187 " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
188 gid, asid, num, asids->mt_ctxbitmap); 188 gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
189 tgh = get_lock_tgh_handle(gru); 189 tgh = get_lock_tgh_handle(gru);
190 tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, 190 tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
191 num - 1, asids->mt_ctxbitmap); 191 num - 1, asids->mt_ctxbitmap);
@@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
299{ 299{
300 struct gru_mm_struct *gms; 300 struct gru_mm_struct *gms;
301 struct mmu_notifier *mn; 301 struct mmu_notifier *mn;
302 int err;
302 303
303 mn = mmu_find_ops(current->mm, &gru_mmuops); 304 mn = mmu_find_ops(current->mm, &gru_mmuops);
304 if (mn) { 305 if (mn) {
@@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
307 } else { 308 } else {
308 gms = kzalloc(sizeof(*gms), GFP_KERNEL); 309 gms = kzalloc(sizeof(*gms), GFP_KERNEL);
309 if (gms) { 310 if (gms) {
311 STAT(gms_alloc);
310 spin_lock_init(&gms->ms_asid_lock); 312 spin_lock_init(&gms->ms_asid_lock);
311 gms->ms_notifier.ops = &gru_mmuops; 313 gms->ms_notifier.ops = &gru_mmuops;
312 atomic_set(&gms->ms_refcnt, 1); 314 atomic_set(&gms->ms_refcnt, 1);
313 init_waitqueue_head(&gms->ms_wait_queue); 315 init_waitqueue_head(&gms->ms_wait_queue);
314 __mmu_notifier_register(&gms->ms_notifier, current->mm); 316 err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
317 if (err)
318 goto error;
315 } 319 }
316 } 320 }
317 gru_dbg(grudev, "gms %p, refcnt %d\n", gms, 321 gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
318 atomic_read(&gms->ms_refcnt)); 322 atomic_read(&gms->ms_refcnt));
319 return gms; 323 return gms;
324error:
325 kfree(gms);
326 return ERR_PTR(err);
320} 327}
321 328
322void gru_drop_mmu_notifier(struct gru_mm_struct *gms) 329void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
@@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
327 if (!gms->ms_released) 334 if (!gms->ms_released)
328 mmu_notifier_unregister(&gms->ms_notifier, current->mm); 335 mmu_notifier_unregister(&gms->ms_notifier, current->mm);
329 kfree(gms); 336 kfree(gms);
337 STAT(gms_free);
330 } 338 }
331} 339}
332 340
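gru_register_mmu_notifier() above now propagates a failure from __mmu_notifier_register() back to its caller as an ERR_PTR() value instead of returning a half-initialised gms. The standalone sketch below shows only the caller-side convention; the ERR_PTR/IS_ERR/PTR_ERR stand-ins mirror the semantics of linux/err.h in simplified form, and the -12 (ENOMEM) value is made up.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct gms { int refcnt; };

/* pretend registration can fail; 'fail' selects the error path */
static struct gms *register_notifier(int fail)
{
        struct gms *gms;

        if (fail)
                return ERR_PTR(-12);    /* -ENOMEM stand-in */
        gms = malloc(sizeof(*gms));
        return gms ? gms : ERR_PTR(-12);
}

int main(void)
{
        struct gms *gms = register_notifier(1);

        if (IS_ERR(gms))
                printf("registration failed: %ld\n", PTR_ERR(gms));
        else
                free(gms);
        return 0;
}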
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 2275126cb334..851b2f25ce0e 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -339,6 +339,7 @@ extern short xp_partition_id;
339extern u8 xp_region_size; 339extern u8 xp_region_size;
340 340
341extern unsigned long (*xp_pa) (void *); 341extern unsigned long (*xp_pa) (void *);
342extern unsigned long (*xp_socket_pa) (unsigned long);
342extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, 343extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
343 size_t); 344 size_t);
344extern int (*xp_cpu_to_nasid) (int); 345extern int (*xp_cpu_to_nasid) (int);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 7896849b16dc..01be66d02ca8 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
44unsigned long (*xp_pa) (void *addr); 44unsigned long (*xp_pa) (void *addr);
45EXPORT_SYMBOL_GPL(xp_pa); 45EXPORT_SYMBOL_GPL(xp_pa);
46 46
47unsigned long (*xp_socket_pa) (unsigned long gpa);
48EXPORT_SYMBOL_GPL(xp_socket_pa);
49
47enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, 50enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
48 const unsigned long src_gpa, size_t len); 51 const unsigned long src_gpa, size_t len);
49EXPORT_SYMBOL_GPL(xp_remote_memcpy); 52EXPORT_SYMBOL_GPL(xp_remote_memcpy);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index fb3ec9d735a9..d8e463f87241 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -84,6 +84,15 @@ xp_pa_sn2(void *addr)
84} 84}
85 85
86/* 86/*
87 * Convert a global physical to a socket physical address.
88 */
89static unsigned long
90xp_socket_pa_sn2(unsigned long gpa)
91{
92 return gpa;
93}
94
95/*
87 * Wrapper for bte_copy(). 96 * Wrapper for bte_copy().
88 * 97 *
89 * dst_pa - physical address of the destination of the transfer. 98 * dst_pa - physical address of the destination of the transfer.
@@ -162,6 +171,7 @@ xp_init_sn2(void)
162 xp_region_size = sn_region_size; 171 xp_region_size = sn_region_size;
163 172
164 xp_pa = xp_pa_sn2; 173 xp_pa = xp_pa_sn2;
174 xp_socket_pa = xp_socket_pa_sn2;
165 xp_remote_memcpy = xp_remote_memcpy_sn2; 175 xp_remote_memcpy = xp_remote_memcpy_sn2;
166 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; 176 xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
167 xp_expand_memprotect = xp_expand_memprotect_sn2; 177 xp_expand_memprotect = xp_expand_memprotect_sn2;
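xp_socket_pa follows the same per-platform function-pointer dispatch that xp.h already uses for xp_pa() and xp_remote_memcpy(): each backend installs its own converter at init time, and on sn2 the conversion is the identity. A standalone sketch of that pattern, not the driver's code; the uv address layout and the init flag below are invented.

#include <stdio.h>

static unsigned long (*xp_socket_pa)(unsigned long gpa);

/* sn2: a global physical address already is a socket physical address */
static unsigned long xp_socket_pa_sn2(unsigned long gpa)
{
        return gpa;
}

/* uv: pretend the node bits live above bit 48 (made-up layout) */
static unsigned long xp_socket_pa_uv(unsigned long gpa)
{
        return gpa & 0x0000ffffffffffffUL;
}

static void xp_init(int is_uv)
{
        xp_socket_pa = is_uv ? xp_socket_pa_uv : xp_socket_pa_sn2;
}

int main(void)
{
        xp_init(1);
        printf("socket pa = 0x%lx\n", xp_socket_pa(0xabcd000012345678UL));
        return 0;
}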
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d238576b26fa..a0d093274dc0 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
32 return uv_gpa(addr); 32 return uv_gpa(addr);
33} 33}
34 34
35/*
36 * Convert a global physical to socket physical address.
37 */
38static unsigned long
39xp_socket_pa_uv(unsigned long gpa)
40{
41 return uv_gpa_to_soc_phys_ram(gpa);
42}
43
44static enum xp_retval
45xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
46 size_t len)
47{
48 int ret;
49 unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
50
51 BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
52 BUG_ON(len != 8);
53
54 ret = gru_read_gpa(dst_va, src_gpa);
55 if (ret == 0)
56 return xpSuccess;
57
58 dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
59 "len=%ld\n", dst_gpa, src_gpa, len);
60 return xpGruCopyError;
61}
62
63
35static enum xp_retval 64static enum xp_retval
36xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, 65xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
37 size_t len) 66 size_t len)
38{ 67{
39 int ret; 68 int ret;
40 69
70 if (uv_gpa_in_mmr_space(src_gpa))
71 return xp_remote_mmr_read(dst_gpa, src_gpa, len);
72
41 ret = gru_copy_gpa(dst_gpa, src_gpa, len); 73 ret = gru_copy_gpa(dst_gpa, src_gpa, len);
42 if (ret == 0) 74 if (ret == 0)
43 return xpSuccess; 75 return xpSuccess;
@@ -123,6 +155,7 @@ xp_init_uv(void)
123 xp_region_size = sn_region_size; 155 xp_region_size = sn_region_size;
124 156
125 xp_pa = xp_pa_uv; 157 xp_pa = xp_pa_uv;
158 xp_socket_pa = xp_socket_pa_uv;
126 xp_remote_memcpy = xp_remote_memcpy_uv; 159 xp_remote_memcpy = xp_remote_memcpy_uv;
127 xp_cpu_to_nasid = xp_cpu_to_nasid_uv; 160 xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
128 xp_expand_memprotect = xp_expand_memprotect_uv; 161 xp_expand_memprotect = xp_expand_memprotect_uv;
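xp_remote_memcpy_uv() above now branches on the source address class: MMR-space sources are read as a single 8-byte GRU access, while ordinary memory still goes through the bulk copy. A standalone sketch of that dispatch only; the MMR window, the backing-pointer parameter and the helper names are invented for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MMR_BASE 0xf000000000000000ULL  /* made-up MMR window */

static int in_mmr_space(uint64_t gpa)
{
        return gpa >= MMR_BASE;
}

static int remote_copy(void *dst, uint64_t src_gpa, const void *src_backing,
                       size_t len)
{
        if (in_mmr_space(src_gpa)) {
                if (len != 8)
                        return -1;              /* MMRs are read 8 bytes at a time */
                memcpy(dst, src_backing, 8);    /* stands in for gru_read_gpa() */
                return 0;
        }
        memcpy(dst, src_backing, len);          /* stands in for gru_copy_gpa() */
        return 0;
}

int main(void)
{
        uint64_t mmr_value = 0x1234, out = 0;

        if (remote_copy(&out, MMR_BASE + 0x40, &mmr_value, sizeof(out)) == 0)
                printf("mmr read: 0x%llx\n", (unsigned long long)out);
        return 0;
}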
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index fd3688a3e23f..8d082b46426b 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -44,6 +44,7 @@
44 */ 44 */
45 45
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/slab.h>
47#include <linux/sysctl.h> 48#include <linux/sysctl.h>
48#include <linux/device.h> 49#include <linux/device.h>
49#include <linux/delay.h> 50#include <linux/delay.h>
@@ -89,48 +90,40 @@ static int xpc_disengage_max_timelimit = 120;
89 90
90static ctl_table xpc_sys_xpc_hb_dir[] = { 91static ctl_table xpc_sys_xpc_hb_dir[] = {
91 { 92 {
92 .ctl_name = CTL_UNNUMBERED,
93 .procname = "hb_interval", 93 .procname = "hb_interval",
94 .data = &xpc_hb_interval, 94 .data = &xpc_hb_interval,
95 .maxlen = sizeof(int), 95 .maxlen = sizeof(int),
96 .mode = 0644, 96 .mode = 0644,
97 .proc_handler = &proc_dointvec_minmax, 97 .proc_handler = proc_dointvec_minmax,
98 .strategy = &sysctl_intvec,
99 .extra1 = &xpc_hb_min_interval, 98 .extra1 = &xpc_hb_min_interval,
100 .extra2 = &xpc_hb_max_interval}, 99 .extra2 = &xpc_hb_max_interval},
101 { 100 {
102 .ctl_name = CTL_UNNUMBERED,
103 .procname = "hb_check_interval", 101 .procname = "hb_check_interval",
104 .data = &xpc_hb_check_interval, 102 .data = &xpc_hb_check_interval,
105 .maxlen = sizeof(int), 103 .maxlen = sizeof(int),
106 .mode = 0644, 104 .mode = 0644,
107 .proc_handler = &proc_dointvec_minmax, 105 .proc_handler = proc_dointvec_minmax,
108 .strategy = &sysctl_intvec,
109 .extra1 = &xpc_hb_check_min_interval, 106 .extra1 = &xpc_hb_check_min_interval,
110 .extra2 = &xpc_hb_check_max_interval}, 107 .extra2 = &xpc_hb_check_max_interval},
111 {} 108 {}
112}; 109};
113static ctl_table xpc_sys_xpc_dir[] = { 110static ctl_table xpc_sys_xpc_dir[] = {
114 { 111 {
115 .ctl_name = CTL_UNNUMBERED,
116 .procname = "hb", 112 .procname = "hb",
117 .mode = 0555, 113 .mode = 0555,
118 .child = xpc_sys_xpc_hb_dir}, 114 .child = xpc_sys_xpc_hb_dir},
119 { 115 {
120 .ctl_name = CTL_UNNUMBERED,
121 .procname = "disengage_timelimit", 116 .procname = "disengage_timelimit",
122 .data = &xpc_disengage_timelimit, 117 .data = &xpc_disengage_timelimit,
123 .maxlen = sizeof(int), 118 .maxlen = sizeof(int),
124 .mode = 0644, 119 .mode = 0644,
125 .proc_handler = &proc_dointvec_minmax, 120 .proc_handler = proc_dointvec_minmax,
126 .strategy = &sysctl_intvec,
127 .extra1 = &xpc_disengage_min_timelimit, 121 .extra1 = &xpc_disengage_min_timelimit,
128 .extra2 = &xpc_disengage_max_timelimit}, 122 .extra2 = &xpc_disengage_max_timelimit},
129 {} 123 {}
130}; 124};
131static ctl_table xpc_sys_dir[] = { 125static ctl_table xpc_sys_dir[] = {
132 { 126 {
133 .ctl_name = CTL_UNNUMBERED,
134 .procname = "xpc", 127 .procname = "xpc",
135 .mode = 0555, 128 .mode = 0555,
136 .child = xpc_sys_xpc_dir}, 129 .child = xpc_sys_xpc_dir},
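The xpc_main.c hunk drops the legacy .ctl_name and .strategy members, leaving pure designated initializers in which every omitted field is simply zeroed. A standalone sketch of that initializer shape; the struct below is a simplified stand-in for ctl_table, not the kernel definition, and the values are examples only.

#include <stdio.h>

struct demo_ctl_table {
        const char *procname;
        void *data;
        int maxlen;
        int mode;
        int (*proc_handler)(void);
        void *extra1, *extra2;
};

static int xpc_hb_interval = 2;
static int hb_min = 1, hb_max = 10;

static int demo_handler(void)
{
        return 0;
}

static struct demo_ctl_table demo_hb_dir[] = {
        {
         .procname = "hb_interval",
         .data = &xpc_hb_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = demo_handler,
         .extra1 = &hb_min,
         .extra2 = &hb_max},
        {}                              /* sentinel entry, as in the real table */
};

int main(void)
{
        printf("%s: mode %o, handler %sset\n", demo_hb_dir[0].procname,
               demo_hb_dir[0].mode, demo_hb_dir[0].proc_handler ? "" : "not ");
        return 0;
}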
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5edaa..6956f7e7d439 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -17,7 +17,9 @@
17 17
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/hardirq.h> 19#include <linux/hardirq.h>
20#include <linux/slab.h>
20#include "xpc.h" 21#include "xpc.h"
22#include <asm/uv/uv_hub.h>
21 23
22/* XPC is exiting flag */ 24/* XPC is exiting flag */
23int xpc_exiting; 25int xpc_exiting;
@@ -92,8 +94,12 @@ xpc_get_rsvd_page_pa(int nasid)
92 break; 94 break;
93 95
94 /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ 96 /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
95 if (L1_CACHE_ALIGN(len) > buf_len) { 97 if (is_shub())
96 kfree(buf_base); 98 len = L1_CACHE_ALIGN(len);
99
100 if (len > buf_len) {
101 if (buf_base != NULL)
102 kfree(buf_base);
97 buf_len = L1_CACHE_ALIGN(len); 103 buf_len = L1_CACHE_ALIGN(len);
98 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, 104 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
99 &buf_base); 105 &buf_base);
@@ -105,7 +111,7 @@ xpc_get_rsvd_page_pa(int nasid)
105 } 111 }
106 } 112 }
107 113
108 ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len); 114 ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
109 if (ret != xpSuccess) { 115 if (ret != xpSuccess) {
110 dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); 116 dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
111 break; 117 break;
@@ -143,7 +149,7 @@ xpc_setup_rsvd_page(void)
143 dev_err(xpc_part, "SAL failed to locate the reserved page\n"); 149 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
144 return -ESRCH; 150 return -ESRCH;
145 } 151 }
146 rp = (struct xpc_rsvd_page *)__va(rp_pa); 152 rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
147 153
148 if (rp->SAL_version < 3) { 154 if (rp->SAL_version < 3) {
149 /* SAL_versions < 3 had a SAL_partid defined as a u8 */ 155 /* SAL_versions < 3 had a SAL_partid defined as a u8 */
@@ -433,18 +439,23 @@ xpc_discovery(void)
433 * nodes that can comprise an access protection grouping. The access 439 * nodes that can comprise an access protection grouping. The access
434 * protection is in regards to memory, IOI and IPI. 440 * protection is in regards to memory, IOI and IPI.
435 */ 441 */
436 max_regions = 64;
437 region_size = xp_region_size; 442 region_size = xp_region_size;
438 443
439 switch (region_size) { 444 if (is_uv())
440 case 128: 445 max_regions = 256;
441 max_regions *= 2; 446 else {
442 case 64: 447 max_regions = 64;
443 max_regions *= 2; 448
444 case 32: 449 switch (region_size) {
445 max_regions *= 2; 450 case 128:
446 region_size = 16; 451 max_regions *= 2;
447 DBUG_ON(!is_shub2()); 452 case 64:
453 max_regions *= 2;
454 case 32:
455 max_regions *= 2;
456 region_size = 16;
457 DBUG_ON(!is_shub2());
458 }
448 } 459 }
449 460
450 for (region = 0; region < max_regions; region++) { 461 for (region = 0; region < max_regions; region++) {
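The xpc_discovery() hunk keeps the deliberate switch fall-through: each smaller shub region_size doubles max_regions once more and the scan granularity is normalised to 16, while uv now gets a flat 256 regions. The standalone sketch below just prints the resulting values; treat it as an arithmetic illustration, not driver code.

#include <stdio.h>

static void compute(int region_size)
{
        int orig = region_size;
        int max_regions = 64;

        switch (region_size) {
        case 128:
                max_regions *= 2;       /* fall through */
        case 64:
                max_regions *= 2;       /* fall through */
        case 32:
                max_regions *= 2;
                region_size = 16;
        }
        printf("region_size %3d -> max_regions %3d (scanned in units of %d)\n",
               orig, max_regions, region_size);
}

int main(void)
{
        int sizes[] = { 16, 32, 64, 128 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                compute(sizes[i]);
        return 0;
}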
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 915a3b495da5..7d71c04fc938 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/slab.h>
17#include <asm/uncached.h> 18#include <asm/uncached.h>
18#include <asm/sn/mspec.h> 19#include <asm/sn/mspec.h>
19#include <asm/sn/sn_sal.h> 20#include <asm/sn/sn_sal.h>
@@ -279,7 +280,7 @@ xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
279 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); 280 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
280 281
281 dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags=" 282 dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
282 "0x%lx\n", XPC_PARTID(part), chctl.all_flags); 283 "0x%llx\n", XPC_PARTID(part), chctl.all_flags);
283 284
284 xpc_wakeup_channel_mgr(part); 285 xpc_wakeup_channel_mgr(part);
285} 286}
@@ -615,7 +616,8 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
615 s64 status; 616 s64 status;
616 enum xp_retval ret; 617 enum xp_retval ret;
617 618
618 status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); 619 status = sn_partition_reserved_page_pa((u64)buf, cookie,
620 (u64 *)rp_pa, (u64 *)len);
619 if (status == SALRET_OK) 621 if (status == SALRET_OK)
620 ret = xpSuccess; 622 ret = xpSuccess;
621 else if (status == SALRET_MORE_PASSES) 623 else if (status == SALRET_MORE_PASSES)
@@ -777,8 +779,8 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
777 if (ret != xpSuccess) 779 if (ret != xpSuccess)
778 return ret; 780 return ret;
779 781
780 dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, " 782 dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
781 "heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part), 783 "heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
782 remote_vars->heartbeat, part->last_heartbeat, 784 remote_vars->heartbeat, part->last_heartbeat,
783 remote_vars->heartbeat_offline, 785 remote_vars->heartbeat_offline,
784 remote_vars->heartbeating_to_mask[0]); 786 remote_vars->heartbeating_to_mask[0]);
@@ -940,7 +942,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
940 part_sn2->remote_vars_pa); 942 part_sn2->remote_vars_pa);
941 943
942 part->last_heartbeat = remote_vars->heartbeat - 1; 944 part->last_heartbeat = remote_vars->heartbeat - 1;
943 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", 945 dev_dbg(xpc_part, " last_heartbeat = 0x%016llx\n",
944 part->last_heartbeat); 946 part->last_heartbeat);
945 947
946 part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa; 948 part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
@@ -1029,7 +1031,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
1029 part->activate_IRQ_rcvd++; 1031 part->activate_IRQ_rcvd++;
1030 1032
1031 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 1033 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
1032 "%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd, 1034 "%lld:0x%lx\n", (int)nasid, (int)partid,
1035 part->activate_IRQ_rcvd,
1033 remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]); 1036 remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
1034 1037
1035 if (xpc_partition_disengaged(part) && 1038 if (xpc_partition_disengaged(part) &&
@@ -1129,7 +1132,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
1129 do { 1132 do {
1130 n_IRQs_detected++; 1133 n_IRQs_detected++;
1131 nasid = (l * BITS_PER_LONG + b) * 2; 1134 nasid = (l * BITS_PER_LONG + b) * 2;
1132 dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid); 1135 dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
1133 xpc_identify_activate_IRQ_req_sn2(nasid); 1136 xpc_identify_activate_IRQ_req_sn2(nasid);
1134 1137
1135 b = find_next_bit(&nasid_mask_long, BITS_PER_LONG, 1138 b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
@@ -1386,7 +1389,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1386 1389
1387 if (pulled_entry->magic != 0) { 1390 if (pulled_entry->magic != 0) {
1388 dev_dbg(xpc_chan, "partition %d's XPC vars_part for " 1391 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
1389 "partition %d has bad magic value (=0x%lx)\n", 1392 "partition %d has bad magic value (=0x%llx)\n",
1390 partid, sn_partition_id, pulled_entry->magic); 1393 partid, sn_partition_id, pulled_entry->magic);
1391 return xpBadMagic; 1394 return xpBadMagic;
1392 } 1395 }
@@ -1730,14 +1733,14 @@ xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
1730 1733
1731 if (notify->func != NULL) { 1734 if (notify->func != NULL) {
1732 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p " 1735 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
1733 "msg_number=%ld partid=%d channel=%d\n", 1736 "msg_number=%lld partid=%d channel=%d\n",
1734 (void *)notify, get, ch->partid, ch->number); 1737 (void *)notify, get, ch->partid, ch->number);
1735 1738
1736 notify->func(reason, ch->partid, ch->number, 1739 notify->func(reason, ch->partid, ch->number,
1737 notify->key); 1740 notify->key);
1738 1741
1739 dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p" 1742 dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
1740 " msg_number=%ld partid=%d channel=%d\n", 1743 " msg_number=%lld partid=%d channel=%d\n",
1741 (void *)notify, get, ch->partid, ch->number); 1744 (void *)notify, get, ch->partid, ch->number);
1742 } 1745 }
1743 } 1746 }
@@ -1858,7 +1861,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1858 1861
1859 ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get; 1862 ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
1860 1863
1861 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " 1864 dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, "
1862 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid, 1865 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
1863 ch->number); 1866 ch->number);
1864 1867
@@ -1885,7 +1888,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1885 smp_wmb(); /* ensure flags have been cleared before bte_copy */ 1888 smp_wmb(); /* ensure flags have been cleared before bte_copy */
1886 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put; 1889 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1887 1890
1888 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " 1891 dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, "
1889 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid, 1892 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
1890 ch->number); 1893 ch->number);
1891 1894
@@ -1943,7 +1946,7 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1943 if (ret != xpSuccess) { 1946 if (ret != xpSuccess) {
1944 1947
1945 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 1948 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
1946 " msg %ld from partition %d, channel=%d, " 1949 " msg %lld from partition %d, channel=%d, "
1947 "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull, 1950 "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
1948 ch->partid, ch->number, ret); 1951 ch->partid, ch->number, ret);
1949 1952
@@ -1995,7 +1998,7 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
1995 if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) { 1998 if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
1996 /* we got the entry referenced by get */ 1999 /* we got the entry referenced by get */
1997 2000
1998 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " 2001 dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, "
1999 "partid=%d, channel=%d\n", get + 1, 2002 "partid=%d, channel=%d\n", get + 1,
2000 ch->partid, ch->number); 2003 ch->partid, ch->number);
2001 2004
@@ -2062,7 +2065,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
2062 2065
2063 /* we just set the new value of local_GP->put */ 2066 /* we just set the new value of local_GP->put */
2064 2067
2065 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " 2068 dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, "
2066 "channel=%d\n", put, ch->partid, ch->number); 2069 "channel=%d\n", put, ch->partid, ch->number);
2067 2070
2068 send_msgrequest = 1; 2071 send_msgrequest = 1;
@@ -2147,8 +2150,8 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
2147 DBUG_ON(msg->flags != 0); 2150 DBUG_ON(msg->flags != 0);
2148 msg->number = put; 2151 msg->number = put;
2149 2152
2150 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " 2153 dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, "
2151 "msg_number=%ld, partid=%d, channel=%d\n", put + 1, 2154 "msg_number=%lld, partid=%d, channel=%d\n", put + 1,
2152 (void *)msg, msg->number, ch->partid, ch->number); 2155 (void *)msg, msg->number, ch->partid, ch->number);
2153 2156
2154 *address_of_msg = msg; 2157 *address_of_msg = msg;
@@ -2296,7 +2299,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2296 2299
2297 /* we just set the new value of local_GP->get */ 2300 /* we just set the new value of local_GP->get */
2298 2301
2299 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " 2302 dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, "
2300 "channel=%d\n", get, ch->partid, ch->number); 2303 "channel=%d\n", get, ch->partid, ch->number);
2301 2304
2302 send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT); 2305 send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
@@ -2323,7 +2326,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
2323 msg = container_of(payload, struct xpc_msg_sn2, payload); 2326 msg = container_of(payload, struct xpc_msg_sn2, payload);
2324 msg_number = msg->number; 2327 msg_number = msg->number;
2325 2328
2326 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", 2329 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
2327 (void *)msg, msg_number, ch->partid, ch->number); 2330 (void *)msg, msg_number, ch->partid, ch->number);
2328 2331
2329 DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) != 2332 DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
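The %ld to %lld conversions above assume the heartbeat, GP and magic fields are fixed 64-bit types (u64/s64), which map to long long on x86_64. A standalone sketch of the matching userspace idiom; inttypes.h is the portable route, and the casts in the second printf mirror what plain %llu/%lld printing needs when the underlying 64-bit type is not known.

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        uint64_t heartbeat = 42;
        int64_t msg_number = -1;

        /* portable: let inttypes.h supply the right length modifier */
        printf("heartbeat=%" PRIu64 " msg_number=%" PRId64 "\n",
               heartbeat, msg_number);

        /* the diff's approach, with casts added here so the format stays
         * correct whatever the 64-bit typedef resolves to */
        printf("heartbeat=%llu msg_number=%lld\n",
               (unsigned long long)heartbeat, (long long)msg_number);
        return 0;
}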
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c76677afda1b..17bbacb1b4b1 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -19,6 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/slab.h>
22#include <asm/uv/uv_hub.h> 23#include <asm/uv/uv_hub.h>
23#if defined CONFIG_X86_64 24#if defined CONFIG_X86_64
24#include <asm/uv/bios.h> 25#include <asm/uv/bios.h>
@@ -106,7 +107,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
106 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 107 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
107 108
108#if defined CONFIG_X86_64 109#if defined CONFIG_X86_64
109 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); 110 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
111 UV_AFFINITY_CPU);
110 if (mq->irq < 0) { 112 if (mq->irq < 0) {
111 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", 113 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
112 -mq->irq); 114 -mq->irq);
@@ -136,7 +138,7 @@ static void
136xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) 138xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
137{ 139{
138#if defined CONFIG_X86_64 140#if defined CONFIG_X86_64
139 uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); 141 uv_teardown_irq(mq->irq);
140 142
141#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 143#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
142 int mmr_pnode; 144 int mmr_pnode;
@@ -156,22 +158,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
156{ 158{
157 int ret; 159 int ret;
158 160
159#if defined CONFIG_X86_64 161#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
160 ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), 162 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
161 mq->order, &mq->mmr_offset); 163
162 if (ret < 0) { 164 ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
163 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
164 "ret=%d\n", ret);
165 return ret;
166 }
167#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
168 ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
169 mq->order, &mq->mmr_offset); 165 mq->order, &mq->mmr_offset);
170 if (ret < 0) { 166 if (ret < 0) {
171 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", 167 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
172 ret); 168 ret);
173 return -EBUSY; 169 return -EBUSY;
174 } 170 }
171#elif defined CONFIG_X86_64
172 ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
173 mq->order, &mq->mmr_offset);
174 if (ret < 0) {
175 dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
176 "ret=%d\n", ret);
177 return ret;
178 }
175#else 179#else
176 #error not a supported configuration 180 #error not a supported configuration
177#endif 181#endif
@@ -184,12 +188,13 @@ static void
184xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) 188xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
185{ 189{
186 int ret; 190 int ret;
191 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
187 192
188#if defined CONFIG_X86_64 193#if defined CONFIG_X86_64
189 ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 194 ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
190 BUG_ON(ret != BIOS_STATUS_SUCCESS); 195 BUG_ON(ret != BIOS_STATUS_SUCCESS);
191#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 196#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
192 ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); 197 ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
193 BUG_ON(ret != SALRET_OK); 198 BUG_ON(ret != SALRET_OK);
194#else 199#else
195 #error not a supported configuration 200 #error not a supported configuration
@@ -203,6 +208,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
203 enum xp_retval xp_ret; 208 enum xp_retval xp_ret;
204 int ret; 209 int ret;
205 int nid; 210 int nid;
211 int nasid;
206 int pg_order; 212 int pg_order;
207 struct page *page; 213 struct page *page;
208 struct xpc_gru_mq_uv *mq; 214 struct xpc_gru_mq_uv *mq;
@@ -258,9 +264,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
258 goto out_5; 264 goto out_5;
259 } 265 }
260 266
267 nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
268
261 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; 269 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
262 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, 270 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
263 nid, mmr_value->vector, mmr_value->dest); 271 nasid, mmr_value->vector, mmr_value->dest);
264 if (ret != 0) { 272 if (ret != 0) {
265 dev_err(xpc_part, "gru_create_message_queue() returned " 273 dev_err(xpc_part, "gru_create_message_queue() returned "
266 "error=%d\n", ret); 274 "error=%d\n", ret);
@@ -409,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
409static void 417static void
410xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, 418xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
411 struct xpc_activate_mq_msghdr_uv *msg_hdr, 419 struct xpc_activate_mq_msghdr_uv *msg_hdr,
420 int part_setup,
412 int *wakeup_hb_checker) 421 int *wakeup_hb_checker)
413{ 422{
414 unsigned long irq_flags; 423 unsigned long irq_flags;
@@ -473,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
473 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { 482 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
474 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; 483 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
475 484
485 if (!part_setup)
486 break;
487
476 msg = container_of(msg_hdr, struct 488 msg = container_of(msg_hdr, struct
477 xpc_activate_mq_msg_chctl_closerequest_uv, 489 xpc_activate_mq_msg_chctl_closerequest_uv,
478 hdr); 490 hdr);
@@ -489,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
489 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { 501 case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
490 struct xpc_activate_mq_msg_chctl_closereply_uv *msg; 502 struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
491 503
504 if (!part_setup)
505 break;
506
492 msg = container_of(msg_hdr, struct 507 msg = container_of(msg_hdr, struct
493 xpc_activate_mq_msg_chctl_closereply_uv, 508 xpc_activate_mq_msg_chctl_closereply_uv,
494 hdr); 509 hdr);
@@ -503,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
503 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { 518 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
504 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; 519 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
505 520
521 if (!part_setup)
522 break;
523
506 msg = container_of(msg_hdr, struct 524 msg = container_of(msg_hdr, struct
507 xpc_activate_mq_msg_chctl_openrequest_uv, 525 xpc_activate_mq_msg_chctl_openrequest_uv,
508 hdr); 526 hdr);
@@ -520,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
520 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { 538 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
521 struct xpc_activate_mq_msg_chctl_openreply_uv *msg; 539 struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
522 540
541 if (!part_setup)
542 break;
543
523 msg = container_of(msg_hdr, struct 544 msg = container_of(msg_hdr, struct
524 xpc_activate_mq_msg_chctl_openreply_uv, hdr); 545 xpc_activate_mq_msg_chctl_openreply_uv, hdr);
525 args = &part->remote_openclose_args[msg->ch_number]; 546 args = &part->remote_openclose_args[msg->ch_number];
@@ -537,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
537 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { 558 case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
538 struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; 559 struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
539 560
561 if (!part_setup)
562 break;
563
540 msg = container_of(msg_hdr, struct 564 msg = container_of(msg_hdr, struct
541 xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); 565 xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
542 spin_lock_irqsave(&part->chctl_lock, irq_flags); 566 spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -613,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
613 637
614 part_referenced = xpc_part_ref(part); 638 part_referenced = xpc_part_ref(part);
615 xpc_handle_activate_mq_msg_uv(part, msg_hdr, 639 xpc_handle_activate_mq_msg_uv(part, msg_hdr,
640 part_referenced,
616 &wakeup_hb_checker); 641 &wakeup_hb_checker);
617 if (part_referenced) 642 if (part_referenced)
618 xpc_part_deref(part); 643 xpc_part_deref(part);
@@ -945,11 +970,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
945 head->first = first->next; 970 head->first = first->next;
946 if (head->first == NULL) 971 if (head->first == NULL)
947 head->last = NULL; 972 head->last = NULL;
973
974 head->n_entries--;
975 BUG_ON(head->n_entries < 0);
976
977 first->next = NULL;
948 } 978 }
949 head->n_entries--;
950 BUG_ON(head->n_entries < 0);
951 spin_unlock_irqrestore(&head->lock, irq_flags); 979 spin_unlock_irqrestore(&head->lock, irq_flags);
952 first->next = NULL;
953 return first; 980 return first;
954} 981}
955 982
@@ -1018,7 +1045,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
1018 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 1045 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
1019 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); 1046 XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
1020 1047
1021 while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { 1048 while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
1049 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
1022 1050
1023 dev_dbg(xpc_part, "waiting to make first contact with " 1051 dev_dbg(xpc_part, "waiting to make first contact with "
1024 "partition %d\n", XPC_PARTID(part)); 1052 "partition %d\n", XPC_PARTID(part));
@@ -1421,7 +1449,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
1421 msg_slot = ch_uv->recv_msg_slots + 1449 msg_slot = ch_uv->recv_msg_slots +
1422 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; 1450 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
1423 1451
1424 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
1425 BUG_ON(msg_slot->hdr.size != 0); 1452 BUG_ON(msg_slot->hdr.size != 0);
1426 1453
1427 memcpy(msg_slot, msg, msg->hdr.size); 1454 memcpy(msg_slot, msg, msg->hdr.size);
@@ -1645,8 +1672,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
1645 sizeof(struct xpc_notify_mq_msghdr_uv)); 1672 sizeof(struct xpc_notify_mq_msghdr_uv));
1646 if (ret != xpSuccess) 1673 if (ret != xpSuccess)
1647 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1674 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
1648
1649 msg->hdr.msg_slot_number += ch->remote_nentries;
1650} 1675}
1651 1676
1652static struct xpc_arch_operations xpc_arch_ops_uv = { 1677static struct xpc_arch_operations xpc_arch_ops_uv = {
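The xpc_get_fifo_entry_uv() hunk moves the n_entries decrement and the first->next reset inside the non-empty branch, since with an empty FIFO first is NULL and the count must not go negative. A standalone sketch of the corrected shape; the types are simplified stand-ins for xpc_fifo_head_uv and the spinlock is omitted.

#include <stdio.h>
#include <stddef.h>
#include <assert.h>

struct entry {
        struct entry *next;
};

struct fifo_head {
        struct entry *first, *last;
        int n_entries;
};

static struct entry *fifo_get(struct fifo_head *head)
{
        struct entry *first = head->first;

        if (first != NULL) {
                head->first = first->next;
                if (head->first == NULL)
                        head->last = NULL;

                head->n_entries--;
                assert(head->n_entries >= 0);

                first->next = NULL;
        }
        return first;                   /* NULL when the FIFO was empty */
}

int main(void)
{
        struct entry e = { NULL };
        struct fifo_head head = { &e, &e, 1 };
        struct entry *a = fifo_get(&head);
        struct entry *b = fifo_get(&head);

        printf("first get=%p second get=%p n_entries=%d\n",
               (void *)a, (void *)b, head.n_entries);
        return 0;
}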
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 8d1c60a3f0df..ee5109a3cd98 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/slab.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/netdevice.h> 25#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
@@ -235,12 +236,11 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
235 skb->ip_summed = CHECKSUM_UNNECESSARY; 236 skb->ip_summed = CHECKSUM_UNNECESSARY;
236 237
237 dev_dbg(xpnet, "passing skb to network layer\n" 238 dev_dbg(xpnet, "passing skb to network layer\n"
238 KERN_DEBUG "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p " 239 "\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
239 "skb->end=0x%p skb->len=%d\n", 240 "skb->end=0x%p skb->len=%d\n",
240 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 241 (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
241 skb_end_pointer(skb), skb->len); 242 skb_end_pointer(skb), skb->len);
242 243
243 xpnet_device->last_rx = jiffies;
244 xpnet_device->stats.rx_packets++; 244 xpnet_device->stats.rx_packets++;
245 xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN; 245 xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
246 246
@@ -399,7 +399,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
399 msg->buf_pa = xp_pa((void *)start_addr); 399 msg->buf_pa = xp_pa((void *)start_addr);
400 400
401 dev_dbg(xpnet, "sending XPC message to %d:%d\n" 401 dev_dbg(xpnet, "sending XPC message to %d:%d\n"
402 KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " 402 "msg->buf_pa=0x%lx, msg->size=%u, "
403 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", 403 "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
404 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, 404 dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
405 msg->leadin_ignore, msg->tailout_ignore); 405 msg->leadin_ignore, msg->tailout_ignore);
@@ -436,7 +436,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
436 436
437 if (skb->data[0] == 0x33) { 437 if (skb->data[0] == 0x33) {
438 dev_kfree_skb(skb); 438 dev_kfree_skb(skb);
439 return 0; /* nothing needed to be done */ 439 return NETDEV_TX_OK; /* nothing needed to be done */
440 } 440 }
441 441
442 /* 442 /*
@@ -476,7 +476,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
476 476
477 if (skb->data[0] == 0xff) { 477 if (skb->data[0] == 0xff) {
478 /* we are being asked to broadcast to all partitions */ 478 /* we are being asked to broadcast to all partitions */
479 for_each_bit(dest_partid, xpnet_broadcast_partitions, 479 for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
480 xp_max_npartitions) { 480 xp_max_npartitions) {
481 481
482 xpnet_send(skb, queued_msg, start_addr, end_addr, 482 xpnet_send(skb, queued_msg, start_addr, end_addr,
@@ -503,7 +503,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
503 dev->stats.tx_packets++; 503 dev->stats.tx_packets++;
504 dev->stats.tx_bytes += skb->len; 504 dev->stats.tx_bytes += skb->len;
505 505
506 return 0; 506 return NETDEV_TX_OK;
507} 507}
508 508
509/* 509/*
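The xpnet.c hunks drop the KERN_DEBUG tokens that sat in the middle of multi-line dev_dbg() formats; the level marker is only a string-literal prefix, so embedding it mid-format just injects its bytes into the message. A standalone sketch of that string-concatenation effect; the <7> prefix below is a stand-in for the kernel's level markers, not their current encoding.

#include <stdio.h>

#define DEMO_KERN_DEBUG "<7>"   /* stand-in for a printk level marker */

int main(void)
{
        /* old style: the marker lands in the middle of the message */
        printf(DEMO_KERN_DEBUG "skb->head=0x%p\n"
               DEMO_KERN_DEBUG "\tskb->len=%d\n", (void *)0, 64);

        /* what the diff does: one leveled message, plain continuation */
        printf(DEMO_KERN_DEBUG "skb->head=0x%p\n"
               "\tskb->len=%d\n", (void *)0, 64);
        return 0;
}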
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
new file mode 100644
index 000000000000..2c8c3f39710d
--- /dev/null
+++ b/drivers/misc/ti-st/Kconfig
@@ -0,0 +1,17 @@
1#
2# TI's shared transport line discipline and the protocol
3# drivers (BT, FM and GPS)
4#
5menu "Texas Instruments shared transport line discipline"
6config TI_ST
7 tristate "Shared transport core driver"
8 depends on RFKILL
9 select FW_LOADER
10 help
11 This enables the shared transport core driver for TI
12 BT / FM and GPS combo chips. This enables protocol drivers
13 to register themselves with core and send data, the responses
14 are returned to relevant protocol drivers based on their
15 packet types.
16
17endmenu
diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile
new file mode 100644
index 000000000000..78d7ebb14749
--- /dev/null
+++ b/drivers/misc/ti-st/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for TI's shared transport line discipline
3# and its protocol drivers (BT, FM, GPS)
4#
5obj-$(CONFIG_TI_ST) += st_drv.o
6st_drv-objs := st_core.o st_kim.o st_ll.o
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
new file mode 100644
index 000000000000..f9aad06d1ae5
--- /dev/null
+++ b/drivers/misc/ti-st/st_core.c
@@ -0,0 +1,992 @@
1/*
2 * Shared Transport Line discipline driver Core
3 * This hooks up ST KIM driver and ST LL driver
4 * Copyright (C) 2009-2010 Texas Instruments
5 * Author: Pavan Savoy <pavan_savoy@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#define pr_fmt(fmt) "(stc): " fmt
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/tty.h>
27
28/* understand BT, FM and GPS for now */
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/hci.h>
32#include <linux/ti_wilink_st.h>
33
34/* function pointer pointing to either,
35 * st_kim_recv during registration to receive fw download responses
36 * st_int_recv after registration to receive proto stack responses
37 */
38void (*st_recv) (void*, const unsigned char*, long);
39
40/********************************************************************/
41#if 0
42/* internal misc functions */
43bool is_protocol_list_empty(void)
44{
45 unsigned char i = 0;
46 pr_debug(" %s ", __func__);
47 for (i = 0; i < ST_MAX; i++) {
48 if (st_gdata->list[i] != NULL)
49 return ST_NOTEMPTY;
50 /* not empty */
51 }
52 /* list empty */
53 return ST_EMPTY;
54}
55#endif
56
57/* can be called in from
58 * -- KIM (during fw download)
59 * -- ST Core (during st_write)
60 *
61 * This is the internal write function - a wrapper
62 * to tty->ops->write
63 */
64int st_int_write(struct st_data_s *st_gdata,
65 const unsigned char *data, int count)
66{
67 struct tty_struct *tty;
68 if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
69 pr_err("tty unavailable to perform write");
70 return -1;
71 }
72 tty = st_gdata->tty;
73#ifdef VERBOSE
74 print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE,
75 16, 1, data, count, 0);
76#endif
77 return tty->ops->write(tty, data, count);
78
79}
80
81/*
82 * push the skb received to relevant
83 * protocol stacks
84 */
85void st_send_frame(enum proto_type protoid, struct st_data_s *st_gdata)
86{
87 pr_info(" %s(prot:%d) ", __func__, protoid);
88
89 if (unlikely
90 (st_gdata == NULL || st_gdata->rx_skb == NULL
91 || st_gdata->list[protoid] == NULL)) {
92 pr_err("protocol %d not registered, no data to send?",
93 protoid);
94 kfree_skb(st_gdata->rx_skb);
95 return;
96 }
97 /* this cannot fail
98 * this shouldn't take long
99 * - should be just skb_queue_tail for the
100 * protocol stack driver
101 */
102 if (likely(st_gdata->list[protoid]->recv != NULL)) {
103 if (unlikely
104 (st_gdata->list[protoid]->recv
105 (st_gdata->list[protoid]->priv_data, st_gdata->rx_skb)
106 != 0)) {
107 pr_err(" proto stack %d's ->recv failed", protoid);
108 kfree_skb(st_gdata->rx_skb);
109 return;
110 }
111 } else {
112 pr_err(" proto stack %d's ->recv null", protoid);
113 kfree_skb(st_gdata->rx_skb);
114 }
115 return;
116}
117
118/**
119 * st_reg_complete -
120 * to call registration complete callbacks
121 * of all protocol stack drivers
122 */
123void st_reg_complete(struct st_data_s *st_gdata, char err)
124{
125 unsigned char i = 0;
126 pr_info(" %s ", __func__);
127 for (i = 0; i < ST_MAX; i++) {
128 if (likely(st_gdata != NULL && st_gdata->list[i] != NULL &&
129 st_gdata->list[i]->reg_complete_cb != NULL))
130 st_gdata->list[i]->reg_complete_cb
131 (st_gdata->list[i]->priv_data, err);
132 }
133}
134
135static inline int st_check_data_len(struct st_data_s *st_gdata,
136 int protoid, int len)
137{
138 int room = skb_tailroom(st_gdata->rx_skb);
139
140 pr_debug("len %d room %d", len, room);
141
142 if (!len) {
143 /* Received packet has only packet header and
144 * has zero length payload. So, ask ST CORE to
145 * forward the packet to protocol driver (BT/FM/GPS)
146 */
147 st_send_frame(protoid, st_gdata);
148
149 } else if (len > room) {
150 /* Received packet's payload length is larger.
151 * We can't accommodate it in created skb.
152 */
153 pr_err("Data length is too large len %d room %d", len,
154 room);
155 kfree_skb(st_gdata->rx_skb);
156 } else {
157 /* Packet header has non-zero payload length and
158 * we have enough space in created skb. Lets read
159 * payload data */
160 st_gdata->rx_state = ST_BT_W4_DATA;
161 st_gdata->rx_count = len;
162 return len;
163 }
164
165 /* Change ST state to continue to process next
166 * packet */
167 st_gdata->rx_state = ST_W4_PACKET_TYPE;
168 st_gdata->rx_skb = NULL;
169 st_gdata->rx_count = 0;
170
171 return 0;
172}
173
174/**
175 * st_wakeup_ack - internal function for action when wake-up ack
176 * received
177 */
178static inline void st_wakeup_ack(struct st_data_s *st_gdata,
179 unsigned char cmd)
180{
181 struct sk_buff *waiting_skb;
182 unsigned long flags = 0;
183
184 spin_lock_irqsave(&st_gdata->lock, flags);
185 /* de-Q from waitQ and Q in txQ now that the
186 * chip is awake
187 */
188 while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
189 skb_queue_tail(&st_gdata->txq, waiting_skb);
190
191 /* state forwarded to ST LL */
192 st_ll_sleep_state(st_gdata, (unsigned long)cmd);
193 spin_unlock_irqrestore(&st_gdata->lock, flags);
194
195 /* wake up to send the recently copied skbs from waitQ */
196 st_tx_wakeup(st_gdata);
197}
198
199/**
200 * st_int_recv - ST's internal receive function.
201 * Decodes received RAW data and forwards to corresponding
202 * client drivers (Bluetooth,FM,GPS..etc).
203 * This can receive various types of packets,
204 * HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets
205 * CH-8 packets from FM, CH-9 packets from GPS cores.
206 */
207void st_int_recv(void *disc_data,
208 const unsigned char *data, long count)
209{
210 char *ptr;
211 struct hci_event_hdr *eh;
212 struct hci_acl_hdr *ah;
213 struct hci_sco_hdr *sh;
214 struct fm_event_hdr *fm;
215 struct gps_event_hdr *gps;
216 int len = 0, type = 0, dlen = 0;
217 static enum proto_type protoid = ST_MAX;
218 struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
219
220 ptr = (char *)data;
221 /* tty_receive sent null ? */
222 if (unlikely(ptr == NULL) || (st_gdata == NULL)) {
223 pr_err(" received null from TTY ");
224 return;
225 }
226
227        pr_info("count %ld rx_state %ld "
228 "rx_count %ld", count, st_gdata->rx_state,
229 st_gdata->rx_count);
230
231 /* Decode received bytes here */
232 while (count) {
233 if (st_gdata->rx_count) {
234 len = min_t(unsigned int, st_gdata->rx_count, count);
235 memcpy(skb_put(st_gdata->rx_skb, len), ptr, len);
236 st_gdata->rx_count -= len;
237 count -= len;
238 ptr += len;
239
240 if (st_gdata->rx_count)
241 continue;
242
243 /* Check ST RX state machine, where are we? */
244 switch (st_gdata->rx_state) {
245
246 /* Waiting for complete packet ? */
247 case ST_BT_W4_DATA:
248 pr_debug("Complete pkt received");
249
250 /* Ask ST CORE to forward
251 * the packet to protocol driver */
252 st_send_frame(protoid, st_gdata);
253
254 st_gdata->rx_state = ST_W4_PACKET_TYPE;
255 st_gdata->rx_skb = NULL;
256 protoid = ST_MAX; /* is this required ? */
257 continue;
258
259 /* Waiting for Bluetooth event header ? */
260 case ST_BT_W4_EVENT_HDR:
261 eh = (struct hci_event_hdr *)st_gdata->rx_skb->
262 data;
263
264 pr_debug("Event header: evt 0x%2.2x"
265 "plen %d", eh->evt, eh->plen);
266
267 st_check_data_len(st_gdata, protoid, eh->plen);
268 continue;
269
270 /* Waiting for Bluetooth acl header ? */
271 case ST_BT_W4_ACL_HDR:
272 ah = (struct hci_acl_hdr *)st_gdata->rx_skb->
273 data;
274 dlen = __le16_to_cpu(ah->dlen);
275
276 pr_info("ACL header: dlen %d", dlen);
277
278 st_check_data_len(st_gdata, protoid, dlen);
279 continue;
280
281 /* Waiting for Bluetooth sco header ? */
282 case ST_BT_W4_SCO_HDR:
283 sh = (struct hci_sco_hdr *)st_gdata->rx_skb->
284 data;
285
286 pr_info("SCO header: dlen %d", sh->dlen);
287
288 st_check_data_len(st_gdata, protoid, sh->dlen);
289 continue;
290 case ST_FM_W4_EVENT_HDR:
291 fm = (struct fm_event_hdr *)st_gdata->rx_skb->
292 data;
293 pr_info("FM Header: ");
294 st_check_data_len(st_gdata, ST_FM, fm->plen);
295 continue;
296 /* TODO : Add GPS packet machine logic here */
297 case ST_GPS_W4_EVENT_HDR:
298 /* [0x09 pkt hdr][R/W byte][2 byte len] */
299 gps = (struct gps_event_hdr *)st_gdata->rx_skb->
300 data;
301 pr_info("GPS Header: ");
302 st_check_data_len(st_gdata, ST_GPS, gps->plen);
303 continue;
304 } /* end of switch rx_state */
305 }
306
307 /* end of if rx_count */
308 /* Check first byte of packet and identify module
309 * owner (BT/FM/GPS) */
310 switch (*ptr) {
311
312 /* Bluetooth event packet? */
313 case HCI_EVENT_PKT:
314 pr_info("Event packet");
315 st_gdata->rx_state = ST_BT_W4_EVENT_HDR;
316 st_gdata->rx_count = HCI_EVENT_HDR_SIZE;
317 type = HCI_EVENT_PKT;
318 protoid = ST_BT;
319 break;
320
321 /* Bluetooth acl packet? */
322 case HCI_ACLDATA_PKT:
323 pr_info("ACL packet");
324 st_gdata->rx_state = ST_BT_W4_ACL_HDR;
325 st_gdata->rx_count = HCI_ACL_HDR_SIZE;
326 type = HCI_ACLDATA_PKT;
327 protoid = ST_BT;
328 break;
329
330 /* Bluetooth sco packet? */
331 case HCI_SCODATA_PKT:
332 pr_info("SCO packet");
333 st_gdata->rx_state = ST_BT_W4_SCO_HDR;
334 st_gdata->rx_count = HCI_SCO_HDR_SIZE;
335 type = HCI_SCODATA_PKT;
336 protoid = ST_BT;
337 break;
338
339 /* Channel 8(FM) packet? */
340 case ST_FM_CH8_PKT:
341 pr_info("FM CH8 packet");
342 type = ST_FM_CH8_PKT;
343 st_gdata->rx_state = ST_FM_W4_EVENT_HDR;
344 st_gdata->rx_count = FM_EVENT_HDR_SIZE;
345 protoid = ST_FM;
346 break;
347
348 /* Channel 9(GPS) packet? */
349 case 0x9: /*ST_LL_GPS_CH9_PKT */
350 pr_info("GPS CH9 packet");
351 type = 0x9; /* ST_LL_GPS_CH9_PKT; */
352 protoid = ST_GPS;
353 st_gdata->rx_state = ST_GPS_W4_EVENT_HDR;
354 st_gdata->rx_count = 3; /* GPS_EVENT_HDR_SIZE -1*/
355 break;
356 case LL_SLEEP_IND:
357 case LL_SLEEP_ACK:
358 case LL_WAKE_UP_IND:
359 pr_info("PM packet");
360 /* this takes appropriate action based on
361 * sleep state received --
362 */
363 st_ll_sleep_state(st_gdata, *ptr);
364 ptr++;
365 count--;
366 continue;
367 case LL_WAKE_UP_ACK:
368 pr_info("PM packet");
369 /* wake up ack received */
370 st_wakeup_ack(st_gdata, *ptr);
371 ptr++;
372 count--;
373 continue;
374 /* Unknown packet? */
375 default:
376 pr_err("Unknown packet type %2.2x", (__u8) *ptr);
377 ptr++;
378 count--;
379 continue;
380 }
381 ptr++;
382 count--;
383
384 switch (protoid) {
385 case ST_BT:
386 /* Allocate new packet to hold received data */
387 st_gdata->rx_skb =
388 bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
389 if (!st_gdata->rx_skb) {
390 pr_err("Can't allocate mem for new packet");
391 st_gdata->rx_state = ST_W4_PACKET_TYPE;
392 st_gdata->rx_count = 0;
393 return;
394 }
395 bt_cb(st_gdata->rx_skb)->pkt_type = type;
396 break;
397 case ST_FM: /* for FM */
398 st_gdata->rx_skb =
399 alloc_skb(FM_MAX_FRAME_SIZE, GFP_ATOMIC);
400 if (!st_gdata->rx_skb) {
401 pr_err("Can't allocate mem for new packet");
402 st_gdata->rx_state = ST_W4_PACKET_TYPE;
403 st_gdata->rx_count = 0;
404 return;
405 }
406 /* place holder 0x08 */
407 skb_reserve(st_gdata->rx_skb, 1);
408 st_gdata->rx_skb->cb[0] = ST_FM_CH8_PKT;
409 break;
410 case ST_GPS:
411 /* for GPS */
412 st_gdata->rx_skb =
413 alloc_skb(100 /*GPS_MAX_FRAME_SIZE */ , GFP_ATOMIC);
414 if (!st_gdata->rx_skb) {
415 pr_err("Can't allocate mem for new packet");
416 st_gdata->rx_state = ST_W4_PACKET_TYPE;
417 st_gdata->rx_count = 0;
418 return;
419 }
420 /* place holder 0x09 */
421 skb_reserve(st_gdata->rx_skb, 1);
422 st_gdata->rx_skb->cb[0] = 0x09; /*ST_GPS_CH9_PKT; */
423 break;
424 case ST_MAX:
425 break;
426 }
427 }
428 pr_debug("done %s", __func__);
429 return;
430}
431
432/**
433 * st_int_dequeue - internal de-Q function.
434 * If the previous data set was not written
435 * completely, return that skb which has the pending data.
436 * In normal cases, return top of txq.
437 */
438struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
439{
440 struct sk_buff *returning_skb;
441
442 pr_debug("%s", __func__);
443 if (st_gdata->tx_skb != NULL) {
444 returning_skb = st_gdata->tx_skb;
445 st_gdata->tx_skb = NULL;
446 return returning_skb;
447 }
448 return skb_dequeue(&st_gdata->txq);
449}
450
451/**
452 * st_int_enqueue - internal Q-ing function.
453 * Will either Q the skb to txq or the tx_waitq
454 * depending on the ST LL state.
455 * If the chip is asleep, then Q it onto waitq and
456 * wakeup the chip.
457 * txq and waitq need protection since other contexts
458 * may be sending data or waking up the chip.
459 */
460void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
461{
462 unsigned long flags = 0;
463
464 pr_debug("%s", __func__);
465 spin_lock_irqsave(&st_gdata->lock, flags);
466
467 switch (st_ll_getstate(st_gdata)) {
468 case ST_LL_AWAKE:
469 pr_info("ST LL is AWAKE, sending normally");
470 skb_queue_tail(&st_gdata->txq, skb);
471 break;
472 case ST_LL_ASLEEP_TO_AWAKE:
473 skb_queue_tail(&st_gdata->tx_waitq, skb);
474 break;
475 case ST_LL_AWAKE_TO_ASLEEP:
476 pr_err("ST LL is illegal state(%ld),"
477 "purging received skb.", st_ll_getstate(st_gdata));
478 kfree_skb(skb);
479 break;
480 case ST_LL_ASLEEP:
481 skb_queue_tail(&st_gdata->tx_waitq, skb);
482 st_ll_wakeup(st_gdata);
483 break;
484 default:
485 pr_err("ST LL is illegal state(%ld),"
486 "purging received skb.", st_ll_getstate(st_gdata));
487 kfree_skb(skb);
488 break;
489 }
490
491 spin_unlock_irqrestore(&st_gdata->lock, flags);
492 pr_debug("done %s", __func__);
493 return;
494}
495
496/*
497 * internal wakeup function
498 * called from either
499 * - TTY layer when write's finished
500 * - st_write (in context of the protocol stack)
501 */
502void st_tx_wakeup(struct st_data_s *st_data)
503{
504 struct sk_buff *skb;
505 unsigned long flags; /* for irq save flags */
506 pr_debug("%s", __func__);
507 /* check for sending & set flag sending here */
508 if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) {
509 pr_info("ST already sending");
510 /* keep sending */
511 set_bit(ST_TX_WAKEUP, &st_data->tx_state);
512 return;
513 /* TX_WAKEUP will be checked in another
514 * context
515 */
516 }
517 do { /* come back if st_tx_wakeup is set */
518 /* woke-up to write */
519 clear_bit(ST_TX_WAKEUP, &st_data->tx_state);
520 while ((skb = st_int_dequeue(st_data))) {
521 int len;
522 spin_lock_irqsave(&st_data->lock, flags);
523 /* enable wake-up from TTY */
524 set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags);
525 len = st_int_write(st_data, skb->data, skb->len);
526 skb_pull(skb, len);
527 /* if skb->len == len as expected, skb->len becomes 0 */
528 if (skb->len) {
529 /* would be the next skb to be sent */
530 st_data->tx_skb = skb;
531 spin_unlock_irqrestore(&st_data->lock, flags);
532 break;
533 }
534 kfree_skb(skb);
535 spin_unlock_irqrestore(&st_data->lock, flags);
536 }
537 /* if wake-up is set in another context- restart sending */
538 } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state));
539
540 /* clear flag sending */
541 clear_bit(ST_TX_SENDING, &st_data->tx_state);
542}
543
544/********************************************************************/
545/* functions called from ST KIM
546*/
547void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf)
548{
549 seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n",
550 st_gdata->protos_registered,
551 st_gdata->list[ST_BT] != NULL ? 'R' : 'U',
552 st_gdata->list[ST_FM] != NULL ? 'R' : 'U',
553 st_gdata->list[ST_GPS] != NULL ? 'R' : 'U');
554}
555
556/********************************************************************/
557/*
558 * functions called from protocol stack drivers
559 * to be EXPORT-ed
560 */
561long st_register(struct st_proto_s *new_proto)
562{
563 struct st_data_s *st_gdata;
564 long err = 0;
565 unsigned long flags = 0;
566
567 st_kim_ref(&st_gdata, 0);
568 pr_info("%s(%d) ", __func__, new_proto->type);
569 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
570 || new_proto->reg_complete_cb == NULL) {
571 pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
572 return -1;
573 }
574
575 if (new_proto->type < ST_BT || new_proto->type >= ST_MAX) {
576 pr_err("protocol %d not supported", new_proto->type);
577 return -EPROTONOSUPPORT;
578 }
579
580 if (st_gdata->list[new_proto->type] != NULL) {
581 pr_err("protocol %d already registered", new_proto->type);
582 return -EALREADY;
583 }
584
585 /* can be from process context only */
586 spin_lock_irqsave(&st_gdata->lock, flags);
587
588 if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) {
589 pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->type);
590 /* fw download in progress */
591 st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
592
593 st_gdata->list[new_proto->type] = new_proto;
594 st_gdata->protos_registered++;
595 new_proto->write = st_write;
596
597 set_bit(ST_REG_PENDING, &st_gdata->st_state);
598 spin_unlock_irqrestore(&st_gdata->lock, flags);
599 return -EINPROGRESS;
600 } else if (st_gdata->protos_registered == ST_EMPTY) {
601 pr_info(" protocol list empty :%d ", new_proto->type);
602 set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
603 st_recv = st_kim_recv;
604
605 /* release lock previously held - re-locked below */
606 spin_unlock_irqrestore(&st_gdata->lock, flags);
607
608 /* enable the ST LL - to set default chip state */
609 st_ll_enable(st_gdata);
610 /* this may take a while to complete
611 * since it involves BT fw download
612 */
613 err = st_kim_start(st_gdata->kim_data);
614 if (err != 0) {
615 clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
616 if ((st_gdata->protos_registered != ST_EMPTY) &&
617 (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
618 pr_err(" KIM failure complete callback ");
619 st_reg_complete(st_gdata, -1);
620 }
621
622 return -1;
623 }
624
625 /* the protocol might require other gpios to be toggled
626 */
627 st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
628
629 clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
630 st_recv = st_int_recv;
631
632 /* this is where all pending registration
633 * are signalled to be complete by calling callback functions
634 */
635 if ((st_gdata->protos_registered != ST_EMPTY) &&
636 (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
637 pr_debug(" call reg complete callback ");
638 st_reg_complete(st_gdata, 0);
639 }
640 clear_bit(ST_REG_PENDING, &st_gdata->st_state);
641
642 /* check for already registered once more,
643 * since the above check is old
644 */
645 if (st_gdata->list[new_proto->type] != NULL) {
646 pr_err(" proto %d already registered ",
647 new_proto->type);
648 return -EALREADY;
649 }
650
651 spin_lock_irqsave(&st_gdata->lock, flags);
652 st_gdata->list[new_proto->type] = new_proto;
653 st_gdata->protos_registered++;
654 new_proto->write = st_write;
655 spin_unlock_irqrestore(&st_gdata->lock, flags);
656 return err;
657 }
658 /* if fw is already downloaded & new stack registers protocol */
659 else {
660 switch (new_proto->type) {
661 case ST_BT:
662 /* do nothing */
663 break;
664 case ST_FM:
665 case ST_GPS:
666 st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
667 break;
668 case ST_MAX:
669 default:
670 pr_err("%d protocol not supported",
671 new_proto->type);
672 spin_unlock_irqrestore(&st_gdata->lock, flags);
673 return -EPROTONOSUPPORT;
674 }
675 st_gdata->list[new_proto->type] = new_proto;
676 st_gdata->protos_registered++;
677 new_proto->write = st_write;
678
679 /* lock already held before entering else */
680 spin_unlock_irqrestore(&st_gdata->lock, flags);
681 return err;
682 }
683 pr_debug("done %s(%d) ", __func__, new_proto->type);
684}
685EXPORT_SYMBOL_GPL(st_register);
686
687/* to unregister a protocol -
688 * to be called from protocol stack driver
689 */
690long st_unregister(enum proto_type type)
691{
692 long err = 0;
693 unsigned long flags = 0;
694 struct st_data_s *st_gdata;
695
696 pr_debug("%s: %d ", __func__, type);
697
698 st_kim_ref(&st_gdata, 0);
699 if (type < ST_BT || type >= ST_MAX) {
700 pr_err(" protocol %d not supported", type);
701 return -EPROTONOSUPPORT;
702 }
703
704 spin_lock_irqsave(&st_gdata->lock, flags);
705
706 if (st_gdata->list[type] == NULL) {
707 pr_err(" protocol %d not registered", type);
708 spin_unlock_irqrestore(&st_gdata->lock, flags);
709 return -EPROTONOSUPPORT;
710 }
711
712 st_gdata->protos_registered--;
713 st_gdata->list[type] = NULL;
714
715 /* kim ignores BT in the below function
716 * and handles the rest, BT is toggled
717 * only in kim_start and kim_stop
718 */
719 st_kim_chip_toggle(type, KIM_GPIO_INACTIVE);
720 spin_unlock_irqrestore(&st_gdata->lock, flags);
721
722 if ((st_gdata->protos_registered == ST_EMPTY) &&
723 (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
724 pr_info(" all protocols unregistered ");
725
726 /* stop traffic on tty */
727 if (st_gdata->tty) {
728 tty_ldisc_flush(st_gdata->tty);
729 stop_tty(st_gdata->tty);
730 }
731
732 /* all protocols now unregistered */
733 st_kim_stop(st_gdata->kim_data);
734 /* disable ST LL */
735 st_ll_disable(st_gdata);
736 }
737 return err;
738}
739
740/*
741 * called in protocol stack drivers
742 * via the write function pointer
743 */
744long st_write(struct sk_buff *skb)
745{
746 struct st_data_s *st_gdata;
747#ifdef DEBUG
748 enum proto_type protoid = ST_MAX;
749#endif
750 long len;
751
752 st_kim_ref(&st_gdata, 0);
753 if (unlikely(skb == NULL || st_gdata == NULL
754 || st_gdata->tty == NULL)) {
755 pr_err("data/tty unavailable to perform write");
756 return -1;
757 }
758#ifdef DEBUG /* open-up skb to read the 1st byte */
759 switch (skb->data[0]) {
760 case HCI_COMMAND_PKT:
761 case HCI_ACLDATA_PKT:
762 case HCI_SCODATA_PKT:
763 protoid = ST_BT;
764 break;
765 case ST_FM_CH8_PKT:
766 protoid = ST_FM;
767 break;
768 case 0x09:
769 protoid = ST_GPS;
770 break;
771 }
772 if (unlikely(st_gdata->list[protoid] == NULL)) {
773 pr_err(" protocol %d not registered, and writing? ",
774 protoid);
775 return -1;
776 }
777#endif
778 pr_debug("%d to be written", skb->len);
779 len = skb->len;
780
781 /* st_ll to decide where to enqueue the skb */
782 st_int_enqueue(st_gdata, skb);
783 /* wake up */
784 st_tx_wakeup(st_gdata);
785
786 /* return number of bytes written */
787 return len;
788}
789
790/* for protocols making use of shared transport */
791EXPORT_SYMBOL_GPL(st_unregister);
792
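/*
 * Example (not part of this file): a minimal sketch of how a protocol
 * stack driver might use the API exported above.  The callback names
 * and prototypes (assumed to match <linux/ti_wilink_st.h>) and the
 * error handling are illustrative; only the st_proto_s members that
 * this file actually references (type, recv, reg_complete_cb, write,
 * priv_data) are relied upon.
 */
static long example_bt_recv(void *priv_data, struct sk_buff *skb)
{
	/* hand the skb over to the protocol stack */
	kfree_skb(skb);		/* placeholder for real processing */
	return 0;
}

static void example_reg_complete(void *priv_data, char err)
{
	/* err is 0 on success, negative if firmware download failed */
}

static struct st_proto_s example_proto = {
	.type		 = ST_BT,
	.recv		 = example_bt_recv,
	.reg_complete_cb = example_reg_complete,
	.priv_data	 = NULL,
};

static long example_attach(void)
{
	long err = st_register(&example_proto);

	if (err == -EINPROGRESS)	/* wait for example_reg_complete() */
		return 0;
	if (err < 0)
		return err;
	/* on success ST core has filled in example_proto.write;
	 * skbs are sent via example_proto.write(skb), and queueing plus
	 * chip wake-up is handled by st_int_enqueue()/st_tx_wakeup()
	 */
	return 0;
}

static void example_detach(void)
{
	st_unregister(ST_BT);
}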
793/********************************************************************/
794/*
795 * functions called from TTY layer
796 */
797static int st_tty_open(struct tty_struct *tty)
798{
799 int err = 0;
800 struct st_data_s *st_gdata;
801 pr_info("%s ", __func__);
802
803 st_kim_ref(&st_gdata, 0);
804 st_gdata->tty = tty;
805 tty->disc_data = st_gdata;
806
807 /* don't do a wakeup for now */
808 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
809
810 /* mem already allocated
811 */
812 tty->receive_room = 65536;
813 /* Flush any pending characters in the driver and discipline. */
814 tty_ldisc_flush(tty);
815 tty_driver_flush_buffer(tty);
816 /*
817 * signal to UIM via KIM that -
818 * installation of N_TI_WL ldisc is complete
819 */
820 st_kim_complete(st_gdata->kim_data);
821 pr_debug("done %s", __func__);
822 return err;
823}
824
825static void st_tty_close(struct tty_struct *tty)
826{
827 unsigned char i = ST_MAX;
828 unsigned long flags = 0;
829 struct st_data_s *st_gdata = tty->disc_data;
830
831 pr_info("%s ", __func__);
832
833 /* TODO:
834 * if a protocol has been registered & line discipline
835 * un-installed for some reason - what should be done ?
836 */
837 spin_lock_irqsave(&st_gdata->lock, flags);
838 for (i = ST_BT; i < ST_MAX; i++) {
839 if (st_gdata->list[i] != NULL)
840 pr_err("%d not un-registered", i);
841 st_gdata->list[i] = NULL;
842 }
843 st_gdata->protos_registered = 0;
844 spin_unlock_irqrestore(&st_gdata->lock, flags);
845 /*
846 * signal to UIM via KIM that -
847 * N_TI_WL ldisc is un-installed
848 */
849 st_kim_complete(st_gdata->kim_data);
850 st_gdata->tty = NULL;
851 /* Flush any pending characters in the driver and discipline. */
852 tty_ldisc_flush(tty);
853 tty_driver_flush_buffer(tty);
854
855 spin_lock_irqsave(&st_gdata->lock, flags);
856 /* empty out txq and tx_waitq */
857 skb_queue_purge(&st_gdata->txq);
858 skb_queue_purge(&st_gdata->tx_waitq);
859 /* reset the TTY Rx states of ST */
860 st_gdata->rx_count = 0;
861 st_gdata->rx_state = ST_W4_PACKET_TYPE;
862 kfree_skb(st_gdata->rx_skb);
863 st_gdata->rx_skb = NULL;
864 spin_unlock_irqrestore(&st_gdata->lock, flags);
865
866 pr_debug("%s: done ", __func__);
867}
868
869static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
870 char *tty_flags, int count)
871{
872
873#ifdef VERBOSE
874 print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
875 16, 1, data, count, 0);
876#endif
877
878 /*
879 * if fw download is in progress then route incoming data
880 * to KIM for validation
881 */
882 st_recv(tty->disc_data, data, count);
883 pr_debug("done %s", __func__);
884}
885
886/* wake-up function called in from the TTY layer;
887 * from inside it, the internal wakeup function will be called
888 */
889static void st_tty_wakeup(struct tty_struct *tty)
890{
891 struct st_data_s *st_gdata = tty->disc_data;
892 pr_debug("%s ", __func__);
893 /* don't do a wakeup for now */
894 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
895
896 /* call our internal wakeup */
897 st_tx_wakeup((void *)st_gdata);
898}
899
900static void st_tty_flush_buffer(struct tty_struct *tty)
901{
902 struct st_data_s *st_gdata = tty->disc_data;
903 pr_debug("%s ", __func__);
904
905 kfree_skb(st_gdata->tx_skb);
906 st_gdata->tx_skb = NULL;
907
908 tty->ops->flush_buffer(tty);
909 return;
910}
911
912static struct tty_ldisc_ops st_ldisc_ops = {
913 .magic = TTY_LDISC_MAGIC,
914 .name = "n_st",
915 .open = st_tty_open,
916 .close = st_tty_close,
917 .receive_buf = st_tty_receive,
918 .write_wakeup = st_tty_wakeup,
919 .flush_buffer = st_tty_flush_buffer,
920 .owner = THIS_MODULE
921};
922
923/********************************************************************/
924int st_core_init(struct st_data_s **core_data)
925{
926 struct st_data_s *st_gdata;
927 long err;
928
929 err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops);
930 if (err) {
931 pr_err("error registering %d line discipline %ld",
932 N_TI_WL, err);
933 return err;
934 }
935 pr_debug("registered n_shared line discipline");
936
937 st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
938 if (!st_gdata) {
939 pr_err("memory allocation failed");
940 err = tty_unregister_ldisc(N_TI_WL);
941 if (err)
942 pr_err("unable to un-register ldisc %ld", err);
943 err = -ENOMEM;
944 return err;
945 }
946
947 /* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's
948 * will be pushed in this queue for actual transmission.
949 */
950 skb_queue_head_init(&st_gdata->txq);
951 skb_queue_head_init(&st_gdata->tx_waitq);
952
953 /* Locking used in st_int_enqueue() to avoid multiple execution */
954 spin_lock_init(&st_gdata->lock);
955
956 err = st_ll_init(st_gdata);
957 if (err) {
958 pr_err("error during st_ll initialization(%ld)", err);
959 kfree(st_gdata);
960 err = tty_unregister_ldisc(N_TI_WL);
961 if (err)
962 pr_err("unable to un-register ldisc");
963 return -1;
964 }
965 *core_data = st_gdata;
966 return 0;
967}
968
969void st_core_exit(struct st_data_s *st_gdata)
970{
971 long err;
972 /* internal module cleanup */
973 err = st_ll_deinit(st_gdata);
974 if (err)
975 pr_err("error during deinit of ST LL %ld", err);
976
977 if (st_gdata != NULL) {
978 /* Free ST Tx Qs and skbs */
979 skb_queue_purge(&st_gdata->txq);
980 skb_queue_purge(&st_gdata->tx_waitq);
981 kfree_skb(st_gdata->rx_skb);
982 kfree_skb(st_gdata->tx_skb);
983 /* TTY ldisc cleanup */
984 err = tty_unregister_ldisc(N_TI_WL);
985 if (err)
986 pr_err("unable to un-register ldisc %ld", err);
987 /* free the global data pointer */
988 kfree(st_gdata);
989 }
990}
991
992
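The N_TI_WL line discipline registered by st_core_init() is installed from user space (the UIM daemon) once KIM unblocks the rfkill entry; st_tty_open() then signals KIM via st_kim_complete(). A minimal user-space sketch follows, with the UART node path and the N_TI_WL value as assumptions (the real value should come from the kernel headers of this era):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_TI_WL
#define N_TI_WL 22	/* assumption: value as defined in <linux/tty.h> */
#endif

int install_st_ldisc(const char *uart)	/* e.g. "/dev/ttyS1" (assumption) */
{
	int ldisc = N_TI_WL;
	int fd = open(uart, O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open uart");
		return -1;
	}
	/* baud-rate / flow-control setup via termios omitted for brevity */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {	/* triggers st_tty_open() */
		perror("TIOCSETD");
		close(fd);
		return -1;
	}
	/* keep fd open; closing it un-installs the ldisc (st_tty_close) */
	return fd;
}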
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
new file mode 100644
index 000000000000..73b6c8b0e869
--- /dev/null
+++ b/drivers/misc/ti-st/st_kim.c
@@ -0,0 +1,799 @@
1/*
2 * Shared Transport Line discipline driver Core
3 * Init Manager module responsible for GPIO control
4 * and firmware download
5 * Copyright (C) 2009-2010 Texas Instruments
6 * Author: Pavan Savoy <pavan_savoy@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#define pr_fmt(fmt) "(stk) :" fmt
24#include <linux/platform_device.h>
25#include <linux/jiffies.h>
26#include <linux/firmware.h>
27#include <linux/delay.h>
28#include <linux/wait.h>
29#include <linux/gpio.h>
30#include <linux/debugfs.h>
31#include <linux/seq_file.h>
32#include <linux/sched.h>
33#include <linux/rfkill.h>
34
35/* understand BT events for fw response */
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/hci.h>
39
40#include <linux/ti_wilink_st.h>
41
42
43static int kim_probe(struct platform_device *pdev);
44static int kim_remove(struct platform_device *pdev);
45
46/* KIM platform device driver structure */
47static struct platform_driver kim_platform_driver = {
48 .probe = kim_probe,
49 .remove = kim_remove,
50 /* TODO: ST driver power management during suspend/resume ?
51 */
52#if 0
53 .suspend = kim_suspend,
54 .resume = kim_resume,
55#endif
56 .driver = {
57 .name = "kim",
58 .owner = THIS_MODULE,
59 },
60};
61
62static int kim_toggle_radio(void*, bool);
63static const struct rfkill_ops kim_rfkill_ops = {
64 .set_block = kim_toggle_radio,
65};
66
67/* strings to be used for rfkill entries and by
68 * ST Core to be used for sysfs debug entry
69 */
70#define PROTO_ENTRY(type, name) name
71const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_BT, "Bluetooth"),
73 PROTO_ENTRY(ST_FM, "FM"),
74 PROTO_ENTRY(ST_GPS, "GPS"),
75};
76
77#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
78static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
79
80/**********************************************************************/
81/* internal functions */
82
83/**
84 * st_get_plat_device -
85 * function which returns the reference to the platform device
86 * requested by id. As of now only 1 such device exists (id=0).
87 * The reference can be requested either by (a) the protocol
88 * driver which is registering, or (b) the tty device which is
89 * opened.
90 */
91static struct platform_device *st_get_plat_device(int id)
92{
93 return st_kim_devices[id];
94}
95
96/**
97 * validate_firmware_response -
98 * function to check whether the firmware response was proper.
99 * In case of error, don't complete, so that the wait for a
100 * proper response times out.
101 */
102void validate_firmware_response(struct kim_data_s *kim_gdata)
103{
104 struct sk_buff *skb = kim_gdata->rx_skb;
105 if (unlikely(skb->data[5] != 0)) {
106 pr_err("no proper response during fw download");
107 pr_err("data6 %x", skb->data[5]);
108 return; /* keep waiting for the proper response */
109 }
110 /* because of all the script being downloaded */
111 complete_all(&kim_gdata->kim_rcvd);
112 kfree_skb(skb);
113}
114
115/* check the data length received inside kim_int_recv;
116 * most often hits the last case to update state to waiting for data
117 */
118static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
119{
120 register int room = skb_tailroom(kim_gdata->rx_skb);
121
122 pr_debug("len %d room %d", len, room);
123
124 if (!len) {
125 validate_firmware_response(kim_gdata);
126 } else if (len > room) {
127 /* Received packet's payload length is larger.
128 * We can't accommodate it in created skb.
129 */
130 pr_err("Data length is too large len %d room %d", len,
131 room);
132 kfree_skb(kim_gdata->rx_skb);
133 } else {
134 /* Packet header has non-zero payload length and
135 * we have enough space in the created skb. Let's read
136 * payload data */
137 kim_gdata->rx_state = ST_BT_W4_DATA;
138 kim_gdata->rx_count = len;
139 return len;
140 }
141
142 /* Change ST LL state to continue to process next
143 * packet */
144 kim_gdata->rx_state = ST_W4_PACKET_TYPE;
145 kim_gdata->rx_skb = NULL;
146 kim_gdata->rx_count = 0;
147
148 return 0;
149}
150
151/**
152 * kim_int_recv - receive function called during firmware download.
153 * Firmware download responses on different UART drivers
154 * have been observed to come in bursts across different
155 * tty_receive calls, hence the logic.
156 */
157void kim_int_recv(struct kim_data_s *kim_gdata,
158 const unsigned char *data, long count)
159{
160 const unsigned char *ptr;
161 struct hci_event_hdr *eh;
162 int len = 0, type = 0;
163
164 pr_debug("%s", __func__);
165 /* Decode received bytes here */
166 ptr = data;
167 if (unlikely(ptr == NULL)) {
168 pr_err(" received null from TTY ");
169 return;
170 }
171
172 while (count) {
173 if (kim_gdata->rx_count) {
174 len = min_t(unsigned int, kim_gdata->rx_count, count);
175 memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
176 kim_gdata->rx_count -= len;
177 count -= len;
178 ptr += len;
179
180 if (kim_gdata->rx_count)
181 continue;
182
183 /* Check ST RX state machine, where are we? */
184 switch (kim_gdata->rx_state) {
185 /* Waiting for complete packet ? */
186 case ST_BT_W4_DATA:
187 pr_debug("Complete pkt received");
188 validate_firmware_response(kim_gdata);
189 kim_gdata->rx_state = ST_W4_PACKET_TYPE;
190 kim_gdata->rx_skb = NULL;
191 continue;
192 /* Waiting for Bluetooth event header ? */
193 case ST_BT_W4_EVENT_HDR:
194 eh = (struct hci_event_hdr *)kim_gdata->
195 rx_skb->data;
196 pr_debug("Event header: evt 0x%2.2x"
197 "plen %d", eh->evt, eh->plen);
198 kim_check_data_len(kim_gdata, eh->plen);
199 continue;
200 } /* end of switch */
201 } /* end of if rx_count */
202 switch (*ptr) {
203 /* Bluetooth event packet? */
204 case HCI_EVENT_PKT:
205 pr_info("Event packet");
206 kim_gdata->rx_state = ST_BT_W4_EVENT_HDR;
207 kim_gdata->rx_count = HCI_EVENT_HDR_SIZE;
208 type = HCI_EVENT_PKT;
209 break;
210 default:
211 pr_info("unknown packet");
212 ptr++;
213 count--;
214 continue;
215 }
216 ptr++;
217 count--;
218 kim_gdata->rx_skb =
219 bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
220 if (!kim_gdata->rx_skb) {
221 pr_err("can't allocate mem for new packet");
222 kim_gdata->rx_state = ST_W4_PACKET_TYPE;
223 kim_gdata->rx_count = 0;
224 return;
225 }
226 bt_cb(kim_gdata->rx_skb)->pkt_type = type;
227 }
228 pr_info("done %s", __func__);
229 return;
230}
231
232static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
233{
234 unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
235 const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
236
237 pr_debug("%s", __func__);
238
239 INIT_COMPLETION(kim_gdata->kim_rcvd);
240 if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
241 pr_err("kim: couldn't write 4 bytes");
242 return -1;
243 }
244
245 if (!wait_for_completion_timeout
246 (&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
247 pr_err(" waiting for ver info- timed out ");
248 return -1;
249 }
250
251 version =
252 MAKEWORD(kim_gdata->resp_buffer[13],
253 kim_gdata->resp_buffer[14]);
254 chip = (version & 0x7C00) >> 10;
255 min_ver = (version & 0x007F);
256 maj_ver = (version & 0x0380) >> 7;
257
258 if (version & 0x8000)
259 maj_ver |= 0x0008;
260
261 sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);
262
263 /* to be accessed later via sysfs entry */
264 kim_gdata->version.full = version;
265 kim_gdata->version.chip = chip;
266 kim_gdata->version.maj_ver = maj_ver;
267 kim_gdata->version.min_ver = min_ver;
268
269 pr_info("%s", bts_scr_name);
270 return 0;
271}
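/*
 * Worked example of the decoding above (the value is illustrative):
 * a resp_buffer yielding version = 0x1D1F gives
 *   chip    = (0x1D1F & 0x7C00) >> 10 = 7
 *   maj_ver = (0x1D1F & 0x0380) >> 7  = 2   (bit 15 is clear, so no +8)
 *   min_ver =  0x1D1F & 0x007F        = 31
 * so the firmware requested would be "TIInit_7.2.31.bts".
 */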
272
273/**
274 * download_firmware -
275 * internal function which parses through the .bts firmware
276 * script file and interprets only SEND and DELAY actions as of now
277 */
278static long download_firmware(struct kim_data_s *kim_gdata)
279{
280 long err = 0;
281 long len = 0;
282 unsigned char *ptr = NULL;
283 unsigned char *action_ptr = NULL;
284 unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
285
286 err = read_local_version(kim_gdata, bts_scr_name);
287 if (err != 0) {
288 pr_err("kim: failed to read local ver");
289 return err;
290 }
291 err =
292 request_firmware(&kim_gdata->fw_entry, bts_scr_name,
293 &kim_gdata->kim_pdev->dev);
294 if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
295 (kim_gdata->fw_entry->size == 0))) {
296 pr_err(" request_firmware failed(errno %ld) for %s", err,
297 bts_scr_name);
298 return -1;
299 }
300 ptr = (void *)kim_gdata->fw_entry->data;
301 len = kim_gdata->fw_entry->size;
302 /* skip the bts_header to strip out the magic number and
303 * version
304 */
305 ptr += sizeof(struct bts_header);
306 len -= sizeof(struct bts_header);
307
308 while (len > 0 && ptr) {
309 pr_debug(" action size %d, type %d ",
310 ((struct bts_action *)ptr)->size,
311 ((struct bts_action *)ptr)->type);
312
313 switch (((struct bts_action *)ptr)->type) {
314 case ACTION_SEND_COMMAND: /* action send */
315 action_ptr = &(((struct bts_action *)ptr)->data[0]);
316 if (unlikely
317 (((struct hci_command *)action_ptr)->opcode ==
318 0xFF36)) {
319 /* ignore remote change
320 * baud rate HCI VS command */
321 pr_err
322 (" change remote baud"
323 " rate command in firmware");
324 break;
325 }
326
327 INIT_COMPLETION(kim_gdata->kim_rcvd);
328 err = st_int_write(kim_gdata->core_data,
329 ((struct bts_action_send *)action_ptr)->data,
330 ((struct bts_action *)ptr)->size);
331 if (unlikely(err < 0)) {
332 release_firmware(kim_gdata->fw_entry);
333 return -1;
334 }
335 if (!wait_for_completion_timeout
336 (&kim_gdata->kim_rcvd,
337 msecs_to_jiffies(CMD_RESP_TIME))) {
338 pr_err
339 (" response timeout during fw download ");
340 /* timed out */
341 release_firmware(kim_gdata->fw_entry);
342 return -1;
343 }
344 break;
345 case ACTION_DELAY: /* sleep */
346 pr_info("sleep command in scr");
347 action_ptr = &(((struct bts_action *)ptr)->data[0]);
348 mdelay(((struct bts_action_delay *)action_ptr)->msec);
349 break;
350 }
351 len =
352 len - (sizeof(struct bts_action) +
353 ((struct bts_action *)ptr)->size);
354 ptr =
355 ptr + sizeof(struct bts_action) +
356 ((struct bts_action *)ptr)->size;
357 }
358 /* fw download complete */
359 release_firmware(kim_gdata->fw_entry);
360 return 0;
361}
362
363/**********************************************************************/
364/* functions called from ST core */
365/* function to toggle the GPIO
366 * needs to know whether the GPIO is active high or active low
367 */
368void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
369{
370 struct platform_device *kim_pdev;
371 struct kim_data_s *kim_gdata;
372 pr_info(" %s ", __func__);
373
374 kim_pdev = st_get_plat_device(0);
375 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
376
377 if (kim_gdata->gpios[type] == -1) {
378 pr_info(" gpio not requested for protocol %s",
379 protocol_names[type]);
380 return;
381 }
382 switch (type) {
383 case ST_BT:
384 /*Do Nothing */
385 break;
386
387 case ST_FM:
388 if (state == KIM_GPIO_ACTIVE)
389 gpio_set_value(kim_gdata->gpios[ST_FM], GPIO_LOW);
390 else
391 gpio_set_value(kim_gdata->gpios[ST_FM], GPIO_HIGH);
392 break;
393
394 case ST_GPS:
395 if (state == KIM_GPIO_ACTIVE)
396 gpio_set_value(kim_gdata->gpios[ST_GPS], GPIO_HIGH);
397 else
398 gpio_set_value(kim_gdata->gpios[ST_GPS], GPIO_LOW);
399 break;
400
401 case ST_MAX:
402 default:
403 break;
404 }
405
406 return;
407}
408
409/* called from ST Core, when REG_IN_PROGRESS (registration in progress)
410 * can be because of
411 * 1. response to read local version
412 * 2. during send/recv's of firmware download
413 */
414void st_kim_recv(void *disc_data, const unsigned char *data, long count)
415{
416 struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
417 struct kim_data_s *kim_gdata = st_gdata->kim_data;
418
419 pr_info(" %s ", __func__);
420 /* copy to local buffer */
421 if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) {
422 /* must be the read_ver_cmd */
423 memcpy(kim_gdata->resp_buffer, data, count);
424 complete_all(&kim_gdata->kim_rcvd);
425 return;
426 } else {
427 kim_int_recv(kim_gdata, data, count);
428 /* either completes or times out */
429 }
430 return;
431}
432
433/* to signal completion of line discipline installation
434 * called from ST Core, upon tty_open
435 */
436void st_kim_complete(void *kim_data)
437{
438 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
439 complete(&kim_gdata->ldisc_installed);
440}
441
442/**
443 * st_kim_start - called from ST Core upon 1st registration
444 * This involves toggling the chip enable gpio, reading
445 * the firmware version from the chip, forming the fw file name
446 * based on the chip version, requesting the fw, parsing it
447 * and performing the download (send/recv).
448 */
449long st_kim_start(void *kim_data)
450{
451 long err = 0;
452 long retry = POR_RETRY_COUNT;
453 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
454
455 pr_info(" %s", __func__);
456
457 do {
458 /* TODO: this is only because rfkill sub-system
459 * doesn't send events to user-space if the state
460 * isn't changed
461 */
462 rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
463 /* Configure BT nShutdown to HIGH state */
464 gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
465 mdelay(5); /* FIXME: a proper toggle */
466 gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_HIGH);
467 mdelay(100);
468 /* re-initialize the completion */
469 INIT_COMPLETION(kim_gdata->ldisc_installed);
470#if 0 /* older way of signalling user-space UIM */
471 /* send signal to UIM */
472 err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 0);
473 if (err != 0) {
474 pr_info(" sending SIGUSR2 to uim failed %ld", err);
475 err = -1;
476 continue;
477 }
478#endif
479 /* unblock and send event to UIM via /dev/rfkill */
480 rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 0);
481 /* wait for ldisc to be installed */
482 err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
483 msecs_to_jiffies(LDISC_TIME));
484 if (!err) { /* timeout */
485 pr_err("line disc installation timed out ");
486 err = -1;
487 continue;
488 } else {
489 /* ldisc installed now */
490 pr_info(" line discipline installed ");
491 err = download_firmware(kim_gdata);
492 if (err != 0) {
493 pr_err("download firmware failed");
494 continue;
495 } else { /* on success don't retry */
496 break;
497 }
498 }
499 } while (retry--);
500 return err;
501}
502
503/**
504 * st_kim_stop - called from ST Core, on the last un-registration
505 * toggle low the chip enable gpio
506 */
507long st_kim_stop(void *kim_data)
508{
509 long err = 0;
510 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
511
512 INIT_COMPLETION(kim_gdata->ldisc_installed);
513#if 0 /* older way of signalling user-space UIM */
514 /* send signal to UIM */
515 err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 1);
516 if (err != 0) {
517 pr_err("sending SIGUSR2 to uim failed %ld", err);
518 return -1;
519 }
520#endif
521 /* set BT rfkill to be blocked */
522 err = rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
523
524 /* wait for ldisc to be un-installed */
525 err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
526 msecs_to_jiffies(LDISC_TIME));
527 if (!err) { /* timeout */
528 pr_err(" timed out waiting for ldisc to be un-installed");
529 return -1;
530 }
531
532 /* By default configure BT nShutdown to LOW state */
533 gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
534 mdelay(1);
535 gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_HIGH);
536 mdelay(1);
537 gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
538 return err;
539}
540
541/**********************************************************************/
542/* functions called from subsystems */
543/* called when debugfs entry is read from */
544
545static int show_version(struct seq_file *s, void *unused)
546{
547 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
548 seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full,
549 kim_gdata->version.chip, kim_gdata->version.maj_ver,
550 kim_gdata->version.min_ver);
551 return 0;
552}
553
554static int show_list(struct seq_file *s, void *unused)
555{
556 struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private;
557 kim_st_list_protocols(kim_gdata->core_data, s);
558 return 0;
559}
560
561/* function called from rfkill subsystem, when someone from
562 * user space would write 0/1 on the sysfs entry
563 * /sys/class/rfkill/rfkill0,1,3/state
564 */
565static int kim_toggle_radio(void *data, bool blocked)
566{
567 enum proto_type type = *((enum proto_type *)data);
568 pr_debug(" %s: %d ", __func__, type);
569
570 switch (type) {
571 case ST_BT:
572 /* do nothing */
573 break;
574 case ST_FM:
575 case ST_GPS:
576 if (blocked)
577 st_kim_chip_toggle(type, KIM_GPIO_INACTIVE);
578 else
579 st_kim_chip_toggle(type, KIM_GPIO_ACTIVE);
580 break;
581 case ST_MAX:
582 pr_err(" wrong proto type ");
583 break;
584 }
585 return 0;
586}
587
588/**
589 * st_kim_ref - reference the core's data
590 * This references the per-ST platform device in the arch/xx/
591 * board-xx.c file.
592 * This would enable multiple such platform devices to exist
593 * on a given platform
594 */
595void st_kim_ref(struct st_data_s **core_data, int id)
596{
597 struct platform_device *pdev;
598 struct kim_data_s *kim_gdata;
599 /* get kim_gdata reference from platform device */
600 pdev = st_get_plat_device(id);
601 kim_gdata = dev_get_drvdata(&pdev->dev);
602 *core_data = kim_gdata->core_data;
603}
604
605static int kim_version_open(struct inode *i, struct file *f)
606{
607 return single_open(f, show_version, i->i_private);
608}
609
610static int kim_list_open(struct inode *i, struct file *f)
611{
612 return single_open(f, show_list, i->i_private);
613}
614
615static const struct file_operations version_debugfs_fops = {
616 /* version info */
617 .open = kim_version_open,
618 .read = seq_read,
619 .llseek = seq_lseek,
620 .release = single_release,
621};
622static const struct file_operations list_debugfs_fops = {
623 /* protocols info */
624 .open = kim_list_open,
625 .read = seq_read,
626 .llseek = seq_lseek,
627 .release = single_release,
628};
629
630/**********************************************************************/
631/* functions called from the platform device driver subsystem;
632 * a relevant platform device entry is needed in the platform's
633 * board-*.c file
634 */
635
636struct dentry *kim_debugfs_dir;
637static int kim_probe(struct platform_device *pdev)
638{
639 long status;
640 long proto;
641 long *gpios = pdev->dev.platform_data;
642 struct kim_data_s *kim_gdata;
643
644 if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
645 /* multiple devices could exist */
646 st_kim_devices[pdev->id] = pdev;
647 } else {
648 /* platform is sure about the existence of 1 device */
649 st_kim_devices[0] = pdev;
650 }
651
652 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
653 if (!kim_gdata) {
654 pr_err("no mem to allocate");
655 return -ENOMEM;
656 }
657 dev_set_drvdata(&pdev->dev, kim_gdata);
658
659 status = st_core_init(&kim_gdata->core_data);
660 if (status != 0) {
661 pr_err(" ST core init failed");
662 return -1;
663 }
664 /* refer to itself */
665 kim_gdata->core_data->kim_data = kim_gdata;
666
667 for (proto = 0; proto < ST_MAX; proto++) {
668 kim_gdata->gpios[proto] = gpios[proto];
669 pr_info(" %ld gpio to be requested", gpios[proto]);
670 }
671
672 for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
673 /* Claim the Bluetooth/FM/GPS
674 * nShutdown gpio from the system
675 */
676 status = gpio_request(gpios[proto], "kim");
677 if (unlikely(status)) {
678 pr_err(" gpio %ld request failed ", gpios[proto]);
679 proto -= 1;
680 for (; proto >= 0; proto--) {
681 if (gpios[proto] != -1)
682 gpio_free(gpios[proto]);
683 }
684 return status;
685 }
686
687 /* Configure nShutdown GPIO as output=0 */
688 status =
689 gpio_direction_output(gpios[proto], 0);
690 if (unlikely(status)) {
691 pr_err(" unable to configure gpio %ld",
692 gpios[proto]);
693 proto -= 1;
694 for (; proto >= 0; proto--) {
695 if (gpios[proto] != -1)
696 gpio_free(gpios[proto]);
697 }
698 return status;
699 }
700 }
701 /* get reference of pdev for request_firmware
702 */
703 kim_gdata->kim_pdev = pdev;
704 init_completion(&kim_gdata->kim_rcvd);
705 init_completion(&kim_gdata->ldisc_installed);
706
707 for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
708 /* TODO: should all types be rfkill_type_bt ? */
709 kim_gdata->rf_protos[proto] = proto;
710 kim_gdata->rfkill[proto] = rfkill_alloc(protocol_names[proto],
711 &pdev->dev, RFKILL_TYPE_BLUETOOTH,
712 &kim_rfkill_ops, &kim_gdata->rf_protos[proto]);
713 if (kim_gdata->rfkill[proto] == NULL) {
714 pr_err("cannot create rfkill entry for gpio %ld",
715 gpios[proto]);
716 continue;
717 }
718 /* block upon creation */
719 rfkill_init_sw_state(kim_gdata->rfkill[proto], 1);
720 status = rfkill_register(kim_gdata->rfkill[proto]);
721 if (unlikely(status)) {
722 pr_err("rfkill registration failed for gpio %ld",
723 gpios[proto]);
724 rfkill_unregister(kim_gdata->rfkill[proto]);
725 continue;
726 }
727 pr_info("rfkill entry created for %ld", gpios[proto]);
728 }
729
730 kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
731 if (IS_ERR(kim_debugfs_dir)) {
732 pr_err(" debugfs entries creation failed ");
733 kim_debugfs_dir = NULL;
734 return -1;
735 }
736
737 debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
738 kim_gdata, &version_debugfs_fops);
739 debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
740 kim_gdata, &list_debugfs_fops);
741 pr_info(" debugfs entries created ");
742 return 0;
743}
744
745static int kim_remove(struct platform_device *pdev)
746{
747 /* free the GPIOs requested
748 */
749 long *gpios = pdev->dev.platform_data;
750 long proto;
751 struct kim_data_s *kim_gdata;
752
753 kim_gdata = dev_get_drvdata(&pdev->dev);
754
755 for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
756 /* Release the Bluetooth/FM/GPS
757 * nShutdown gpio back to the system
758 */
759 gpio_free(gpios[proto]);
760 rfkill_unregister(kim_gdata->rfkill[proto]);
761 rfkill_destroy(kim_gdata->rfkill[proto]);
762 kim_gdata->rfkill[proto] = NULL;
763 }
764 pr_info("kim: GPIO Freed");
765 debugfs_remove_recursive(kim_debugfs_dir);
766 kim_gdata->kim_pdev = NULL;
767 st_core_exit(kim_gdata->core_data);
768
769 kfree(kim_gdata);
770 kim_gdata = NULL;
771 return 0;
772}
773
774/**********************************************************************/
775/* entry point for ST KIM module, called in from ST Core */
776
777static int __init st_kim_init(void)
778{
779 long ret = 0;
780 ret = platform_driver_register(&kim_platform_driver);
781 if (ret != 0) {
782 pr_err("platform drv registration failed");
783 return -1;
784 }
785 return 0;
786}
787
788static void __exit st_kim_deinit(void)
789{
790 /* the following returns void */
791 platform_driver_unregister(&kim_platform_driver);
792}
793
794
795module_init(st_kim_init);
796module_exit(st_kim_deinit);
797MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
798MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips ");
799MODULE_LICENSE("GPL");
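kim_probe() above expects its platform_data to be an array of long GPIO numbers indexed by proto type, with -1 for protocols that have no nShutdown line. A board-file sketch of how such a device could be registered is below; the GPIO numbers and the choice to omit GPS are illustrative assumptions:

#include <linux/init.h>
#include <linux/platform_device.h>

/* order: ST_BT, ST_FM, ST_GPS; -1 means no nShutdown GPIO for that protocol.
 * The GPIO numbers are board-specific placeholders, not real values. */
static long wilink_gpios[] = { 137, 136, -1 };

static struct platform_device wilink_kim_device = {
	.name = "kim",		/* must match kim_platform_driver.driver.name */
	.id   = -1,
	.dev  = {
		.platform_data = wilink_gpios,
	},
};

static int __init board_wilink_init(void)
{
	return platform_device_register(&wilink_kim_device);
}
device_initcall(board_wilink_init);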
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
new file mode 100644
index 000000000000..2bda8dea15b0
--- /dev/null
+++ b/drivers/misc/ti-st/st_ll.c
@@ -0,0 +1,150 @@
1/*
2 * Shared Transport driver
3 * HCI-LL module responsible for TI proprietary HCI_LL protocol
4 * Copyright (C) 2009-2010 Texas Instruments
5 * Author: Pavan Savoy <pavan_savoy@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#define pr_fmt(fmt) "(stll) :" fmt
23#include <linux/skbuff.h>
24#include <linux/module.h>
25#include <linux/ti_wilink_st.h>
26
27/**********************************************************************/
28/* internal functions */
29static void send_ll_cmd(struct st_data_s *st_data,
30 unsigned char cmd)
31{
32
33 pr_info("%s: writing %x", __func__, cmd);
34 st_int_write(st_data, &cmd, 1);
35 return;
36}
37
38static void ll_device_want_to_sleep(struct st_data_s *st_data)
39{
40 pr_debug("%s", __func__);
41 /* sanity check */
42 if (st_data->ll_state != ST_LL_AWAKE)
43 pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND"
44 "in state %ld", st_data->ll_state);
45
46 send_ll_cmd(st_data, LL_SLEEP_ACK);
47 /* update state */
48 st_data->ll_state = ST_LL_ASLEEP;
49}
50
51static void ll_device_want_to_wakeup(struct st_data_s *st_data)
52{
53 /* diff actions in diff states */
54 switch (st_data->ll_state) {
55 case ST_LL_ASLEEP:
56 send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */
57 break;
58 case ST_LL_ASLEEP_TO_AWAKE:
59 /* duplicate wake_ind */
60 pr_err("duplicate wake_ind while waiting for Wake ack");
61 break;
62 case ST_LL_AWAKE:
63 /* duplicate wake_ind */
64 pr_err("duplicate wake_ind already AWAKE");
65 break;
66 case ST_LL_AWAKE_TO_ASLEEP:
67 /* duplicate wake_ind */
68 pr_err("duplicate wake_ind");
69 break;
70 }
71 /* update state */
72 st_data->ll_state = ST_LL_AWAKE;
73}
74
75/**********************************************************************/
76/* functions invoked by ST Core */
77
78/* called when ST Core wants to
79 * enable ST LL */
80void st_ll_enable(struct st_data_s *ll)
81{
82 ll->ll_state = ST_LL_AWAKE;
83}
84
85/* called when ST Core /local module wants to
86 * disable ST LL */
87void st_ll_disable(struct st_data_s *ll)
88{
89 ll->ll_state = ST_LL_INVALID;
90}
91
92/* called when ST Core wants to update the state */
93void st_ll_wakeup(struct st_data_s *ll)
94{
95 if (likely(ll->ll_state != ST_LL_AWAKE)) {
96 send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */
97 ll->ll_state = ST_LL_ASLEEP_TO_AWAKE;
98 } else {
99 /* don't send the duplicate wake_indication */
100 pr_err(" Chip already AWAKE ");
101 }
102}
103
104/* called when ST Core wants the state */
105unsigned long st_ll_getstate(struct st_data_s *ll)
106{
107 pr_debug(" returning state %ld", ll->ll_state);
108 return ll->ll_state;
109}
110
111/* called from ST Core, when a PM related packet arrives */
112unsigned long st_ll_sleep_state(struct st_data_s *st_data,
113 unsigned char cmd)
114{
115 switch (cmd) {
116 case LL_SLEEP_IND: /* sleep ind */
117 pr_info("sleep indication recvd");
118 ll_device_want_to_sleep(st_data);
119 break;
120 case LL_SLEEP_ACK: /* sleep ack */
121 pr_err("sleep ack rcvd: host shouldn't");
122 break;
123 case LL_WAKE_UP_IND: /* wake ind */
124 pr_info("wake indication recvd");
125 ll_device_want_to_wakeup(st_data);
126 break;
127 case LL_WAKE_UP_ACK: /* wake ack */
128 pr_info("wake ack rcvd");
129 st_data->ll_state = ST_LL_AWAKE;
130 break;
131 default:
132 pr_err(" unknown input/state ");
133 return -1;
134 }
135 return 0;
136}
137
138/* Called from ST CORE to initialize ST LL */
139long st_ll_init(struct st_data_s *ll)
140{
141 /* set state to invalid */
142 ll->ll_state = ST_LL_INVALID;
143 return 0;
144}
145
146/* Called from ST CORE to de-initialize ST LL */
147long st_ll_deinit(struct st_data_s *ll)
148{
149 return 0;
150}
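Taken together with st_int_enqueue(), st_wakeup_ack() and st_tx_wakeup() in st_core.c above, the handlers in this file implement the host side of the HCI-LL power-management handshake. A compact summary of the common sequence (CP = the WiLink controller), derived only from the code in this patch:

/*
 *  CP  -> host : LL_SLEEP_IND    ll_device_want_to_sleep() replies with
 *                                LL_SLEEP_ACK, ll_state = ST_LL_ASLEEP
 *  host has data to send         st_int_enqueue() queues the skb on
 *                                tx_waitq and calls st_ll_wakeup(),
 *                                which sends LL_WAKE_UP_IND and sets
 *                                ll_state = ST_LL_ASLEEP_TO_AWAKE
 *  CP  -> host : LL_WAKE_UP_ACK  st_wakeup_ack() moves tx_waitq onto
 *                                txq, ll_state becomes ST_LL_AWAKE and
 *                                st_tx_wakeup() resumes transmission
 */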
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
new file mode 100644
index 000000000000..d3f229a3a77e
--- /dev/null
+++ b/drivers/misc/ti_dac7512.c
@@ -0,0 +1,101 @@
1/*
2 * dac7512.c - Linux kernel module for
3 * Texas Instruments DAC7512
4 *
5 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/spi/spi.h>
25
26#define DAC7512_DRV_NAME "dac7512"
27#define DRIVER_VERSION "1.0"
28
29static ssize_t dac7512_store_val(struct device *dev,
30 struct device_attribute *attr,
31 const char *buf, size_t count)
32{
33 struct spi_device *spi = to_spi_device(dev);
34 unsigned char tmp[2];
35 unsigned long val;
36
37 if (strict_strtoul(buf, 10, &val) < 0)
38 return -EINVAL;
39
40 tmp[0] = val >> 8;
41 tmp[1] = val & 0xff;
42 spi_write(spi, tmp, sizeof(tmp));
43 return count;
44}
45
46static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
47
48static struct attribute *dac7512_attributes[] = {
49 &dev_attr_value.attr,
50 NULL
51};
52
53static const struct attribute_group dac7512_attr_group = {
54 .attrs = dac7512_attributes,
55};
56
57static int __devinit dac7512_probe(struct spi_device *spi)
58{
59 int ret;
60
61 spi->bits_per_word = 8;
62 spi->mode = SPI_MODE_0;
63 ret = spi_setup(spi);
64 if (ret < 0)
65 return ret;
66
67 return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
68}
69
70static int __devexit dac7512_remove(struct spi_device *spi)
71{
72 sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
73 return 0;
74}
75
76static struct spi_driver dac7512_driver = {
77 .driver = {
78 .name = DAC7512_DRV_NAME,
79 .owner = THIS_MODULE,
80 },
81 .probe = dac7512_probe,
82 .remove = __devexit_p(dac7512_remove),
83};
84
85static int __init dac7512_init(void)
86{
87 return spi_register_driver(&dac7512_driver);
88}
89
90static void __exit dac7512_exit(void)
91{
92 spi_unregister_driver(&dac7512_driver);
93}
94
95MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
96MODULE_DESCRIPTION("DAC7512 16-bit DAC");
97MODULE_LICENSE("GPL v2");
98MODULE_VERSION(DRIVER_VERSION);
99
100module_init(dac7512_init);
101module_exit(dac7512_exit);
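The only interface this driver exposes is the write-only "value" sysfs attribute created in dac7512_probe(); user space programs the DAC by writing the 16-bit value in decimal, which dac7512_store_val() splits into two bytes and sends over SPI. A small user-space sketch, with the sysfs path as an assumption (the spiB.C name depends on the board's SPI topology):

#include <stdio.h>

/* hypothetical path; the real one depends on the SPI bus/chip-select */
#define DAC7512_VALUE_ATTR "/sys/bus/spi/devices/spi1.0/value"

static int dac7512_set(unsigned int val)	/* 0..65535 */
{
	FILE *f = fopen(DAC7512_VALUE_ATTR, "w");

	if (!f)
		return -1;
	fprintf(f, "%u\n", val);	/* parsed by dac7512_store_val() */
	return fclose(f);
}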
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 98bcba521da2..5f6852dff40b 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -10,6 +10,7 @@
 */

 #include <linux/tifm.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/idr.h>

diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
new file mode 100644
index 000000000000..483ae5f7f68e
--- /dev/null
+++ b/drivers/misc/tsl2550.c
@@ -0,0 +1,473 @@
1/*
2 * tsl2550.c - Linux kernel modules for ambient light sensor
3 *
4 * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27
28#define TSL2550_DRV_NAME "tsl2550"
29#define DRIVER_VERSION "1.2"
30
31/*
32 * Defines
33 */
34
35#define TSL2550_POWER_DOWN 0x00
36#define TSL2550_POWER_UP 0x03
37#define TSL2550_STANDARD_RANGE 0x18
38#define TSL2550_EXTENDED_RANGE 0x1d
39#define TSL2550_READ_ADC0 0x43
40#define TSL2550_READ_ADC1 0x83
41
42/*
43 * Structs
44 */
45
46struct tsl2550_data {
47 struct i2c_client *client;
48 struct mutex update_lock;
49
50 unsigned int power_state:1;
51 unsigned int operating_mode:1;
52};
53
54/*
55 * Global data
56 */
57
58static const u8 TSL2550_MODE_RANGE[2] = {
59 TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE,
60};
61
62/*
63 * Management functions
64 */
65
66static int tsl2550_set_operating_mode(struct i2c_client *client, int mode)
67{
68 struct tsl2550_data *data = i2c_get_clientdata(client);
69
70 int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]);
71
72 data->operating_mode = mode;
73
74 return ret;
75}
76
77static int tsl2550_set_power_state(struct i2c_client *client, int state)
78{
79 struct tsl2550_data *data = i2c_get_clientdata(client);
80 int ret;
81
82 if (state == 0)
83 ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN);
84 else {
85 ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
86
87 /* On power up we should reset operating mode also... */
88 tsl2550_set_operating_mode(client, data->operating_mode);
89 }
90
91 data->power_state = state;
92
93 return ret;
94}
95
96static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
97{
98 int ret;
99
100 ret = i2c_smbus_read_byte_data(client, cmd);
101 if (ret < 0)
102 return ret;
103 if (!(ret & 0x80))
104 return -EAGAIN;
105 return ret & 0x7f; /* remove the "valid" bit */
106}
107
108/*
109 * LUX calculation
110 */
111
112#define TSL2550_MAX_LUX 1846
113
114static const u8 ratio_lut[] = {
115 100, 100, 100, 100, 100, 100, 100, 100,
116 100, 100, 100, 100, 100, 100, 99, 99,
117 99, 99, 99, 99, 99, 99, 99, 99,
118 99, 99, 99, 98, 98, 98, 98, 98,
119 98, 98, 97, 97, 97, 97, 97, 96,
120 96, 96, 96, 95, 95, 95, 94, 94,
121 93, 93, 93, 92, 92, 91, 91, 90,
122 89, 89, 88, 87, 87, 86, 85, 84,
123 83, 82, 81, 80, 79, 78, 77, 75,
124 74, 73, 71, 69, 68, 66, 64, 62,
125 60, 58, 56, 54, 52, 49, 47, 44,
126 42, 41, 40, 40, 39, 39, 38, 38,
127 37, 37, 37, 36, 36, 36, 35, 35,
128 35, 35, 34, 34, 34, 34, 33, 33,
129 33, 33, 32, 32, 32, 32, 32, 31,
130 31, 31, 31, 31, 30, 30, 30, 30,
131 30,
132};
133
134static const u16 count_lut[] = {
135 0, 1, 2, 3, 4, 5, 6, 7,
136 8, 9, 10, 11, 12, 13, 14, 15,
137 16, 18, 20, 22, 24, 26, 28, 30,
138 32, 34, 36, 38, 40, 42, 44, 46,
139 49, 53, 57, 61, 65, 69, 73, 77,
140 81, 85, 89, 93, 97, 101, 105, 109,
141 115, 123, 131, 139, 147, 155, 163, 171,
142 179, 187, 195, 203, 211, 219, 227, 235,
143 247, 263, 279, 295, 311, 327, 343, 359,
144 375, 391, 407, 423, 439, 455, 471, 487,
145 511, 543, 575, 607, 639, 671, 703, 735,
146 767, 799, 831, 863, 895, 927, 959, 991,
147 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487,
148 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999,
149 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991,
150 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015,
151};
152
153/*
154 * This function is described in the Taos TSL2550 Designer's Notebook,
155 * pages 2 and 3.
156 */
157static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
158{
159 unsigned int lux;
160
161 /* Look up count from channel values */
162 u16 c0 = count_lut[ch0];
163 u16 c1 = count_lut[ch1];
164
165 /*
166 * Calculate ratio.
167 * Note: the "128" is a scaling factor
168 */
169 u8 r = 128;
170
171	/* Avoid division by 0; also, count 1 cannot be greater than count 0 */
172 if (c1 <= c0)
173 if (c0) {
174 r = c1 * 128 / c0;
175
176 /* Calculate LUX */
177 lux = ((c0 - c1) * ratio_lut[r]) / 256;
178 } else
179 lux = 0;
180 else
181 return -EAGAIN;
182
183 /* LUX range check */
184 return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
185}
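/*
 * Editor's note -- a worked example of the calculation above, derived from
 * the two lookup tables (not part of the driver source):
 *
 *   ch0 = 100, ch1 = 50
 *   c0  = count_lut[100] = 1295,  c1 = count_lut[50] = 131
 *   r   = 131 * 128 / 1295 = 12 (integer division), ratio_lut[12] = 100
 *   lux = (1295 - 131) * 100 / 256 = 454
 *
 * In extended range mode the sysfs code below multiplies this result by 5,
 * giving 2270.
 */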
186
187/*
188 * SysFS support
189 */
190
191static ssize_t tsl2550_show_power_state(struct device *dev,
192 struct device_attribute *attr, char *buf)
193{
194 struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
195
196 return sprintf(buf, "%u\n", data->power_state);
197}
198
199static ssize_t tsl2550_store_power_state(struct device *dev,
200 struct device_attribute *attr, const char *buf, size_t count)
201{
202 struct i2c_client *client = to_i2c_client(dev);
203 struct tsl2550_data *data = i2c_get_clientdata(client);
204 unsigned long val = simple_strtoul(buf, NULL, 10);
205 int ret;
206
207 if (val < 0 || val > 1)
208 return -EINVAL;
209
210 mutex_lock(&data->update_lock);
211 ret = tsl2550_set_power_state(client, val);
212 mutex_unlock(&data->update_lock);
213
214 if (ret < 0)
215 return ret;
216
217 return count;
218}
219
220static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
221 tsl2550_show_power_state, tsl2550_store_power_state);
222
223static ssize_t tsl2550_show_operating_mode(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
226 struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
227
228 return sprintf(buf, "%u\n", data->operating_mode);
229}
230
231static ssize_t tsl2550_store_operating_mode(struct device *dev,
232 struct device_attribute *attr, const char *buf, size_t count)
233{
234 struct i2c_client *client = to_i2c_client(dev);
235 struct tsl2550_data *data = i2c_get_clientdata(client);
236 unsigned long val = simple_strtoul(buf, NULL, 10);
237 int ret;
238
239 if (val < 0 || val > 1)
240 return -EINVAL;
241
242 if (data->power_state == 0)
243 return -EBUSY;
244
245 mutex_lock(&data->update_lock);
246 ret = tsl2550_set_operating_mode(client, val);
247 mutex_unlock(&data->update_lock);
248
249 if (ret < 0)
250 return ret;
251
252 return count;
253}
254
255static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
256 tsl2550_show_operating_mode, tsl2550_store_operating_mode);
257
258static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
259{
260 struct tsl2550_data *data = i2c_get_clientdata(client);
261 u8 ch0, ch1;
262 int ret;
263
264 ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0);
265 if (ret < 0)
266 return ret;
267 ch0 = ret;
268
269 ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1);
270 if (ret < 0)
271 return ret;
272 ch1 = ret;
273
274	/* Compute the lux value */
275 ret = tsl2550_calculate_lux(ch0, ch1);
276 if (ret < 0)
277 return ret;
278 if (data->operating_mode == 1)
279 ret *= 5;
280
281 return sprintf(buf, "%d\n", ret);
282}
283
284static ssize_t tsl2550_show_lux1_input(struct device *dev,
285 struct device_attribute *attr, char *buf)
286{
287 struct i2c_client *client = to_i2c_client(dev);
288 struct tsl2550_data *data = i2c_get_clientdata(client);
289 int ret;
290
291 /* No LUX data if not operational */
292 if (!data->power_state)
293 return -EBUSY;
294
295 mutex_lock(&data->update_lock);
296 ret = __tsl2550_show_lux(client, buf);
297 mutex_unlock(&data->update_lock);
298
299 return ret;
300}
301
302static DEVICE_ATTR(lux1_input, S_IRUGO,
303 tsl2550_show_lux1_input, NULL);
304
305static struct attribute *tsl2550_attributes[] = {
306 &dev_attr_power_state.attr,
307 &dev_attr_operating_mode.attr,
308 &dev_attr_lux1_input.attr,
309 NULL
310};
311
312static const struct attribute_group tsl2550_attr_group = {
313 .attrs = tsl2550_attributes,
314};
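/*
 * Editor's note -- the group above is registered on the client device in
 * tsl2550_probe() below, which exposes three files under the i2c client's
 * sysfs directory (the exact path depends on the adapter and address and is
 * shown here only as an illustration):
 *
 *   # echo 1 > /sys/bus/i2c/devices/<bus>-<addr>/power_state
 *   # echo 0 > /sys/bus/i2c/devices/<bus>-<addr>/operating_mode
 *   # cat /sys/bus/i2c/devices/<bus>-<addr>/lux1_input
 *
 * Reading lux1_input returns the computed lux value; it fails with EBUSY
 * while the chip is powered down.
 */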
315
316/*
317 * Initialization function
318 */
319
320static int tsl2550_init_client(struct i2c_client *client)
321{
322 struct tsl2550_data *data = i2c_get_clientdata(client);
323 int err;
324
325 /*
326	 * Probe the chip. To do so we power up the device and then read
327	 * back the 0x03 (power-up) code.
328 */
329 err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP);
330 if (err < 0)
331 return err;
332 if (err != TSL2550_POWER_UP)
333 return -ENODEV;
334 data->power_state = 1;
335
336 /* Set the default operating mode */
337 err = i2c_smbus_write_byte(client,
338 TSL2550_MODE_RANGE[data->operating_mode]);
339 if (err < 0)
340 return err;
341
342 return 0;
343}
344
345/*
346 * I2C init/probing/exit functions
347 */
348
349static struct i2c_driver tsl2550_driver;
350static int __devinit tsl2550_probe(struct i2c_client *client,
351 const struct i2c_device_id *id)
352{
353 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
354 struct tsl2550_data *data;
355 int *opmode, err = 0;
356
357 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE
358 | I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
359 err = -EIO;
360 goto exit;
361 }
362
363 data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL);
364 if (!data) {
365 err = -ENOMEM;
366 goto exit;
367 }
368 data->client = client;
369 i2c_set_clientdata(client, data);
370
371 /* Check platform data */
372 opmode = client->dev.platform_data;
373 if (opmode) {
374 if (*opmode < 0 || *opmode > 1) {
375 dev_err(&client->dev, "invalid operating_mode (%d)\n",
376 *opmode);
377 err = -EINVAL;
378 goto exit_kfree;
379 }
380 data->operating_mode = *opmode;
381 } else
382 data->operating_mode = 0; /* default mode is standard */
383 dev_info(&client->dev, "%s operating mode\n",
384 data->operating_mode ? "extended" : "standard");
385
386 mutex_init(&data->update_lock);
387
388 /* Initialize the TSL2550 chip */
389 err = tsl2550_init_client(client);
390 if (err)
391 goto exit_kfree;
392
393 /* Register sysfs hooks */
394 err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group);
395 if (err)
396 goto exit_kfree;
397
398 dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
399
400 return 0;
401
402exit_kfree:
403 kfree(data);
404exit:
405 return err;
406}
407
408static int __devexit tsl2550_remove(struct i2c_client *client)
409{
410 sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
411
412 /* Power down the device */
413 tsl2550_set_power_state(client, 0);
414
415 kfree(i2c_get_clientdata(client));
416
417 return 0;
418}
419
420#ifdef CONFIG_PM
421
422static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg)
423{
424 return tsl2550_set_power_state(client, 0);
425}
426
427static int tsl2550_resume(struct i2c_client *client)
428{
429 return tsl2550_set_power_state(client, 1);
430}
431
432#else
433
434#define tsl2550_suspend NULL
435#define tsl2550_resume NULL
436
437#endif /* CONFIG_PM */
438
439static const struct i2c_device_id tsl2550_id[] = {
440 { "tsl2550", 0 },
441 { }
442};
443MODULE_DEVICE_TABLE(i2c, tsl2550_id);
444
445static struct i2c_driver tsl2550_driver = {
446 .driver = {
447 .name = TSL2550_DRV_NAME,
448 .owner = THIS_MODULE,
449 },
450 .suspend = tsl2550_suspend,
451 .resume = tsl2550_resume,
452 .probe = tsl2550_probe,
453 .remove = __devexit_p(tsl2550_remove),
454 .id_table = tsl2550_id,
455};
456
457static int __init tsl2550_init(void)
458{
459 return i2c_add_driver(&tsl2550_driver);
460}
461
462static void __exit tsl2550_exit(void)
463{
464 i2c_del_driver(&tsl2550_driver);
465}
466
467MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
468MODULE_DESCRIPTION("TSL2550 ambient light sensor driver");
469MODULE_LICENSE("GPL");
470MODULE_VERSION(DRIVER_VERSION);
471
472module_init(tsl2550_init);
473module_exit(tsl2550_exit);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
new file mode 100644
index 000000000000..2a1e804a71aa
--- /dev/null
+++ b/drivers/misc/vmw_balloon.c
@@ -0,0 +1,844 @@
1/*
2 * VMware Balloon driver.
3 *
4 * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Maintained by: Dmitry Torokhov <dtor@vmware.com>
21 */
22
23/*
24 * This is the VMware physical memory management driver for Linux. The driver
25 * acts like a "balloon" that can be inflated to reclaim physical pages by
26 * reserving them in the guest and invalidating them in the monitor,
27 * freeing up the underlying machine pages so they can be allocated to
28 * other guests. The balloon can also be deflated to allow the guest to
29 * use more physical memory. Higher level policies can control the sizes
30 * of balloons in VMs in order to manage physical memory resources.
31 */
32
33//#define DEBUG
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/sched.h>
40#include <linux/module.h>
41#include <linux/workqueue.h>
42#include <linux/debugfs.h>
43#include <linux/seq_file.h>
44#include <asm/hypervisor.h>
45
46MODULE_AUTHOR("VMware, Inc.");
47MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
48MODULE_VERSION("1.2.1.1-k");
49MODULE_ALIAS("dmi:*:svnVMware*:*");
50MODULE_ALIAS("vmware_vmmemctl");
51MODULE_LICENSE("GPL");
52
53/*
54 * Various constants controlling the rate of inflating/deflating the
55 * balloon, measured in pages.
56 */
57
58/*
59 * Rate of allocating memory when there is no memory pressure
60 * (driver performs non-sleeping allocations).
61 */
62#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
63
64/*
65 * Rates of memory allocation when the guest experiences memory pressure
66 * (driver performs sleeping allocations).
67 */
68#define VMW_BALLOON_RATE_ALLOC_MIN 512U
69#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
70#define VMW_BALLOON_RATE_ALLOC_INC 16U
71
72/*
73 * Rates for releasing pages while deflating balloon.
74 */
75#define VMW_BALLOON_RATE_FREE_MIN 512U
76#define VMW_BALLOON_RATE_FREE_MAX 16384U
77#define VMW_BALLOON_RATE_FREE_INC 16U
78
79/*
80 * When the guest is under memory pressure, use a reduced page allocation
81 * rate for the next several cycles.
82 */
83#define VMW_BALLOON_SLOW_CYCLES 4
84
85/*
86 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We don't
87 * allow waiting (__GFP_WAIT) for NOSLEEP page allocations. Use
88 * __GFP_NOWARN to suppress page allocation failure warnings.
89 */
90#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
91
92/*
93 * Use GFP_HIGHUSER when executing in a separate kernel thread
94 * context and allocation can sleep. This is less stressful to
95 * the guest memory system, since it allows the thread to block
96 * while memory is reclaimed, and won't take pages from emergency
97 * low-memory pools.
98 */
99#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
100
101/* Maximum number of page allocations without yielding processor */
102#define VMW_BALLOON_YIELD_THRESHOLD 1024
103
104/* Maximum number of refused pages we accumulate during inflation cycle */
105#define VMW_BALLOON_MAX_REFUSED 16
106
107/*
108 * Hypervisor communication port definitions.
109 */
110#define VMW_BALLOON_HV_PORT 0x5670
111#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
112#define VMW_BALLOON_PROTOCOL_VERSION 2
113#define VMW_BALLOON_GUEST_ID 1 /* Linux */
114
115#define VMW_BALLOON_CMD_START 0
116#define VMW_BALLOON_CMD_GET_TARGET 1
117#define VMW_BALLOON_CMD_LOCK 2
118#define VMW_BALLOON_CMD_UNLOCK 3
119#define VMW_BALLOON_CMD_GUEST_ID 4
120
121/* error codes */
122#define VMW_BALLOON_SUCCESS 0
123#define VMW_BALLOON_FAILURE -1
124#define VMW_BALLOON_ERROR_CMD_INVALID 1
125#define VMW_BALLOON_ERROR_PPN_INVALID 2
126#define VMW_BALLOON_ERROR_PPN_LOCKED 3
127#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
128#define VMW_BALLOON_ERROR_PPN_PINNED 5
129#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
130#define VMW_BALLOON_ERROR_RESET 7
131#define VMW_BALLOON_ERROR_BUSY 8
132
133#define VMWARE_BALLOON_CMD(cmd, data, result) \
134({ \
135 unsigned long __stat, __dummy1, __dummy2; \
136 __asm__ __volatile__ ("inl (%%dx)" : \
137 "=a"(__stat), \
138 "=c"(__dummy1), \
139 "=d"(__dummy2), \
140 "=b"(result) : \
141 "0"(VMW_BALLOON_HV_MAGIC), \
142 "1"(VMW_BALLOON_CMD_##cmd), \
143 "2"(VMW_BALLOON_HV_PORT), \
144 "3"(data) : \
145 "memory"); \
146 result &= -1UL; \
147 __stat & -1UL; \
148})
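/*
 * Editor's note -- register convention of the macro above, read directly
 * from its asm constraints (not part of the driver source):
 *
 *   in:  eax = VMW_BALLOON_HV_MAGIC   ecx = VMW_BALLOON_CMD_<cmd>
 *        edx = VMW_BALLOON_HV_PORT    ebx = data
 *   out: eax = status code (VMW_BALLOON_SUCCESS, ...)   ebx = result
 *
 * Typical call, as used by vmballoon_send_start() further down:
 *
 *   unsigned long status, dummy;
 *   status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
 *   if (status == VMW_BALLOON_SUCCESS)
 *           return true;
 */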
149
150#ifdef CONFIG_DEBUG_FS
151struct vmballoon_stats {
152 unsigned int timer;
153
154	/* allocation statistics */
155 unsigned int alloc;
156 unsigned int alloc_fail;
157 unsigned int sleep_alloc;
158 unsigned int sleep_alloc_fail;
159 unsigned int refused_alloc;
160 unsigned int refused_free;
161 unsigned int free;
162
163 /* monitor operations */
164 unsigned int lock;
165 unsigned int lock_fail;
166 unsigned int unlock;
167 unsigned int unlock_fail;
168 unsigned int target;
169 unsigned int target_fail;
170 unsigned int start;
171 unsigned int start_fail;
172 unsigned int guest_type;
173 unsigned int guest_type_fail;
174};
175
176#define STATS_INC(stat) (stat)++
177#else
178#define STATS_INC(stat)
179#endif
180
181struct vmballoon {
182
183 /* list of reserved physical pages */
184 struct list_head pages;
185
186 /* transient list of non-balloonable pages */
187 struct list_head refused_pages;
188 unsigned int n_refused_pages;
189
190 /* balloon size in pages */
191 unsigned int size;
192 unsigned int target;
193
194 /* reset flag */
195 bool reset_required;
196
197 /* adjustment rates (pages per second) */
198 unsigned int rate_alloc;
199 unsigned int rate_free;
200
201 /* slowdown page allocations for next few cycles */
202 unsigned int slow_allocation_cycles;
203
204#ifdef CONFIG_DEBUG_FS
205 /* statistics */
206 struct vmballoon_stats stats;
207
208 /* debugfs file exporting statistics */
209 struct dentry *dbg_entry;
210#endif
211
212 struct sysinfo sysinfo;
213
214 struct delayed_work dwork;
215};
216
217static struct vmballoon balloon;
218static struct workqueue_struct *vmballoon_wq;
219
220/*
221 * Send the "start" command to the host, communicating the supported
222 * version of the protocol.
223 */
224static bool vmballoon_send_start(struct vmballoon *b)
225{
226 unsigned long status, dummy;
227
228 STATS_INC(b->stats.start);
229
230 status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
231 if (status == VMW_BALLOON_SUCCESS)
232 return true;
233
234 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
235 STATS_INC(b->stats.start_fail);
236 return false;
237}
238
239static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
240{
241 switch (status) {
242 case VMW_BALLOON_SUCCESS:
243 return true;
244
245 case VMW_BALLOON_ERROR_RESET:
246 b->reset_required = true;
247 /* fall through */
248
249 default:
250 return false;
251 }
252}
253
254/*
255 * Communicate the guest type to the host so that it can adjust the
256 * ballooning algorithm to the one most appropriate for the guest. This
257 * command is normally issued after sending the "start" command and is
258 * part of the standard reset sequence.
259 */
260static bool vmballoon_send_guest_id(struct vmballoon *b)
261{
262 unsigned long status, dummy;
263
264 status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
265
266 STATS_INC(b->stats.guest_type);
267
268 if (vmballoon_check_status(b, status))
269 return true;
270
271 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
272 STATS_INC(b->stats.guest_type_fail);
273 return false;
274}
275
276/*
277 * Retrieve desired balloon size from the host.
278 */
279static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
280{
281 unsigned long status;
282 unsigned long target;
283 unsigned long limit;
284 u32 limit32;
285
286 /*
287 * si_meminfo() is cheap. Moreover, we want to provide dynamic
288 * max balloon size later. So let us call si_meminfo() every
289 * iteration.
290 */
291 si_meminfo(&b->sysinfo);
292 limit = b->sysinfo.totalram;
293
294 /* Ensure limit fits in 32-bits */
295 limit32 = (u32)limit;
296 if (limit != limit32)
297 return false;
298
299 /* update stats */
300 STATS_INC(b->stats.target);
301
302 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
303 if (vmballoon_check_status(b, status)) {
304 *new_target = target;
305 return true;
306 }
307
308 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
309 STATS_INC(b->stats.target_fail);
310 return false;
311}
312
313/*
314 * Notify the host about an allocated page so that the host can use it
315 * without fear that the guest will need it. The host may reject some pages;
316 * in that case we need to check the return value and possibly submit a
317 * different page.
317 */
318static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
319{
320 unsigned long status, dummy;
321 u32 pfn32;
322
323 pfn32 = (u32)pfn;
324 if (pfn32 != pfn)
325 return false;
326
327 STATS_INC(b->stats.lock);
328
329 status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
330 if (vmballoon_check_status(b, status))
331 return true;
332
333 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
334 STATS_INC(b->stats.lock_fail);
335 return false;
336}
337
338/*
339 * Notify the host that the guest intends to release the given page back
340 * into the pool of pages available to the guest.
341 */
342static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
343{
344 unsigned long status, dummy;
345 u32 pfn32;
346
347 pfn32 = (u32)pfn;
348 if (pfn32 != pfn)
349 return false;
350
351 STATS_INC(b->stats.unlock);
352
353 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
354 if (vmballoon_check_status(b, status))
355 return true;
356
357 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
358 STATS_INC(b->stats.unlock_fail);
359 return false;
360}
361
362/*
363 * Quickly release all pages allocated for the balloon. This function is
364 * called when the host decides to "reset" the balloon for one reason or
365 * another. Unlike a normal "deflate", we do not (and shall not) notify the
366 * host of the pages being released.
367 */
368static void vmballoon_pop(struct vmballoon *b)
369{
370 struct page *page, *next;
371 unsigned int count = 0;
372
373 list_for_each_entry_safe(page, next, &b->pages, lru) {
374 list_del(&page->lru);
375 __free_page(page);
376 STATS_INC(b->stats.free);
377 b->size--;
378
379 if (++count >= b->rate_free) {
380 count = 0;
381 cond_resched();
382 }
383 }
384}
385
386/*
387 * Perform the standard reset sequence by popping the balloon (in case it
388 * is not empty) and then restarting the protocol. This operation normally
389 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
390 */
391static void vmballoon_reset(struct vmballoon *b)
392{
393 /* free all pages, skipping monitor unlock */
394 vmballoon_pop(b);
395
396 if (vmballoon_send_start(b)) {
397 b->reset_required = false;
398 if (!vmballoon_send_guest_id(b))
399 pr_err("failed to send guest ID to the host\n");
400 }
401}
402
403/*
404 * Allocate (or reserve) a page for the balloon and notify the host. If the
405 * host refuses the page, put it on the "refused" list and allocate another
406 * one until the host is satisfied. "Refused" pages are released at the end
407 * of the inflation cycle (when we have allocated b->rate_alloc pages).
408 */
409static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
410{
411 struct page *page;
412 gfp_t flags;
413 bool locked = false;
414
415 do {
416 if (!can_sleep)
417 STATS_INC(b->stats.alloc);
418 else
419 STATS_INC(b->stats.sleep_alloc);
420
421 flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
422 page = alloc_page(flags);
423 if (!page) {
424 if (!can_sleep)
425 STATS_INC(b->stats.alloc_fail);
426 else
427 STATS_INC(b->stats.sleep_alloc_fail);
428 return -ENOMEM;
429 }
430
431 /* inform monitor */
432 locked = vmballoon_send_lock_page(b, page_to_pfn(page));
433 if (!locked) {
434 STATS_INC(b->stats.refused_alloc);
435
436 if (b->reset_required) {
437 __free_page(page);
438 return -EIO;
439 }
440
441 /*
442 * Place page on the list of non-balloonable pages
443 * and retry allocation, unless we already accumulated
444 * too many of them, in which case take a breather.
445 */
446 list_add(&page->lru, &b->refused_pages);
447 if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
448 return -EIO;
449 }
450 } while (!locked);
451
452 /* track allocated page */
453 list_add(&page->lru, &b->pages);
454
455 /* update balloon size */
456 b->size++;
457
458 return 0;
459}
460
461/*
462 * Release the page allocated for the balloon. Note that we first notify
463 * the host so it can make sure the page will be available for the guest
464 * to use, if needed.
465 */
466static int vmballoon_release_page(struct vmballoon *b, struct page *page)
467{
468 if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
469 return -EIO;
470
471 list_del(&page->lru);
472
473 /* deallocate page */
474 __free_page(page);
475 STATS_INC(b->stats.free);
476
477 /* update balloon size */
478 b->size--;
479
480 return 0;
481}
482
483/*
484 * Release pages that were allocated while attempting to inflate the
485 * balloon but were refused by the host for one reason or another.
486 */
487static void vmballoon_release_refused_pages(struct vmballoon *b)
488{
489 struct page *page, *next;
490
491 list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
492 list_del(&page->lru);
493 __free_page(page);
494 STATS_INC(b->stats.refused_free);
495 }
496
497 b->n_refused_pages = 0;
498}
499
500/*
501 * Inflate the balloon towards its target size. Note that we try to limit
502 * the rate of allocation to make sure we are not choking the rest of the
503 * system.
504 */
505static void vmballoon_inflate(struct vmballoon *b)
506{
507 unsigned int goal;
508 unsigned int rate;
509 unsigned int i;
510 unsigned int allocations = 0;
511 int error = 0;
512 bool alloc_can_sleep = false;
513
514 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
515
516 /*
517 * First try NOSLEEP page allocations to inflate balloon.
518 *
519 * If we do not throttle nosleep allocations, we can drain all
520 * free pages in the guest quickly (if the balloon target is high).
521 * As a side-effect, draining free pages helps to inform (force)
522 * the guest to start swapping if balloon target is not met yet,
523	 * which is a desired behavior. However, the balloon driver can consume
524 * all available CPU cycles if too many pages are allocated in a
525 * second. Therefore, we throttle nosleep allocations even when
526 * the guest is not under memory pressure. OTOH, if we have already
527 * predicted that the guest is under memory pressure, then we
528	 * slow down page allocations considerably.
529 */
530
531 goal = b->target - b->size;
532 /*
533	 * Start with the no-sleep allocation rate, which may be higher
534	 * than the sleeping allocation rate.
535 */
536 rate = b->slow_allocation_cycles ?
537 b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
538
539 pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
540 __func__, goal, rate, b->rate_alloc);
541
542 for (i = 0; i < goal; i++) {
543
544 error = vmballoon_reserve_page(b, alloc_can_sleep);
545 if (error) {
546 if (error != -ENOMEM) {
547 /*
548 * Not a page allocation failure, stop this
549 * cycle. Maybe we'll get new target from
550 * the host soon.
551 */
552 break;
553 }
554
555 if (alloc_can_sleep) {
556 /*
557 * CANSLEEP page allocation failed, so guest
558 * is under severe memory pressure. Quickly
559 * decrease allocation rate.
560 */
561 b->rate_alloc = max(b->rate_alloc / 2,
562 VMW_BALLOON_RATE_ALLOC_MIN);
563 break;
564 }
565
566 /*
567 * NOSLEEP page allocation failed, so the guest is
568 * under memory pressure. Let us slow down page
569 * allocations for next few cycles so that the guest
570 * gets out of memory pressure. Also, if we already
571 * allocated b->rate_alloc pages, let's pause,
572 * otherwise switch to sleeping allocations.
573 */
574 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
575
576 if (i >= b->rate_alloc)
577 break;
578
579 alloc_can_sleep = true;
580 /* Lower rate for sleeping allocations. */
581 rate = b->rate_alloc;
582 }
583
584 if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
585 cond_resched();
586 allocations = 0;
587 }
588
589 if (i >= rate) {
590 /* We allocated enough pages, let's take a break. */
591 break;
592 }
593 }
594
595 /*
596	 * We reached our goal without failures, so try increasing the
597	 * allocation rate.
598 */
599 if (error == 0 && i >= b->rate_alloc) {
600 unsigned int mult = i / b->rate_alloc;
601
602 b->rate_alloc =
603 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
604 VMW_BALLOON_RATE_ALLOC_MAX);
605 }
606
607 vmballoon_release_refused_pages(b);
608}
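/*
 * Editor's note -- a worked example of the rate adjustment above (not part
 * of the driver source): with rate_alloc = 512 and a goal of 2048 pages
 * that is reached without a single failure, i ends at 2048, mult = 4 and
 *
 *   rate_alloc = min(512 + 4 * VMW_BALLOON_RATE_ALLOC_INC, 2048)
 *              = min(512 + 64, 2048) = 576
 *
 * Conversely, a failed CANSLEEP allocation halves rate_alloc, bounded below
 * by VMW_BALLOON_RATE_ALLOC_MIN (512).
 */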
609
610/*
611 * Decrease the size of the balloon, allowing the guest to use more memory.
612 */
613static void vmballoon_deflate(struct vmballoon *b)
614{
615 struct page *page, *next;
616 unsigned int i = 0;
617 unsigned int goal;
618 int error;
619
620 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
621
622 /* limit deallocation rate */
623 goal = min(b->size - b->target, b->rate_free);
624
625 pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
626
627 /* free pages to reach target */
628 list_for_each_entry_safe(page, next, &b->pages, lru) {
629 error = vmballoon_release_page(b, page);
630 if (error) {
631 /* quickly decrease rate in case of error */
632 b->rate_free = max(b->rate_free / 2,
633 VMW_BALLOON_RATE_FREE_MIN);
634 return;
635 }
636
637 if (++i >= goal)
638 break;
639 }
640
641 /* slowly increase rate if there were no errors */
642 b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
643 VMW_BALLOON_RATE_FREE_MAX);
644}
645
646/*
647 * Balloon work function: reset the protocol if needed, get the new target
648 * size and adjust the balloon as needed. Runs again after about one second.
649 */
650static void vmballoon_work(struct work_struct *work)
651{
652 struct delayed_work *dwork = to_delayed_work(work);
653 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
654 unsigned int target;
655
656 STATS_INC(b->stats.timer);
657
658 if (b->reset_required)
659 vmballoon_reset(b);
660
661 if (b->slow_allocation_cycles > 0)
662 b->slow_allocation_cycles--;
663
664 if (vmballoon_send_get_target(b, &target)) {
665 /* update target, adjust size */
666 b->target = target;
667
668 if (b->size < target)
669 vmballoon_inflate(b);
670 else if (b->size > target)
671 vmballoon_deflate(b);
672 }
673
674 queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
675}
676
677/*
678 * DEBUGFS Interface
679 */
680#ifdef CONFIG_DEBUG_FS
681
682static int vmballoon_debug_show(struct seq_file *f, void *offset)
683{
684 struct vmballoon *b = f->private;
685 struct vmballoon_stats *stats = &b->stats;
686
687 /* format size info */
688 seq_printf(f,
689 "target: %8d pages\n"
690 "current: %8d pages\n",
691 b->target, b->size);
692
693 /* format rate info */
694 seq_printf(f,
695 "rateNoSleepAlloc: %8d pages/sec\n"
696 "rateSleepAlloc: %8d pages/sec\n"
697 "rateFree: %8d pages/sec\n",
698 VMW_BALLOON_NOSLEEP_ALLOC_MAX,
699 b->rate_alloc, b->rate_free);
700
701 seq_printf(f,
702 "\n"
703 "timer: %8u\n"
704 "start: %8u (%4u failed)\n"
705 "guestType: %8u (%4u failed)\n"
706 "lock: %8u (%4u failed)\n"
707 "unlock: %8u (%4u failed)\n"
708 "target: %8u (%4u failed)\n"
709 "primNoSleepAlloc: %8u (%4u failed)\n"
710 "primCanSleepAlloc: %8u (%4u failed)\n"
711 "primFree: %8u\n"
712 "errAlloc: %8u\n"
713 "errFree: %8u\n",
714 stats->timer,
715 stats->start, stats->start_fail,
716 stats->guest_type, stats->guest_type_fail,
717 stats->lock, stats->lock_fail,
718 stats->unlock, stats->unlock_fail,
719 stats->target, stats->target_fail,
720 stats->alloc, stats->alloc_fail,
721 stats->sleep_alloc, stats->sleep_alloc_fail,
722 stats->free,
723 stats->refused_alloc, stats->refused_free);
724
725 return 0;
726}
727
728static int vmballoon_debug_open(struct inode *inode, struct file *file)
729{
730 return single_open(file, vmballoon_debug_show, inode->i_private);
731}
732
733static const struct file_operations vmballoon_debug_fops = {
734 .owner = THIS_MODULE,
735 .open = vmballoon_debug_open,
736 .read = seq_read,
737 .llseek = seq_lseek,
738 .release = single_release,
739};
740
741static int __init vmballoon_debugfs_init(struct vmballoon *b)
742{
743 int error;
744
745 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
746 &vmballoon_debug_fops);
747 if (IS_ERR(b->dbg_entry)) {
748 error = PTR_ERR(b->dbg_entry);
749 pr_err("failed to create debugfs entry, error: %d\n", error);
750 return error;
751 }
752
753 return 0;
754}
755
756static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
757{
758 debugfs_remove(b->dbg_entry);
759}
760
761#else
762
763static inline int vmballoon_debugfs_init(struct vmballoon *b)
764{
765 return 0;
766}
767
768static inline void vmballoon_debugfs_exit(struct vmballoon *b)
769{
770}
771
772#endif /* CONFIG_DEBUG_FS */
773
774static int __init vmballoon_init(void)
775{
776 int error;
777
778 /*
779 * Check if we are running on VMware's hypervisor and bail out
780 * if we are not.
781 */
782 if (x86_hyper != &x86_hyper_vmware)
783 return -ENODEV;
784
785 vmballoon_wq = create_freezeable_workqueue("vmmemctl");
786 if (!vmballoon_wq) {
787 pr_err("failed to create workqueue\n");
788 return -ENOMEM;
789 }
790
791 INIT_LIST_HEAD(&balloon.pages);
792 INIT_LIST_HEAD(&balloon.refused_pages);
793
794 /* initialize rates */
795 balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
796 balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
797
798 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
799
800 /*
801 * Start balloon.
802 */
803 if (!vmballoon_send_start(&balloon)) {
804 pr_err("failed to send start command to the host\n");
805 error = -EIO;
806 goto fail;
807 }
808
809 if (!vmballoon_send_guest_id(&balloon)) {
810 pr_err("failed to send guest ID to the host\n");
811 error = -EIO;
812 goto fail;
813 }
814
815 error = vmballoon_debugfs_init(&balloon);
816 if (error)
817 goto fail;
818
819 queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
820
821 return 0;
822
823fail:
824 destroy_workqueue(vmballoon_wq);
825 return error;
826}
827module_init(vmballoon_init);
828
829static void __exit vmballoon_exit(void)
830{
831 cancel_delayed_work_sync(&balloon.dwork);
832 destroy_workqueue(vmballoon_wq);
833
834 vmballoon_debugfs_exit(&balloon);
835
836 /*
837	 * Deallocate all reserved memory and reset the connection with the monitor.
838	 * Reset the connection before deallocating memory to avoid the potential for
839	 * additional spurious resets from the guest touching deallocated pages.
840 */
841 vmballoon_send_start(&balloon);
842 vmballoon_pop(&balloon);
843}
844module_exit(vmballoon_exit);