Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/braille/braille_console.c1
-rw-r--r--drivers/hid/hid-lg.h2
-rw-r--r--drivers/hwmon/Kconfig37
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/k10temp.c197
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c183
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/smsc47m1.c153
-rw-r--r--drivers/hwmon/via-cputemp.c356
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/idle/i7300_idle.c15
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/leds-adp5520.c230
-rw-r--r--drivers/leds/leds-alix2.c115
-rw-r--r--drivers/leds/leds-cobalt-qube.c4
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-lt3593.c217
-rw-r--r--drivers/leds/leds-pwm.c5
-rw-r--r--drivers/leds/leds-regulator.c242
-rw-r--r--drivers/leds/leds-ss4200.c556
-rw-r--r--drivers/message/fusion/mptbase.c6
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/enclosure.c1
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/host/Kconfig37
-rw-r--r--drivers/mmc/host/Makefile6
-rw-r--r--drivers/mmc/host/sdhci-of-core.c (renamed from drivers/mmc/host/sdhci-of.c)143
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c143
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c65
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c13
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/excite_nandflash.c248
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/octeon/Kconfig10
-rw-r--r--drivers/net/octeon/Makefile2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c1176
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/mdio-octeon.c180
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c6
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/regulator/88pm8607.c685
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/ab3100.c33
-rw-r--r--drivers/regulator/core.c248
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/max8660.c510
-rw-r--r--drivers/regulator/mc13783-regulator.c245
-rw-r--r--drivers/regulator/mc13783.c410
-rw-r--r--drivers/regulator/twl-regulator.c147
-rw-r--r--drivers/regulator/wm831x-dcdc.c207
-rw-r--r--drivers/regulator/wm831x-ldo.c2
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-ds1305.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_diag.c42
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c3
-rw-r--r--drivers/s390/char/tape_block.c1
-rw-r--r--drivers/s390/char/tape_char.c3
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/device.c1
-rw-r--r--drivers/s390/cio/device_pgid.c29
-rw-r--r--drivers/s390/cio/fcx.c4
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio_main.c3
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/cio/qdio_perf.h1
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c24
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c137
-rw-r--r--drivers/scsi/hpsa.c3531
-rw-r--r--drivers/scsi/hpsa.h273
-rw-r--r--drivers/scsi/hpsa_cmd.h326
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c65
-rw-r--r--drivers/scsi/libfc/fc_lport.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c88
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c57
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h32
-rw-r--r--drivers/scsi/pmcraid.c34
-rw-r--r--drivers/scsi/pmcraid.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c75
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c17
-rw-r--r--drivers/scsi/sd.c107
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/st.c23
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/spi/Kconfig28
-rw-r--r--drivers/spi/Makefile10
-rw-r--r--drivers/spi/atmel_spi.c6
-rw-r--r--drivers/spi/dw_spi.c944
-rw-r--r--drivers/spi/dw_spi_pci.c169
-rw-r--r--drivers/spi/spi_bfin5xx.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c244
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.S116
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.h26
-rw-r--r--drivers/spi/spi_s3c64xx.c1196
-rw-r--r--drivers/spi/spi_sh_sci.c2
-rw-r--r--drivers/spi/spi_txx9.c6
-rw-r--r--drivers/spi/spidev.c18
-rw-r--r--drivers/staging/iio/ring_sw.h1
-rw-r--r--drivers/staging/octeon/Kconfig3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c204
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h2
-rw-r--r--drivers/staging/octeon/ethernet-proc.c112
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c52
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c2
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c2
-rw-r--r--drivers/staging/octeon/ethernet.c23
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h6
-rw-r--r--drivers/staging/panel/panel.c2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/video/atafb.c3
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adx_bl.c2
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/cr_bllcd.c4
-rw-r--r--drivers/video/backlight/da903x_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/locomolcd.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c20
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/backlight/progear_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c11
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/wm831x_bl.c2
-rw-r--r--drivers/video/omap/lcd_ldp.c4
-rw-r--r--drivers/video/omap/lcd_omap2evm.c10
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c2
-rw-r--r--drivers/video/omap/lcd_omap3evm.c10
-rw-r--r--drivers/video/omap/lcd_overo.c2
-rw-r--r--drivers/video/via/viafbdev.c4
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/geodewdt.c40
-rw-r--r--drivers/watchdog/rm9k_wdt.c419
185 files changed, 13838 insertions, 2301 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d672cfe7ca59..cb423f5aef24 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -21,7 +21,6 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/autoconf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index 27ae750ca878..bf31592eaf79 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -1,8 +1,6 @@
 #ifndef __HID_LG_H
 #define __HID_LG_H
 
-#include <linux/autoconf.h>
-
 #ifdef CONFIG_LOGITECH_FF
 int lgff_init(struct hid_device *hdev);
 #else
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 95ccbe377f9c..46c3c566307e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -228,6 +228,18 @@ config SENSORS_K8TEMP
 	  This driver can also be built as a module. If so, the module
 	  will be called k8temp.
 
+config SENSORS_K10TEMP
+	tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+	depends on X86 && PCI
+	help
+	  If you say yes here you get support for the temperature
+	  sensor(s) inside your CPU. Supported are later revisions of
+	  the AMD Family 10h and all revisions of the AMD Family 11h
+	  microarchitectures.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called k10temp.
+
 config SENSORS_AMS
 	tristate "Apple Motion Sensor driver"
 	depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
@@ -810,6 +822,14 @@ config SENSORS_TMP421
 	  This driver can also be built as a module. If so, the module
 	  will be called tmp421.
 
+config SENSORS_VIA_CPUTEMP
+	tristate "VIA CPU temperature sensor"
+	depends on X86
+	help
+	  If you say yes here you get support for the temperature
+	  sensor inside your CPU. Supported are all known variants of
+	  the VIA C7 and Nano.
+
 config SENSORS_VIA686A
 	tristate "VIA686A"
 	depends on PCI
@@ -998,6 +1018,23 @@ config SENSORS_LIS3_SPI
 	  will be called lis3lv02d and a specific module for the SPI transport
 	  is called lis3lv02d_spi.
 
+config SENSORS_LIS3_I2C
+	tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+	depends on I2C && INPUT
+	select INPUT_POLLDEV
+	default n
+	help
+	  This driver provides support for the LIS3LV02Dx accelerometer connected
+	  via I2C. The accelerometer data is readable via
+	  /sys/devices/platform/lis3lv02d.
+
+	  This driver also provides an absolute input class device, allowing
+	  the device to act as a pinball machine-esque joystick.
+
+	  This driver can also be built as modules. If so, the core module
+	  will be called lis3lv02d and a specific module for the I2C transport
+	  is called lis3lv02d_i2c.
+
 config SENSORS_APPLESMC
 	tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
 	depends on INPUT && X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 33c2ee105284..450c8e894277 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -53,8 +53,10 @@ obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
 obj-$(CONFIG_SENSORS_IBMPEX)	+= ibmpex.o
 obj-$(CONFIG_SENSORS_IT87)	+= it87.o
 obj-$(CONFIG_SENSORS_K8TEMP)	+= k8temp.o
+obj-$(CONFIG_SENSORS_K10TEMP)	+= k10temp.o
 obj-$(CONFIG_SENSORS_LIS3LV02D)	+= lis3lv02d.o hp_accel.o
 obj-$(CONFIG_SENSORS_LIS3_SPI)	+= lis3lv02d.o lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C)	+= lis3lv02d.o lis3lv02d_i2c.o
 obj-$(CONFIG_SENSORS_LM63)	+= lm63.o
 obj-$(CONFIG_SENSORS_LM70)	+= lm70.o
 obj-$(CONFIG_SENSORS_LM73)	+= lm73.o
@@ -87,6 +89,7 @@ obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
 obj-$(CONFIG_SENSORS_THMC50)	+= thmc50.o
 obj-$(CONFIG_SENSORS_TMP401)	+= tmp401.o
 obj-$(CONFIG_SENSORS_TMP421)	+= tmp421.o
+obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
 obj-$(CONFIG_SENSORS_VIA686A)	+= via686a.o
 obj-$(CONFIG_SENSORS_VT1211)	+= vt1211.o
 obj-$(CONFIG_SENSORS_VT8231)	+= vt8231.o
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
new file mode 100644
index 000000000000..d8a26d16d948
--- /dev/null
+++ b/drivers/hwmon/k10temp.c
@@ -0,0 +1,197 @@
1/*
2 * k10temp.c - AMD Family 10h/11h processor hardware monitoring
3 *
4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
5 *
6 *
7 * This driver is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This driver is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 * See the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/err.h>
21#include <linux/hwmon.h>
22#include <linux/hwmon-sysfs.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <asm/processor.h>
27
28MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
30MODULE_LICENSE("GPL");
31
32static bool force;
33module_param(force, bool, 0444);
34MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
35
36#define REG_HARDWARE_THERMAL_CONTROL 0x64
37#define HTC_ENABLE 0x00000001
38
39#define REG_REPORTED_TEMPERATURE 0xa4
40
41#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
42#define NB_CAP_HTC 0x00000400
43
44static ssize_t show_temp(struct device *dev,
45 struct device_attribute *attr, char *buf)
46{
47 u32 regval;
48
49 pci_read_config_dword(to_pci_dev(dev),
50 REG_REPORTED_TEMPERATURE, &regval);
51 return sprintf(buf, "%u\n", (regval >> 21) * 125);
52}
53
54static ssize_t show_temp_max(struct device *dev,
55 struct device_attribute *attr, char *buf)
56{
57 return sprintf(buf, "%d\n", 70 * 1000);
58}
59
60static ssize_t show_temp_crit(struct device *dev,
61 struct device_attribute *devattr, char *buf)
62{
63 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
64 int show_hyst = attr->index;
65 u32 regval;
66 int value;
67
68 pci_read_config_dword(to_pci_dev(dev),
69 REG_HARDWARE_THERMAL_CONTROL, &regval);
70 value = ((regval >> 16) & 0x7f) * 500 + 52000;
71 if (show_hyst)
72 value -= ((regval >> 24) & 0xf) * 500;
73 return sprintf(buf, "%d\n", value);
74}
75
76static ssize_t show_name(struct device *dev,
77 struct device_attribute *attr, char *buf)
78{
79 return sprintf(buf, "k10temp\n");
80}
81
82static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
83static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
84static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
85static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
86static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
87
88static bool __devinit has_erratum_319(void)
89{
90 /*
91 * Erratum 319: The thermal sensor of older Family 10h processors
92 * (B steppings) may be unreliable.
93 */
94 return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2;
95}
96
97static int __devinit k10temp_probe(struct pci_dev *pdev,
98 const struct pci_device_id *id)
99{
100 struct device *hwmon_dev;
101 u32 reg_caps, reg_htc;
102 int err;
103
104 if (has_erratum_319() && !force) {
105 dev_err(&pdev->dev,
106 "unreliable CPU thermal sensor; monitoring disabled\n");
107 err = -ENODEV;
108 goto exit;
109 }
110
111 err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
112 if (err)
113 goto exit;
114 err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
115 if (err)
116 goto exit_remove;
117
118 pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
119 pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
120 if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
121 err = device_create_file(&pdev->dev,
122 &sensor_dev_attr_temp1_crit.dev_attr);
123 if (err)
124 goto exit_remove;
125 err = device_create_file(&pdev->dev,
126 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
127 if (err)
128 goto exit_remove;
129 }
130
131 err = device_create_file(&pdev->dev, &dev_attr_name);
132 if (err)
133 goto exit_remove;
134
135 hwmon_dev = hwmon_device_register(&pdev->dev);
136 if (IS_ERR(hwmon_dev)) {
137 err = PTR_ERR(hwmon_dev);
138 goto exit_remove;
139 }
140 dev_set_drvdata(&pdev->dev, hwmon_dev);
141
142 if (has_erratum_319() && force)
143 dev_warn(&pdev->dev,
144 "unreliable CPU thermal sensor; check erratum 319\n");
145 return 0;
146
147exit_remove:
148 device_remove_file(&pdev->dev, &dev_attr_name);
149 device_remove_file(&pdev->dev, &dev_attr_temp1_input);
150 device_remove_file(&pdev->dev, &dev_attr_temp1_max);
151 device_remove_file(&pdev->dev,
152 &sensor_dev_attr_temp1_crit.dev_attr);
153 device_remove_file(&pdev->dev,
154 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
155exit:
156 return err;
157}
158
159static void __devexit k10temp_remove(struct pci_dev *pdev)
160{
161 hwmon_device_unregister(dev_get_drvdata(&pdev->dev));
162 device_remove_file(&pdev->dev, &dev_attr_name);
163 device_remove_file(&pdev->dev, &dev_attr_temp1_input);
164 device_remove_file(&pdev->dev, &dev_attr_temp1_max);
165 device_remove_file(&pdev->dev,
166 &sensor_dev_attr_temp1_crit.dev_attr);
167 device_remove_file(&pdev->dev,
168 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
169 dev_set_drvdata(&pdev->dev, NULL);
170}
171
172static struct pci_device_id k10temp_id_table[] = {
173 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
174 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
175 {}
176};
177MODULE_DEVICE_TABLE(pci, k10temp_id_table);
178
179static struct pci_driver k10temp_driver = {
180 .name = "k10temp",
181 .id_table = k10temp_id_table,
182 .probe = k10temp_probe,
183 .remove = __devexit_p(k10temp_remove),
184};
185
186static int __init k10temp_init(void)
187{
188 return pci_register_driver(&k10temp_driver);
189}
190
191static void __exit k10temp_exit(void)
192{
193 pci_unregister_driver(&k10temp_driver);
194}
195
196module_init(k10temp_init)
197module_exit(k10temp_exit)
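
A note on the conversion in show_temp() above: the driver reads the reported-temperature register (0xa4) from the northbridge's PCI config space, and bits 31:21 hold the current temperature in 0.125 degree C steps, so (regval >> 21) * 125 yields the millidegrees Celsius value expected by the hwmon sysfs ABI. The user-space sketch below only mirrors that scaling; the sample register value and helper name are hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: mirrors k10temp's scaling of the temperature field. */
    static unsigned long k10temp_to_millicelsius(uint32_t regval)
    {
            /* bits 31:21 carry the temperature in 0.125 degree C units */
            return (unsigned long)(regval >> 21) * 125;
    }

    int main(void)
    {
            uint32_t sample = 0x2a200000;   /* hypothetical register readout */

            printf("%lu millidegrees C\n", k10temp_to_millicelsius(sample));
            return 0;
    }
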
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
new file mode 100644
index 000000000000..dc1f5402c1d7
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -0,0 +1,183 @@
1/*
2 * drivers/hwmon/lis3lv02d_i2c.c
3 *
4 * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
5 * Driver is based on corresponding SPI driver written by Daniel Mack
6 * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
7 *
8 * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
9 *
10 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 * 02110-1301 USA
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/err.h>
31#include <linux/i2c.h>
32#include "lis3lv02d.h"
33
34#define DRV_NAME "lis3lv02d_i2c"
35
36static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
37{
38 struct i2c_client *c = lis3->bus_priv;
39 return i2c_smbus_write_byte_data(c, reg, value);
40}
41
42static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
43{
44 struct i2c_client *c = lis3->bus_priv;
45 *v = i2c_smbus_read_byte_data(c, reg);
46 return 0;
47}
48
49static int lis3_i2c_init(struct lis3lv02d *lis3)
50{
51 u8 reg;
52 int ret;
53
54 /* power up the device */
55 ret = lis3->read(lis3, CTRL_REG1, &reg);
56 if (ret < 0)
57 return ret;
58
59 reg |= CTRL1_PD0;
60 return lis3->write(lis3, CTRL_REG1, reg);
61}
62
63/* Default axis mapping but it can be overwritten by platform data */
64static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
65 LIS3_DEV_Y,
66 LIS3_DEV_Z };
67
68static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
69 const struct i2c_device_id *id)
70{
71 int ret = 0;
72 struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
73
74 if (pdata) {
75 if (pdata->axis_x)
76 lis3lv02d_axis_map.x = pdata->axis_x;
77
78 if (pdata->axis_y)
79 lis3lv02d_axis_map.y = pdata->axis_y;
80
81 if (pdata->axis_z)
82 lis3lv02d_axis_map.z = pdata->axis_z;
83
84 if (pdata->setup_resources)
85 ret = pdata->setup_resources();
86
87 if (ret)
88 goto fail;
89 }
90
91 lis3_dev.pdata = pdata;
92 lis3_dev.bus_priv = client;
93 lis3_dev.init = lis3_i2c_init;
94 lis3_dev.read = lis3_i2c_read;
95 lis3_dev.write = lis3_i2c_write;
96 lis3_dev.irq = client->irq;
97 lis3_dev.ac = lis3lv02d_axis_map;
98
99 i2c_set_clientdata(client, &lis3_dev);
100 ret = lis3lv02d_init_device(&lis3_dev);
101fail:
102 return ret;
103}
104
105static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
106{
107 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
108 struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
109
110 if (pdata && pdata->release_resources)
111 pdata->release_resources();
112
113 lis3lv02d_joystick_disable();
114 lis3lv02d_poweroff(lis3);
115
116 return lis3lv02d_remove_fs(&lis3_dev);
117}
118
119#ifdef CONFIG_PM
120static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123
124 if (!lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3);
126 return 0;
127}
128
129static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132
133 if (!lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3);
135 return 0;
136}
137
138static void lis3lv02d_i2c_shutdown(struct i2c_client *client)
139{
140 lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
141}
142#else
143#define lis3lv02d_i2c_suspend NULL
144#define lis3lv02d_i2c_resume NULL
145#define lis3lv02d_i2c_shutdown NULL
146#endif
147
148static const struct i2c_device_id lis3lv02d_id[] = {
149 {"lis3lv02d", 0 },
150 {}
151};
152
153MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
154
155static struct i2c_driver lis3lv02d_i2c_driver = {
156 .driver = {
157 .name = DRV_NAME,
158 .owner = THIS_MODULE,
159 },
160 .suspend = lis3lv02d_i2c_suspend,
161 .shutdown = lis3lv02d_i2c_shutdown,
162 .resume = lis3lv02d_i2c_resume,
163 .probe = lis3lv02d_i2c_probe,
164 .remove = __devexit_p(lis3lv02d_i2c_remove),
165 .id_table = lis3lv02d_id,
166};
167
168static int __init lis3lv02d_init(void)
169{
170 return i2c_add_driver(&lis3lv02d_i2c_driver);
171}
172
173static void __exit lis3lv02d_exit(void)
174{
175 i2c_del_driver(&lis3lv02d_i2c_driver);
176}
177
178MODULE_AUTHOR("Nokia Corporation");
179MODULE_DESCRIPTION("lis3lv02d I2C interface");
180MODULE_LICENSE("GPL");
181
182module_init(lis3lv02d_init);
183module_exit(lis3lv02d_exit);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index ebe38b680ee3..864a371f6eb9 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
 	int d1 = 0;
 	int i;
 
-	for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
+	for (i = 1; i < ARRAY_SIZE(temppoints); i++)
 		/* Find pointer to interpolate */
 		if (data->supply_uV > temppoints[i - 1].vdd) {
 			d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
 
 	const int c1 = -4;
 	const int c2 = 40500; /* x 10 ^ -6 */
-	const int c3 = 2800; /* x10 ^ -9 */
+	const int c3 = -2800; /* x10 ^ -9 */
 
 	RHlinear = c1*1000
 		+ c2 * data->val_humid/1000
 		+ (data->val_humid * data->val_humid * c3)/1000000;
-	return (temp - 25000) * (10000 + 800 * data->val_humid)
+	return (temp - 25000) * (10000 + 80 * data->val_humid)
 		/ 1000000 + RHlinear;
 }
 
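
A note on the constants fixed above: with the driver's fixed-point scaling, c2 = 40500 x 10^-6 and c3 = -2800 x 10^-9 correspond to 0.0405 and -2.8e-6 in the SHT1x relative-humidity linearization, and the compensation term (10000 + 80 * val) / 10^6 corresponds to 0.01 + 0.00008 * val. The floating-point sketch below only illustrates the same formula; the function name and sample inputs are hypothetical, and the driver's integer arithmetic remains authoritative.

    #include <stdio.h>

    /* Illustrative only: floating-point form of the humidity conversion
     * that the integer arithmetic in sht15_calc_humid() approximates. */
    static double sht1x_rh(double so_rh, double temp_c)
    {
            const double c1 = -4.0, c2 = 0.0405, c3 = -2.8e-6;
            const double t1 = 0.01, t2 = 0.00008;
            double rh_linear = c1 + c2 * so_rh + c3 * so_rh * so_rh;

            return (temp_c - 25.0) * (t1 + t2 * so_rh) + rh_linear;
    }

    int main(void)
    {
            /* hypothetical raw humidity reading at 23 degrees C */
            printf("RH = %.1f %%\n", sht1x_rh(1350.0, 23.0));
            return 0;
    }
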
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8ad50fdba00d..9ca97818bd4b 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -136,11 +136,11 @@ struct smsc47m1_data {
 
 struct smsc47m1_sio_data {
 	enum chips type;
+	u8 activate;		/* Remember initial device state */
 };
 
 
-static int smsc47m1_probe(struct platform_device *pdev);
-static int __devexit smsc47m1_remove(struct platform_device *pdev);
+static int __exit smsc47m1_remove(struct platform_device *pdev);
 static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
 		int init);
 
@@ -160,8 +160,7 @@ static struct platform_driver smsc47m1_driver = {
 		.owner	= THIS_MODULE,
 		.name	= DRVNAME,
 	},
-	.probe		= smsc47m1_probe,
-	.remove		= __devexit_p(smsc47m1_remove),
+	.remove		= __exit_p(smsc47m1_remove),
 };
 
 static ssize_t get_fan(struct device *dev, struct device_attribute
@@ -470,24 +469,126 @@ static int __init smsc47m1_find(unsigned short *addr,
 	superio_select();
 	*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
 	      | superio_inb(SUPERIO_REG_BASE + 1);
-	val = superio_inb(SUPERIO_REG_ACT);
-	if (*addr == 0 || (val & 0x01) == 0) {
-		pr_info(DRVNAME ": Device is disabled, will not use\n");
+	if (*addr == 0) {
+		pr_info(DRVNAME ": Device address not set, will not use\n");
 		superio_exit();
 		return -ENODEV;
 	}
 
+	/* Enable only if address is set (needed at least on the
+	 * Compaq Presario S4000NX) */
+	sio_data->activate = superio_inb(SUPERIO_REG_ACT);
+	if ((sio_data->activate & 0x01) == 0) {
+		pr_info(DRVNAME ": Enabling device\n");
+		superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
+	}
+
 	superio_exit();
 	return 0;
 }
 
-static int __devinit smsc47m1_probe(struct platform_device *pdev)
+/* Restore device to its initial state */
+static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
+{
+	if ((sio_data->activate & 0x01) == 0) {
+		superio_enter();
+		superio_select();
+
+		pr_info(DRVNAME ": Disabling device\n");
+		superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+
+		superio_exit();
+	}
+}
+
+#define CHECK		1
+#define REQUEST		2
+#define RELEASE		3
+
+/*
+ * This function can be used to:
+ *  - test for resource conflicts with ACPI
+ *  - request the resources
+ *  - release the resources
+ * We only allocate the I/O ports we really need, to minimize the risk of
+ * conflicts with ACPI or with other drivers.
+ */
+static int smsc47m1_handle_resources(unsigned short address, enum chips type,
+				     int action, struct device *dev)
+{
+	static const u8 ports_m1[] = {
+		/* register, region length */
+		0x04, 1,
+		0x33, 4,
+		0x56, 7,
+	};
+
+	static const u8 ports_m2[] = {
+		/* register, region length */
+		0x04, 1,
+		0x09, 1,
+		0x2c, 2,
+		0x35, 4,
+		0x56, 7,
+		0x69, 4,
+	};
+
+	int i, ports_size, err;
+	const u8 *ports;
+
+	switch (type) {
+	case smsc47m1:
+	default:
+		ports = ports_m1;
+		ports_size = ARRAY_SIZE(ports_m1);
+		break;
+	case smsc47m2:
+		ports = ports_m2;
+		ports_size = ARRAY_SIZE(ports_m2);
+		break;
+	}
+
+	for (i = 0; i + 1 < ports_size; i += 2) {
+		unsigned short start = address + ports[i];
+		unsigned short len = ports[i + 1];
+
+		switch (action) {
+		case CHECK:
+			/* Only check for conflicts */
+			err = acpi_check_region(start, len, DRVNAME);
+			if (err)
+				return err;
+			break;
+		case REQUEST:
+			/* Request the resources */
+			if (!request_region(start, len, DRVNAME)) {
+				dev_err(dev, "Region 0x%hx-0x%hx already in "
+					"use!\n", start, start + len);
+
+				/* Undo all requests */
+				for (i -= 2; i >= 0; i -= 2)
+					release_region(address + ports[i],
+						       ports[i + 1]);
+				return -EBUSY;
+			}
+			break;
+		case RELEASE:
+			/* Release the resources */
+			release_region(start, len);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int __init smsc47m1_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct smsc47m1_sio_data *sio_data = dev->platform_data;
 	struct smsc47m1_data *data;
 	struct resource *res;
-	int err = 0;
+	int err;
 	int fan1, fan2, fan3, pwm1, pwm2, pwm3;
 
 	static const char *names[] = {
@@ -496,12 +597,10 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
 	};
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	if (!request_region(res->start, SMSC_EXTENT, DRVNAME)) {
-		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-			(unsigned long)res->start,
-			(unsigned long)res->end);
-		return -EBUSY;
-	}
+	err = smsc47m1_handle_resources(res->start, sio_data->type,
+					REQUEST, dev);
+	if (err < 0)
+		return err;
 
 	if (!(data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL))) {
 		err = -ENOMEM;
@@ -637,11 +736,11 @@ error_free:
 	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 error_release:
-	release_region(res->start, SMSC_EXTENT);
+	smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
 	return err;
 }
 
-static int __devexit smsc47m1_remove(struct platform_device *pdev)
+static int __exit smsc47m1_remove(struct platform_device *pdev)
 {
 	struct smsc47m1_data *data = platform_get_drvdata(pdev);
 	struct resource *res;
@@ -650,7 +749,7 @@ static int __devexit smsc47m1_remove(struct platform_device *pdev)
 	sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	release_region(res->start, SMSC_EXTENT);
+	smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
 	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
@@ -717,7 +816,7 @@ static int __init smsc47m1_device_add(unsigned short address,
 	};
 	int err;
 
-	err = acpi_check_resource_conflict(&res);
+	err = smsc47m1_handle_resources(address, sio_data->type, CHECK, NULL);
 	if (err)
 		goto exit;
 
@@ -766,27 +865,29 @@ static int __init sm_smsc47m1_init(void)
 	if (smsc47m1_find(&address, &sio_data))
 		return -ENODEV;
 
-	err = platform_driver_register(&smsc47m1_driver);
+	/* Sets global pdev as a side effect */
+	err = smsc47m1_device_add(address, &sio_data);
 	if (err)
 		goto exit;
 
-	/* Sets global pdev as a side effect */
-	err = smsc47m1_device_add(address, &sio_data);
+	err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
 	if (err)
-		goto exit_driver;
+		goto exit_device;
 
 	return 0;
 
-exit_driver:
-	platform_driver_unregister(&smsc47m1_driver);
+exit_device:
+	platform_device_unregister(pdev);
+	smsc47m1_restore(&sio_data);
 exit:
 	return err;
 }
 
 static void __exit sm_smsc47m1_exit(void)
 {
-	platform_device_unregister(pdev);
 	platform_driver_unregister(&smsc47m1_driver);
+	smsc47m1_restore(pdev->dev.platform_data);
+	platform_device_unregister(pdev);
 }
 
 MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
new file mode 100644
index 000000000000..7442cf754856
--- /dev/null
+++ b/drivers/hwmon/via-cputemp.c
@@ -0,0 +1,356 @@
1/*
2 * via-cputemp.c - Driver for VIA CPU core temperature monitoring
3 * Copyright (C) 2009 VIA Technologies, Inc.
4 *
5 * based on existing coretemp.c, which is
6 *
7 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301 USA.
22 */
23
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/jiffies.h>
29#include <linux/hwmon.h>
30#include <linux/sysfs.h>
31#include <linux/hwmon-sysfs.h>
32#include <linux/err.h>
33#include <linux/mutex.h>
34#include <linux/list.h>
35#include <linux/platform_device.h>
36#include <linux/cpu.h>
37#include <asm/msr.h>
38#include <asm/processor.h>
39
40#define DRVNAME "via_cputemp"
41
42enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
43
44/*
45 * Functions declaration
46 */
47
48struct via_cputemp_data {
49 struct device *hwmon_dev;
50 const char *name;
51 u32 id;
52 u32 msr;
53};
54
55/*
56 * Sysfs stuff
57 */
58
59static ssize_t show_name(struct device *dev, struct device_attribute
60 *devattr, char *buf)
61{
62 int ret;
63 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
64 struct via_cputemp_data *data = dev_get_drvdata(dev);
65
66 if (attr->index == SHOW_NAME)
67 ret = sprintf(buf, "%s\n", data->name);
68 else /* show label */
69 ret = sprintf(buf, "Core %d\n", data->id);
70 return ret;
71}
72
73static ssize_t show_temp(struct device *dev,
74 struct device_attribute *devattr, char *buf)
75{
76 struct via_cputemp_data *data = dev_get_drvdata(dev);
77 u32 eax, edx;
78 int err;
79
80 err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
81 if (err)
82 return -EAGAIN;
83
84 return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
85}
86
87static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
88 SHOW_TEMP);
89static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
90static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
91
92static struct attribute *via_cputemp_attributes[] = {
93 &sensor_dev_attr_name.dev_attr.attr,
94 &sensor_dev_attr_temp1_label.dev_attr.attr,
95 &sensor_dev_attr_temp1_input.dev_attr.attr,
96 NULL
97};
98
99static const struct attribute_group via_cputemp_group = {
100 .attrs = via_cputemp_attributes,
101};
102
103static int __devinit via_cputemp_probe(struct platform_device *pdev)
104{
105 struct via_cputemp_data *data;
106 struct cpuinfo_x86 *c = &cpu_data(pdev->id);
107 int err;
108 u32 eax, edx;
109
110 data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL);
111 if (!data) {
112 err = -ENOMEM;
113 dev_err(&pdev->dev, "Out of memory\n");
114 goto exit;
115 }
116
117 data->id = pdev->id;
118 data->name = "via_cputemp";
119
120 switch (c->x86_model) {
121 case 0xA:
122 /* C7 A */
123 case 0xD:
124 /* C7 D */
125 data->msr = 0x1169;
126 break;
127 case 0xF:
128 /* Nano */
129 data->msr = 0x1423;
130 break;
131 default:
132 err = -ENODEV;
133 goto exit_free;
134 }
135
136 /* test if we can access the TEMPERATURE MSR */
137 err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
138 if (err) {
139 dev_err(&pdev->dev,
140 "Unable to access TEMPERATURE MSR, giving up\n");
141 goto exit_free;
142 }
143
144 platform_set_drvdata(pdev, data);
145
146 err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group);
147 if (err)
148 goto exit_free;
149
150 data->hwmon_dev = hwmon_device_register(&pdev->dev);
151 if (IS_ERR(data->hwmon_dev)) {
152 err = PTR_ERR(data->hwmon_dev);
153 dev_err(&pdev->dev, "Class registration failed (%d)\n",
154 err);
155 goto exit_remove;
156 }
157
158 return 0;
159
160exit_remove:
161 sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
162exit_free:
163 platform_set_drvdata(pdev, NULL);
164 kfree(data);
165exit:
166 return err;
167}
168
169static int __devexit via_cputemp_remove(struct platform_device *pdev)
170{
171 struct via_cputemp_data *data = platform_get_drvdata(pdev);
172
173 hwmon_device_unregister(data->hwmon_dev);
174 sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
175 platform_set_drvdata(pdev, NULL);
176 kfree(data);
177 return 0;
178}
179
180static struct platform_driver via_cputemp_driver = {
181 .driver = {
182 .owner = THIS_MODULE,
183 .name = DRVNAME,
184 },
185 .probe = via_cputemp_probe,
186 .remove = __devexit_p(via_cputemp_remove),
187};
188
189struct pdev_entry {
190 struct list_head list;
191 struct platform_device *pdev;
192 unsigned int cpu;
193};
194
195static LIST_HEAD(pdev_list);
196static DEFINE_MUTEX(pdev_list_mutex);
197
198static int __cpuinit via_cputemp_device_add(unsigned int cpu)
199{
200 int err;
201 struct platform_device *pdev;
202 struct pdev_entry *pdev_entry;
203
204 pdev = platform_device_alloc(DRVNAME, cpu);
205 if (!pdev) {
206 err = -ENOMEM;
207 printk(KERN_ERR DRVNAME ": Device allocation failed\n");
208 goto exit;
209 }
210
211 pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
212 if (!pdev_entry) {
213 err = -ENOMEM;
214 goto exit_device_put;
215 }
216
217 err = platform_device_add(pdev);
218 if (err) {
219 printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
220 err);
221 goto exit_device_free;
222 }
223
224 pdev_entry->pdev = pdev;
225 pdev_entry->cpu = cpu;
226 mutex_lock(&pdev_list_mutex);
227 list_add_tail(&pdev_entry->list, &pdev_list);
228 mutex_unlock(&pdev_list_mutex);
229
230 return 0;
231
232exit_device_free:
233 kfree(pdev_entry);
234exit_device_put:
235 platform_device_put(pdev);
236exit:
237 return err;
238}
239
240#ifdef CONFIG_HOTPLUG_CPU
241static void via_cputemp_device_remove(unsigned int cpu)
242{
243 struct pdev_entry *p, *n;
244 mutex_lock(&pdev_list_mutex);
245 list_for_each_entry_safe(p, n, &pdev_list, list) {
246 if (p->cpu == cpu) {
247 platform_device_unregister(p->pdev);
248 list_del(&p->list);
249 kfree(p);
250 }
251 }
252 mutex_unlock(&pdev_list_mutex);
253}
254
255static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
256 unsigned long action, void *hcpu)
257{
258 unsigned int cpu = (unsigned long) hcpu;
259
260 switch (action) {
261 case CPU_ONLINE:
262 case CPU_DOWN_FAILED:
263 via_cputemp_device_add(cpu);
264 break;
265 case CPU_DOWN_PREPARE:
266 via_cputemp_device_remove(cpu);
267 break;
268 }
269 return NOTIFY_OK;
270}
271
272static struct notifier_block via_cputemp_cpu_notifier __refdata = {
273 .notifier_call = via_cputemp_cpu_callback,
274};
275#endif /* !CONFIG_HOTPLUG_CPU */
276
277static int __init via_cputemp_init(void)
278{
279 int i, err;
280 struct pdev_entry *p, *n;
281
282 if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
283 printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
284 err = -ENODEV;
285 goto exit;
286 }
287
288 err = platform_driver_register(&via_cputemp_driver);
289 if (err)
290 goto exit;
291
292 for_each_online_cpu(i) {
293 struct cpuinfo_x86 *c = &cpu_data(i);
294
295 if (c->x86 != 6)
296 continue;
297
298 if (c->x86_model < 0x0a)
299 continue;
300
301 if (c->x86_model > 0x0f) {
302 printk(KERN_WARNING DRVNAME ": Unknown CPU "
303 "model 0x%x\n", c->x86_model);
304 continue;
305 }
306
307 err = via_cputemp_device_add(i);
308 if (err)
309 goto exit_devices_unreg;
310 }
311 if (list_empty(&pdev_list)) {
312 err = -ENODEV;
313 goto exit_driver_unreg;
314 }
315
316#ifdef CONFIG_HOTPLUG_CPU
317 register_hotcpu_notifier(&via_cputemp_cpu_notifier);
318#endif
319 return 0;
320
321exit_devices_unreg:
322 mutex_lock(&pdev_list_mutex);
323 list_for_each_entry_safe(p, n, &pdev_list, list) {
324 platform_device_unregister(p->pdev);
325 list_del(&p->list);
326 kfree(p);
327 }
328 mutex_unlock(&pdev_list_mutex);
329exit_driver_unreg:
330 platform_driver_unregister(&via_cputemp_driver);
331exit:
332 return err;
333}
334
335static void __exit via_cputemp_exit(void)
336{
337 struct pdev_entry *p, *n;
338#ifdef CONFIG_HOTPLUG_CPU
339 unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
340#endif
341 mutex_lock(&pdev_list_mutex);
342 list_for_each_entry_safe(p, n, &pdev_list, list) {
343 platform_device_unregister(p->pdev);
344 list_del(&p->list);
345 kfree(p);
346 }
347 mutex_unlock(&pdev_list_mutex);
348 platform_driver_unregister(&via_cputemp_driver);
349}
350
351MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
352MODULE_DESCRIPTION("VIA CPU temperature monitor");
353MODULE_LICENSE("GPL");
354
355module_init(via_cputemp_init)
356module_exit(via_cputemp_exit)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index b257c7223733..38e280523071 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1135,6 +1135,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
1135 "W83687THF", 1135 "W83687THF",
1136 }; 1136 };
1137 1137
1138 sio_data->sioaddr = sioaddr;
1138 superio_enter(sio_data); 1139 superio_enter(sio_data);
1139 val = force_id ? force_id : superio_inb(sio_data, DEVID); 1140 val = force_id ? force_id : superio_inb(sio_data, DEVID);
1140 switch (val) { 1141 switch (val) {
@@ -1177,7 +1178,6 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
 	}
 
 	err = 0;
-	sio_data->sioaddr = sioaddr;
 	pr_info(DRVNAME ": Found %s chip at %#x\n",
 		names[sio_data->type], *addr);
 
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a042a4f5..dd253002cd50 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
 static u8 i7300_idle_thrtlow_saved;
 static u32 i7300_idle_mc_saved;
 
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
 static ktime_t start_ktime;
 static unsigned long avg_idle_us;
 
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 	spin_lock_irqsave(&i7300_idle_lock, flags);
 	if (val == IDLE_START) {
 
-		cpu_set(smp_processor_id(), idle_cpumask);
+		cpumask_set_cpu(smp_processor_id(), idle_cpumask);
 
-		if (cpus_weight(idle_cpumask) != num_online_cpus())
+		if (cpumask_weight(idle_cpumask) != num_online_cpus())
 			goto end;
 
 		now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 		i7300_idle_ioat_start();
 
 	} else if (val == IDLE_END) {
-		cpu_clear(smp_processor_id(), idle_cpumask);
-		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+		cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+		if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
 			/* First CPU coming out of idle */
 			u64 idle_duration_us;
 
@@ -553,7 +553,6 @@ struct debugfs_file_info {
 static int __init i7300_idle_init(void)
 {
 	spin_lock_init(&i7300_idle_lock);
-	cpus_clear(idle_cpumask);
 	total_us = 0;
 
 	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
 	if (i7300_idle_ioat_init())
 		return -ENODEV;
 
+	if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
 	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
 	if (debugfs_dir) {
 		int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
 static void __exit i7300_idle_exit(void)
 {
 	idle_notifier_unregister(&i7300_idle_nb);
+	free_cpumask_var(idle_cpumask);
 
 	if (debugfs_dir) {
 		int i = 0;
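
The i7300_idle changes above are the standard conversion from a static cpumask_t (plus cpus_clear()) to a dynamically allocated cpumask_var_t, which avoids embedding a full NR_CPUS-sized bitmap when CONFIG_CPUMASK_OFFSTACK is enabled. A minimal sketch of that allocate/use/free pattern follows; the module and symbol names are hypothetical and only illustrate the API the patch switches to.

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>

    /* Illustrative only: the cpumask_var_t lifecycle adopted by the patch. */
    static cpumask_var_t example_mask;

    static int __init example_init(void)
    {
            /* replaces "static cpumask_t mask;" + cpus_clear(mask);
             * allocates storage when CONFIG_CPUMASK_OFFSTACK=y */
            if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_set_cpu(0, example_mask);
            pr_info("weight=%u online=%u\n",
                    cpumask_weight(example_mask), num_online_cpus());
            cpumask_clear_cpu(0, example_mask);
            return 0;
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
            free_cpumask_var(example_mask);
    }
    module_exit(example_exit);

    MODULE_LICENSE("GPL");
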
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e4f599f20e38..8a0e1ec95e4a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -229,6 +229,12 @@ config LEDS_PWM
 	help
 	  This option enables support for pwm driven LEDs
 
+config LEDS_REGULATOR
+	tristate "REGULATOR driven LED support"
+	depends on LEDS_CLASS && REGULATOR
+	help
+	  This option enables support for regulator driven LEDs.
+
 config LEDS_BD2802
 	tristate "LED driver for BD2802 RGB LED"
 	depends on LEDS_CLASS && I2C
@@ -236,6 +242,33 @@ config LEDS_BD2802
 	  This option enables support for BD2802GU RGB LED driver chips
 	  accessed via the I2C bus.
 
+config LEDS_INTEL_SS4200
+	tristate "LED driver for Intel NAS SS4200 series"
+	depends on LEDS_CLASS && PCI && DMI
+	help
+	  This option enables support for the Intel SS4200 series of
+	  Network Attached Storage servers. You may control the hard
+	  drive or power LEDs on the front panel. Using this driver
+	  can stop the front LED from blinking after startup.
+
+config LEDS_LT3593
+	tristate "LED driver for LT3593 controllers"
+	depends on LEDS_CLASS && GENERIC_GPIO
+	help
+	  This option enables support for LEDs driven by a Linear Technology
+	  LT3593 controller. This controller uses a special one-wire pulse
+	  coding protocol to set the brightness.
+
+config LEDS_ADP5520
+	tristate "LED Support for ADP5520/ADP5501 PMIC"
+	depends on LEDS_CLASS && PMIC_ADP5520
+	help
+	  This option enables support for on-chip LED drivers found
+	  on Analog Devices ADP5520/ADP5501 PMICs.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called leds-adp5520.
+
 comment "LED Triggers"
 
 config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 46d72704d606..9e63869d7c0d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
 obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 obj-$(CONFIG_LEDS_PWM)			+= leds-pwm.o
+obj-$(CONFIG_LEDS_REGULATOR)		+= leds-regulator.o
+obj-$(CONFIG_LEDS_INTEL_SS4200)		+= leds-ss4200.o
+obj-$(CONFIG_LEDS_LT3593)		+= leds-lt3593.o
+obj-$(CONFIG_LEDS_ADP5520)		+= leds-adp5520.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
new file mode 100644
index 000000000000..a8f315902131
--- /dev/null
+++ b/drivers/leds/leds-adp5520.c
@@ -0,0 +1,230 @@
1/*
2 * LEDs driver for Analog Devices ADP5520/ADP5501 MFD PMICs
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * Loosely derived from leds-da903x:
7 * Copyright (C) 2008 Compulab, Ltd.
8 * Mike Rapoport <mike@compulab.co.il>
9 *
10 * Copyright (C) 2006-2008 Marvell International Ltd.
11 * Eric Miao <eric.miao@marvell.com>
12 *
13 * Licensed under the GPL-2 or later.
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/platform_device.h>
20#include <linux/leds.h>
21#include <linux/workqueue.h>
22#include <linux/mfd/adp5520.h>
23
24struct adp5520_led {
25 struct led_classdev cdev;
26 struct work_struct work;
27 struct device *master;
28 enum led_brightness new_brightness;
29 int id;
30 int flags;
31};
32
33static void adp5520_led_work(struct work_struct *work)
34{
35 struct adp5520_led *led = container_of(work, struct adp5520_led, work);
36 adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1,
37 led->new_brightness >> 2);
38}
39
40static void adp5520_led_set(struct led_classdev *led_cdev,
41 enum led_brightness value)
42{
43 struct adp5520_led *led;
44
45 led = container_of(led_cdev, struct adp5520_led, cdev);
46 led->new_brightness = value;
47 schedule_work(&led->work);
48}
49
50static int adp5520_led_setup(struct adp5520_led *led)
51{
52 struct device *dev = led->master;
53 int flags = led->flags;
54 int ret = 0;
55
56 switch (led->id) {
57 case FLAG_ID_ADP5520_LED1_ADP5501_LED0:
58 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
59 (flags >> ADP5520_FLAG_OFFT_SHIFT) &
60 ADP5520_FLAG_OFFT_MASK);
61 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
62 ADP5520_LED1_EN);
63 break;
64 case FLAG_ID_ADP5520_LED2_ADP5501_LED1:
65 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
66 ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
67 ADP5520_FLAG_OFFT_MASK) << 2);
68 ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
69 ADP5520_R3_MODE);
70 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
71 ADP5520_LED2_EN);
72 break;
73 case FLAG_ID_ADP5520_LED3_ADP5501_LED2:
74 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
75 ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
76 ADP5520_FLAG_OFFT_MASK) << 4);
77 ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
78 ADP5520_C3_MODE);
79 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
80 ADP5520_LED3_EN);
81 break;
82 }
83
84 return ret;
85}
86
87static int __devinit adp5520_led_prepare(struct platform_device *pdev)
88{
89 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
90 struct device *dev = pdev->dev.parent;
91 int ret = 0;
92
93 ret |= adp5520_write(dev, ADP5520_LED1_CURRENT, 0);
94 ret |= adp5520_write(dev, ADP5520_LED2_CURRENT, 0);
95 ret |= adp5520_write(dev, ADP5520_LED3_CURRENT, 0);
96 ret |= adp5520_write(dev, ADP5520_LED_TIME, pdata->led_on_time << 6);
97 ret |= adp5520_write(dev, ADP5520_LED_FADE, FADE_VAL(pdata->fade_in,
98 pdata->fade_out));
99
100 return ret;
101}
102
103static int __devinit adp5520_led_probe(struct platform_device *pdev)
104{
105 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
106 struct adp5520_led *led, *led_dat;
107 struct led_info *cur_led;
108 int ret, i;
109
110 if (pdata == NULL) {
111 dev_err(&pdev->dev, "missing platform data\n");
112 return -ENODEV;
113 }
114
115 if (pdata->num_leds > ADP5520_01_MAXLEDS) {
116 dev_err(&pdev->dev, "can't handle more than %d LEDS\n",
117 ADP5520_01_MAXLEDS);
118 return -EFAULT;
119 }
120
121 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
122 if (led == NULL) {
123 dev_err(&pdev->dev, "failed to alloc memory\n");
124 return -ENOMEM;
125 }
126
127 ret = adp5520_led_prepare(pdev);
128
129 if (ret) {
130 dev_err(&pdev->dev, "failed to write\n");
131 goto err_free;
132 }
133
134 for (i = 0; i < pdata->num_leds; ++i) {
135 cur_led = &pdata->leds[i];
136 led_dat = &led[i];
137
138 led_dat->cdev.name = cur_led->name;
139 led_dat->cdev.default_trigger = cur_led->default_trigger;
140 led_dat->cdev.brightness_set = adp5520_led_set;
141 led_dat->cdev.brightness = LED_OFF;
142
143 if (cur_led->flags & ADP5520_FLAG_LED_MASK)
144 led_dat->flags = cur_led->flags;
145 else
146 led_dat->flags = i + 1;
147
148 led_dat->id = led_dat->flags & ADP5520_FLAG_LED_MASK;
149
150 led_dat->master = pdev->dev.parent;
151 led_dat->new_brightness = LED_OFF;
152
153 INIT_WORK(&led_dat->work, adp5520_led_work);
154
155 ret = led_classdev_register(led_dat->master, &led_dat->cdev);
156 if (ret) {
157 dev_err(&pdev->dev, "failed to register LED %d\n",
158 led_dat->id);
159 goto err;
160 }
161
162 ret = adp5520_led_setup(led_dat);
163 if (ret) {
164 dev_err(&pdev->dev, "failed to write\n");
165 i++;
166 goto err;
167 }
168 }
169
170 platform_set_drvdata(pdev, led);
171 return 0;
172
173err:
174 if (i > 0) {
175 for (i = i - 1; i >= 0; i--) {
176 led_classdev_unregister(&led[i].cdev);
177 cancel_work_sync(&led[i].work);
178 }
179 }
180
181err_free:
182 kfree(led);
183 return ret;
184}
185
186static int __devexit adp5520_led_remove(struct platform_device *pdev)
187{
188 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
189 struct adp5520_led *led;
190 int i;
191
192 led = platform_get_drvdata(pdev);
193
194 adp5520_clr_bits(led->master, ADP5520_LED_CONTROL,
195 ADP5520_LED1_EN | ADP5520_LED2_EN | ADP5520_LED3_EN);
196
197 for (i = 0; i < pdata->num_leds; i++) {
198 led_classdev_unregister(&led[i].cdev);
199 cancel_work_sync(&led[i].work);
200 }
201
202 kfree(led);
203 return 0;
204}
205
206static struct platform_driver adp5520_led_driver = {
207 .driver = {
208 .name = "adp5520-led",
209 .owner = THIS_MODULE,
210 },
211 .probe = adp5520_led_probe,
212 .remove = __devexit_p(adp5520_led_remove),
213};
214
215static int __init adp5520_led_init(void)
216{
217 return platform_driver_register(&adp5520_led_driver);
218}
219module_init(adp5520_led_init);
220
221static void __exit adp5520_led_exit(void)
222{
223 platform_driver_unregister(&adp5520_led_driver);
224}
225module_exit(adp5520_led_exit);
226
227MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
228MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
229MODULE_LICENSE("GPL");
230MODULE_ALIAS("platform:adp5520-led");
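adp5520_led_probe() above follows the usual register-then-unwind idiom: when adp5520_led_setup() fails for LED i, i is bumped first so that the reverse loop under err: also tears down the classdev that was already registered in that iteration. A minimal standalone sketch of the idiom, with hypothetical register_one()/unregister_one() helpers standing in for led_classdev_register()/led_classdev_unregister():

#include <stdio.h>

static int register_one(int i)
{
        printf("register %d\n", i);
        return i == 3 ? -1 : 0;         /* pretend item 3 fails */
}

static void unregister_one(int i)
{
        printf("unregister %d\n", i);
}

int main(void)
{
        int i, ret;

        for (i = 0; i < 5; i++) {
                ret = register_one(i);
                if (ret)
                        goto err;       /* items 0..i-1 are fully set up */
        }
        return 0;

err:
        while (--i >= 0)                /* unwind in reverse order */
                unregister_one(i);
        return 1;
}
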
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index 731d4eef3425..f59ffadf5125 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -11,11 +11,24 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/pci.h>
14 15
15static int force = 0; 16static int force = 0;
16module_param(force, bool, 0444); 17module_param(force, bool, 0444);
17MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs"); 18MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
18 19
20#define MSR_LBAR_GPIO 0x5140000C
21#define CS5535_GPIO_SIZE 256
22
23static u32 gpio_base;
24
25static struct pci_device_id divil_pci[] = {
26 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
27 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
28 { } /* NULL entry */
29};
30MODULE_DEVICE_TABLE(pci, divil_pci);
31
19struct alix_led { 32struct alix_led {
20 struct led_classdev cdev; 33 struct led_classdev cdev;
21 unsigned short port; 34 unsigned short port;
@@ -30,9 +43,9 @@ static void alix_led_set(struct led_classdev *led_cdev,
30 container_of(led_cdev, struct alix_led, cdev); 43 container_of(led_cdev, struct alix_led, cdev);
31 44
32 if (brightness) 45 if (brightness)
33 outl(led_dev->on_value, led_dev->port); 46 outl(led_dev->on_value, gpio_base + led_dev->port);
34 else 47 else
35 outl(led_dev->off_value, led_dev->port); 48 outl(led_dev->off_value, gpio_base + led_dev->port);
36} 49}
37 50
38static struct alix_led alix_leds[] = { 51static struct alix_led alix_leds[] = {
@@ -41,7 +54,7 @@ static struct alix_led alix_leds[] = {
41 .name = "alix:1", 54 .name = "alix:1",
42 .brightness_set = alix_led_set, 55 .brightness_set = alix_led_set,
43 }, 56 },
44 .port = 0x6100, 57 .port = 0x00,
45 .on_value = 1 << 22, 58 .on_value = 1 << 22,
46 .off_value = 1 << 6, 59 .off_value = 1 << 6,
47 }, 60 },
@@ -50,7 +63,7 @@ static struct alix_led alix_leds[] = {
50 .name = "alix:2", 63 .name = "alix:2",
51 .brightness_set = alix_led_set, 64 .brightness_set = alix_led_set,
52 }, 65 },
53 .port = 0x6180, 66 .port = 0x80,
54 .on_value = 1 << 25, 67 .on_value = 1 << 25,
55 .off_value = 1 << 9, 68 .off_value = 1 << 9,
56 }, 69 },
@@ -59,7 +72,7 @@ static struct alix_led alix_leds[] = {
59 .name = "alix:3", 72 .name = "alix:3",
60 .brightness_set = alix_led_set, 73 .brightness_set = alix_led_set,
61 }, 74 },
62 .port = 0x6180, 75 .port = 0x80,
63 .on_value = 1 << 27, 76 .on_value = 1 << 27,
64 .off_value = 1 << 11, 77 .off_value = 1 << 11,
65 }, 78 },
@@ -101,64 +114,104 @@ static struct platform_driver alix_led_driver = {
101 }, 114 },
102}; 115};
103 116
104static int __init alix_present(void) 117static int __init alix_present(unsigned long bios_phys,
118 const char *alix_sig,
119 size_t alix_sig_len)
105{ 120{
106 const unsigned long bios_phys = 0x000f0000;
107 const size_t bios_len = 0x00010000; 121 const size_t bios_len = 0x00010000;
108 const char alix_sig[] = "PC Engines ALIX.";
109 const size_t alix_sig_len = sizeof(alix_sig) - 1;
110
111 const char *bios_virt; 122 const char *bios_virt;
112 const char *scan_end; 123 const char *scan_end;
113 const char *p; 124 const char *p;
114 int ret = 0; 125 char name[64];
115 126
116 if (force) { 127 if (force) {
117 printk(KERN_NOTICE "%s: forced to skip BIOS test, " 128 printk(KERN_NOTICE "%s: forced to skip BIOS test, "
118 "assume system has ALIX.2 style LEDs\n", 129 "assume system has ALIX.2 style LEDs\n",
119 KBUILD_MODNAME); 130 KBUILD_MODNAME);
120 ret = 1; 131 return 1;
121 goto out;
122 } 132 }
123 133
124 bios_virt = phys_to_virt(bios_phys); 134 bios_virt = phys_to_virt(bios_phys);
125 scan_end = bios_virt + bios_len - (alix_sig_len + 2); 135 scan_end = bios_virt + bios_len - (alix_sig_len + 2);
126 for (p = bios_virt; p < scan_end; p++) { 136 for (p = bios_virt; p < scan_end; p++) {
127 const char *tail; 137 const char *tail;
138 char *a;
128 139
129 if (memcmp(p, alix_sig, alix_sig_len) != 0) { 140 if (memcmp(p, alix_sig, alix_sig_len) != 0)
130 continue; 141 continue;
131 } 142
143 memcpy(name, p, sizeof(name));
144
145 /* remove the first \0 character from string */
146 a = strchr(name, '\0');
147 if (a)
148 *a = ' ';
149
150 /* cut the string at a newline */
151 a = strchr(name, '\r');
152 if (a)
153 *a = '\0';
132 154
133 tail = p + alix_sig_len; 155 tail = p + alix_sig_len;
134 if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') { 156 if ((tail[0] == '2' || tail[0] == '3')) {
135 printk(KERN_INFO 157 printk(KERN_INFO
136 "%s: system is recognized as \"%s\"\n", 158 "%s: system is recognized as \"%s\"\n",
137 KBUILD_MODNAME, p); 159 KBUILD_MODNAME, name);
138 ret = 1; 160 return 1;
139 break;
140 } 161 }
141 } 162 }
142 163
143out: 164 return 0;
144 return ret;
145} 165}
146 166
147static struct platform_device *pdev; 167static struct platform_device *pdev;
148 168
149static int __init alix_led_init(void) 169static int __init alix_pci_led_init(void)
150{ 170{
151 int ret; 171 u32 low, hi;
152 172
153 if (!alix_present()) { 173 if (pci_dev_present(divil_pci) == 0) {
154 ret = -ENODEV; 174 printk(KERN_WARNING KBUILD_MODNAME": DIVIL not found\n");
155 goto out; 175 return -ENODEV;
156 } 176 }
157 177
158 /* enable output on GPIO for LED 1,2,3 */ 178 /* Grab the GPIO I/O range */
159 outl(1 << 6, 0x6104); 179 rdmsr(MSR_LBAR_GPIO, low, hi);
160 outl(1 << 9, 0x6184); 180
161 outl(1 << 11, 0x6184); 181 /* Check the mask and whether GPIO is enabled (sanity check) */
182 if (hi != 0x0000f001) {
183 printk(KERN_WARNING KBUILD_MODNAME": GPIO not enabled\n");
184 return -ENODEV;
185 }
186
187 /* Mask off the IO base address */
188 gpio_base = low & 0x0000ff00;
189
190 if (!request_region(gpio_base, CS5535_GPIO_SIZE, KBUILD_MODNAME)) {
191 printk(KERN_ERR KBUILD_MODNAME": can't allocate I/O for GPIO\n");
192 return -ENODEV;
193 }
194
195 /* Set GPIO function to output */
196 outl(1 << 6, gpio_base + 0x04);
197 outl(1 << 9, gpio_base + 0x84);
198 outl(1 << 11, gpio_base + 0x84);
199
200 return 0;
201}
202
203static int __init alix_led_init(void)
204{
205 int ret = -ENODEV;
206 const char tinybios_sig[] = "PC Engines ALIX.";
207 const char coreboot_sig[] = "PC Engines\0ALIX.";
208
209 if (alix_present(0xf0000, tinybios_sig, sizeof(tinybios_sig) - 1) ||
210 alix_present(0x500, coreboot_sig, sizeof(coreboot_sig) - 1))
211 ret = alix_pci_led_init();
212
213 if (ret < 0)
214 return ret;
162 215
163 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); 216 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
164 if (!IS_ERR(pdev)) { 217 if (!IS_ERR(pdev)) {
@@ -168,7 +221,6 @@ static int __init alix_led_init(void)
168 } else 221 } else
169 ret = PTR_ERR(pdev); 222 ret = PTR_ERR(pdev);
170 223
171out:
172 return ret; 224 return ret;
173} 225}
174 226
@@ -176,6 +228,7 @@ static void __exit alix_led_exit(void)
176{ 228{
177 platform_device_unregister(pdev); 229 platform_device_unregister(pdev);
178 platform_driver_unregister(&alix_led_driver); 230 platform_driver_unregister(&alix_led_driver);
231 release_region(gpio_base, CS5535_GPIO_SIZE);
179} 232}
180 233
181module_init(alix_led_init); 234module_init(alix_led_init);
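The rework above stops hard-coding port 0x6100 and instead derives the GPIO I/O base from the CS5535/CS5536 GPIO LBAR MSR: the high word must read back 0x0000f001 (region enabled, expected mask) and the low word carries the I/O base, which is masked with 0x0000ff00. A standalone sketch of that decoding; the MSR low word below is an assumed value that reproduces the ports the driver used to hard-code:

#include <stdio.h>

int main(void)
{
        unsigned int low = 0x00006101;  /* hypothetical MSR_LBAR_GPIO low word */
        unsigned int hi  = 0x0000f001;  /* mask + enable value the driver expects */
        unsigned int gpio_base;

        if (hi != 0x0000f001)
                return 1;               /* GPIO LBAR not enabled */

        gpio_base = low & 0x0000ff00;   /* keep the I/O base, drop the low bits */
        printf("gpio_base     = 0x%04x\n", gpio_base);          /* 0x6100 */
        printf("alix:1 port   = 0x%04x\n", gpio_base + 0x00);   /* 0x6100 */
        printf("alix:2/3 port = 0x%04x\n", gpio_base + 0x80);   /* 0x6180 */
        return 0;
}
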
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8816806accd2..da5fb016b1a5 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -31,7 +31,7 @@ static struct led_classdev qube_front_led = {
31 .name = "qube::front", 31 .name = "qube::front",
32 .brightness = LED_FULL, 32 .brightness = LED_FULL,
33 .brightness_set = qube_front_led_set, 33 .brightness_set = qube_front_led_set,
34 .default_trigger = "ide-disk", 34 .default_trigger = "default-on",
35}; 35};
36 36
37static int __devinit cobalt_qube_led_probe(struct platform_device *pdev) 37static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
@@ -43,7 +43,7 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
43 if (!res) 43 if (!res)
44 return -EBUSY; 44 return -EBUSY;
45 45
46 led_port = ioremap(res->start, res->end - res->start + 1); 46 led_port = ioremap(res->start, resource_size(res));
47 if (!led_port) 47 if (!led_port)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index defc212105f3..438d48384636 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -84,7 +84,7 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
84 if (!res) 84 if (!res)
85 return -EBUSY; 85 return -EBUSY;
86 86
87 led_port = ioremap(res->start, res->end - res->start + 1); 87 led_port = ioremap(res->start, resource_size(res));
88 if (!led_port) 88 if (!led_port)
89 return -ENOMEM; 89 return -ENOMEM;
90 90
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
new file mode 100644
index 000000000000..fee40a841959
--- /dev/null
+++ b/drivers/leds/leds-lt3593.c
@@ -0,0 +1,217 @@
1/*
2 * LEDs driver for LT3593 controllers
3 *
4 * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
5 *
6 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
7 *
8 * Based on leds-gpio.c,
9 *
10 * Copyright (C) 2007 8D Technologies inc.
11 * Raphael Assenat <raph@8d.com>
12 * Copyright (C) 2008 Freescale Semiconductor, Inc.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/leds.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/gpio.h>
26
27struct lt3593_led_data {
28 struct led_classdev cdev;
29 unsigned gpio;
30 struct work_struct work;
31 u8 new_level;
32};
33
34static void lt3593_led_work(struct work_struct *work)
35{
36 int pulses;
37 struct lt3593_led_data *led_dat =
38 container_of(work, struct lt3593_led_data, work);
39
40 /*
41 * The LT3593 resets its internal current level register to the maximum
42 * level on the first falling edge on the control pin. Each following
43 * falling edge decreases the current level by 625uA. Up to 32 pulses
 44 * can be sent, so the maximum current reduction is 20mA.
45 * After a timeout of 128us, the value is taken from the register and
 46 * applied to the output driver.
47 */
48
49 if (led_dat->new_level == 0) {
50 gpio_set_value_cansleep(led_dat->gpio, 0);
51 return;
52 }
53
54 pulses = 32 - (led_dat->new_level * 32) / 255;
55
56 if (pulses == 0) {
57 gpio_set_value_cansleep(led_dat->gpio, 0);
58 mdelay(1);
59 gpio_set_value_cansleep(led_dat->gpio, 1);
60 return;
61 }
62
63 gpio_set_value_cansleep(led_dat->gpio, 1);
64
65 while (pulses--) {
66 gpio_set_value_cansleep(led_dat->gpio, 0);
67 udelay(1);
68 gpio_set_value_cansleep(led_dat->gpio, 1);
69 udelay(1);
70 }
71}
72
73static void lt3593_led_set(struct led_classdev *led_cdev,
74 enum led_brightness value)
75{
76 struct lt3593_led_data *led_dat =
77 container_of(led_cdev, struct lt3593_led_data, cdev);
78
79 led_dat->new_level = value;
80 schedule_work(&led_dat->work);
81}
82
83static int __devinit create_lt3593_led(const struct gpio_led *template,
84 struct lt3593_led_data *led_dat, struct device *parent)
85{
86 int ret, state;
87
88 /* skip leds on GPIOs that aren't available */
89 if (!gpio_is_valid(template->gpio)) {
90 printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
91 KBUILD_MODNAME, template->gpio, template->name);
92 return 0;
93 }
94
95 ret = gpio_request(template->gpio, template->name);
96 if (ret < 0)
97 return ret;
98
99 led_dat->cdev.name = template->name;
100 led_dat->cdev.default_trigger = template->default_trigger;
101 led_dat->gpio = template->gpio;
102
103 led_dat->cdev.brightness_set = lt3593_led_set;
104
105 state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
106 led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
107
108 if (!template->retain_state_suspended)
109 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
110
111 ret = gpio_direction_output(led_dat->gpio, state);
112 if (ret < 0)
113 goto err;
114
115 INIT_WORK(&led_dat->work, lt3593_led_work);
116
117 ret = led_classdev_register(parent, &led_dat->cdev);
118 if (ret < 0)
119 goto err;
120
121 printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
122 KBUILD_MODNAME, template->name, template->gpio);
123
124 return 0;
125
126err:
127 gpio_free(led_dat->gpio);
128 return ret;
129}
130
131static void delete_lt3593_led(struct lt3593_led_data *led)
132{
133 if (!gpio_is_valid(led->gpio))
134 return;
135
136 led_classdev_unregister(&led->cdev);
137 cancel_work_sync(&led->work);
138 gpio_free(led->gpio);
139}
140
141static int __devinit lt3593_led_probe(struct platform_device *pdev)
142{
143 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
144 struct lt3593_led_data *leds_data;
145 int i, ret = 0;
146
147 if (!pdata)
148 return -EBUSY;
149
150 leds_data = kzalloc(sizeof(struct lt3593_led_data) * pdata->num_leds,
151 GFP_KERNEL);
152 if (!leds_data)
153 return -ENOMEM;
154
155 for (i = 0; i < pdata->num_leds; i++) {
156 ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
157 &pdev->dev);
158 if (ret < 0)
159 goto err;
160 }
161
162 platform_set_drvdata(pdev, leds_data);
163
164 return 0;
165
166err:
167 for (i = i - 1; i >= 0; i--)
168 delete_lt3593_led(&leds_data[i]);
169
170 kfree(leds_data);
171
172 return ret;
173}
174
175static int __devexit lt3593_led_remove(struct platform_device *pdev)
176{
177 int i;
178 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
179 struct lt3593_led_data *leds_data;
180
181 leds_data = platform_get_drvdata(pdev);
182
183 for (i = 0; i < pdata->num_leds; i++)
184 delete_lt3593_led(&leds_data[i]);
185
186 kfree(leds_data);
187
188 return 0;
189}
190
191static struct platform_driver lt3593_led_driver = {
192 .probe = lt3593_led_probe,
193 .remove = __devexit_p(lt3593_led_remove),
194 .driver = {
195 .name = "leds-lt3593",
196 .owner = THIS_MODULE,
197 },
198};
199
200MODULE_ALIAS("platform:leds-lt3593");
201
202static int __init lt3593_led_init(void)
203{
204 return platform_driver_register(&lt3593_led_driver);
205}
206
207static void __exit lt3593_led_exit(void)
208{
209 platform_driver_unregister(&lt3593_led_driver);
210}
211
212module_init(lt3593_led_init);
213module_exit(lt3593_led_exit);
214
215MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
216MODULE_DESCRIPTION("LED driver for LT3593 controllers");
217MODULE_LICENSE("GPL");
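lt3593_led_work() above turns a classdev brightness into a number of falling edges with pulses = 32 - (level * 32) / 255: level 0 is handled separately as plain off, and a result of 0 pulses (full brightness) is expressed as a single off/on toggle so the part resets to its maximum current. A quick standalone check of that mapping:

#include <stdio.h>

int main(void)
{
        int level;

        /* level 0 is handled earlier in the driver as plain GPIO-low (off) */
        for (level = 51; level <= 255; level += 51) {
                int pulses = 32 - (level * 32) / 255;

                printf("level %3d -> %2d falling edge(s)%s\n", level, pulses,
                       pulses == 0 ? " (single off/on toggle, full current)" : "");
        }
        return 0;
}
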
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index cdfdc8714e10..88b1dd091cfb 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,7 +27,6 @@ struct led_pwm_data {
27 struct pwm_device *pwm; 27 struct pwm_device *pwm;
28 unsigned int active_low; 28 unsigned int active_low;
29 unsigned int period; 29 unsigned int period;
30 unsigned int max_brightness;
31}; 30};
32 31
33static void led_pwm_set(struct led_classdev *led_cdev, 32static void led_pwm_set(struct led_classdev *led_cdev,
@@ -35,7 +34,7 @@ static void led_pwm_set(struct led_classdev *led_cdev,
35{ 34{
36 struct led_pwm_data *led_dat = 35 struct led_pwm_data *led_dat =
37 container_of(led_cdev, struct led_pwm_data, cdev); 36 container_of(led_cdev, struct led_pwm_data, cdev);
38 unsigned int max = led_dat->max_brightness; 37 unsigned int max = led_dat->cdev.max_brightness;
39 unsigned int period = led_dat->period; 38 unsigned int period = led_dat->period;
40 39
41 if (brightness == 0) { 40 if (brightness == 0) {
@@ -77,10 +76,10 @@ static int led_pwm_probe(struct platform_device *pdev)
77 led_dat->cdev.name = cur_led->name; 76 led_dat->cdev.name = cur_led->name;
78 led_dat->cdev.default_trigger = cur_led->default_trigger; 77 led_dat->cdev.default_trigger = cur_led->default_trigger;
79 led_dat->active_low = cur_led->active_low; 78 led_dat->active_low = cur_led->active_low;
80 led_dat->max_brightness = cur_led->max_brightness;
81 led_dat->period = cur_led->pwm_period_ns; 79 led_dat->period = cur_led->pwm_period_ns;
82 led_dat->cdev.brightness_set = led_pwm_set; 80 led_dat->cdev.brightness_set = led_pwm_set;
83 led_dat->cdev.brightness = LED_OFF; 81 led_dat->cdev.brightness = LED_OFF;
82 led_dat->cdev.max_brightness = cur_led->max_brightness;
84 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 83 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
85 84
86 ret = led_classdev_register(&pdev->dev, &led_dat->cdev); 85 ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
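Moving max_brightness into the led_classdev keeps the LED core, sysfs and led_pwm_set() agreeing on one ceiling; led_pwm_set() scales the PWM duty cycle by brightness against that ceiling. The sketch below assumes the straightforward duty = brightness * period / max computation and a 1 ms period, purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int period = 1000000;  /* assumed PWM period: 1 ms, in ns */
        unsigned int max = 255;         /* cdev.max_brightness */
        unsigned int brightness;

        for (brightness = 0; brightness <= max; brightness += 85)
                printf("brightness %3u -> duty %7u ns\n",
                       brightness, brightness * period / max);
        return 0;
}
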
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
new file mode 100644
index 000000000000..7f00de3ef922
--- /dev/null
+++ b/drivers/leds/leds-regulator.c
@@ -0,0 +1,242 @@
1/*
2 * leds-regulator.c - LED class driver for regulator driven LEDs.
3 *
4 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
5 *
6 * Inspired by leds-wm8350 driver.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/workqueue.h>
17#include <linux/leds.h>
18#include <linux/leds-regulator.h>
19#include <linux/platform_device.h>
20#include <linux/regulator/consumer.h>
21
22#define to_regulator_led(led_cdev) \
23 container_of(led_cdev, struct regulator_led, cdev)
24
25struct regulator_led {
26 struct led_classdev cdev;
27 enum led_brightness value;
28 int enabled;
29 struct mutex mutex;
30 struct work_struct work;
31
32 struct regulator *vcc;
33};
34
35static inline int led_regulator_get_max_brightness(struct regulator *supply)
36{
37 int ret;
38 int voltage = regulator_list_voltage(supply, 0);
39
40 if (voltage <= 0)
41 return 1;
42
43 /* even if regulator can't change voltages,
44 * we still assume it can change status
45 * and the LED can be turned on and off.
46 */
47 ret = regulator_set_voltage(supply, voltage, voltage);
48 if (ret < 0)
49 return 1;
50
51 return regulator_count_voltages(supply);
52}
53
54static int led_regulator_get_voltage(struct regulator *supply,
55 enum led_brightness brightness)
56{
57 if (brightness == 0)
58 return -EINVAL;
59
60 return regulator_list_voltage(supply, brightness - 1);
61}
62
63
64static void regulator_led_enable(struct regulator_led *led)
65{
66 int ret;
67
68 if (led->enabled)
69 return;
70
71 ret = regulator_enable(led->vcc);
72 if (ret != 0) {
73 dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
74 return;
75 }
76
77 led->enabled = 1;
78}
79
80static void regulator_led_disable(struct regulator_led *led)
81{
82 int ret;
83
84 if (!led->enabled)
85 return;
86
87 ret = regulator_disable(led->vcc);
88 if (ret != 0) {
89 dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
90 return;
91 }
92
93 led->enabled = 0;
94}
95
96static void regulator_led_set_value(struct regulator_led *led)
97{
98 int voltage;
99 int ret;
100
101 mutex_lock(&led->mutex);
102
103 if (led->value == LED_OFF) {
104 regulator_led_disable(led);
105 goto out;
106 }
107
108 if (led->cdev.max_brightness > 1) {
109 voltage = led_regulator_get_voltage(led->vcc, led->value);
110 dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
111 led->value, voltage);
112
113 ret = regulator_set_voltage(led->vcc, voltage, voltage);
114 if (ret != 0)
115 dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
116 voltage, ret);
117 }
118
119 regulator_led_enable(led);
120
121out:
122 mutex_unlock(&led->mutex);
123}
124
125static void led_work(struct work_struct *work)
126{
127 struct regulator_led *led;
128
129 led = container_of(work, struct regulator_led, work);
130 regulator_led_set_value(led);
131}
132
133static void regulator_led_brightness_set(struct led_classdev *led_cdev,
134 enum led_brightness value)
135{
136 struct regulator_led *led = to_regulator_led(led_cdev);
137
138 led->value = value;
139 schedule_work(&led->work);
140}
141
142static int __devinit regulator_led_probe(struct platform_device *pdev)
143{
144 struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
145 struct regulator_led *led;
146 struct regulator *vcc;
147 int ret = 0;
148
149 if (pdata == NULL) {
150 dev_err(&pdev->dev, "no platform data\n");
151 return -ENODEV;
152 }
153
154 vcc = regulator_get_exclusive(&pdev->dev, "vled");
155 if (IS_ERR(vcc)) {
156 dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
157 return PTR_ERR(vcc);
158 }
159
160 led = kzalloc(sizeof(*led), GFP_KERNEL);
161 if (led == NULL) {
162 ret = -ENOMEM;
163 goto err_vcc;
164 }
165
166 led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
167 if (pdata->brightness > led->cdev.max_brightness) {
168 dev_err(&pdev->dev, "Invalid default brightness %d\n",
169 pdata->brightness);
170 ret = -EINVAL;
171 goto err_led;
172 }
173 led->value = pdata->brightness;
174
175 led->cdev.brightness_set = regulator_led_brightness_set;
176 led->cdev.name = pdata->name;
177 led->cdev.flags |= LED_CORE_SUSPENDRESUME;
178 led->vcc = vcc;
179
180 mutex_init(&led->mutex);
181 INIT_WORK(&led->work, led_work);
182
183 platform_set_drvdata(pdev, led);
184
185 ret = led_classdev_register(&pdev->dev, &led->cdev);
186 if (ret < 0) {
187 cancel_work_sync(&led->work);
188 goto err_led;
189 }
190
191 /* to expose the default value to userspace */
192 led->cdev.brightness = led->value;
193
194 /* Set the default led status */
195 regulator_led_set_value(led);
196
197 return 0;
198
199err_led:
200 kfree(led);
201err_vcc:
202 regulator_put(vcc);
203 return ret;
204}
205
206static int __devexit regulator_led_remove(struct platform_device *pdev)
207{
208 struct regulator_led *led = platform_get_drvdata(pdev);
209
210 led_classdev_unregister(&led->cdev);
211 cancel_work_sync(&led->work);
212 regulator_led_disable(led);
213 regulator_put(led->vcc);
214 kfree(led);
215 return 0;
216}
217
218static struct platform_driver regulator_led_driver = {
219 .driver = {
220 .name = "leds-regulator",
221 .owner = THIS_MODULE,
222 },
223 .probe = regulator_led_probe,
224 .remove = __devexit_p(regulator_led_remove),
225};
226
227static int __init regulator_led_init(void)
228{
229 return platform_driver_register(&regulator_led_driver);
230}
231module_init(regulator_led_init);
232
233static void __exit regulator_led_exit(void)
234{
235 platform_driver_unregister(&regulator_led_driver);
236}
237module_exit(regulator_led_exit);
238
239MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
240MODULE_DESCRIPTION("Regulator driven LED driver");
241MODULE_LICENSE("GPL");
242MODULE_ALIAS("platform:leds-regulator");
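The brightness scale of this driver maps directly onto the regulator's voltage table: max_brightness comes from regulator_count_voltages(), brightness 0 disables the supply, and brightness N (1..max) selects regulator_list_voltage(N - 1). A standalone sketch of that mapping over a made-up four-entry voltage table:

#include <stdio.h>

static const int voltages_uV[] = { 1800000, 2500000, 2800000, 3000000 };

int main(void)
{
        int max_brightness = sizeof(voltages_uV) / sizeof(voltages_uV[0]);
        int b;

        for (b = 0; b <= max_brightness; b++) {
                if (b == 0)
                        printf("brightness 0 -> regulator disabled\n");
                else
                        printf("brightness %d -> %d uV\n", b, voltages_uV[b - 1]);
        }
        return 0;
}
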
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
new file mode 100644
index 000000000000..97f04984c1ca
--- /dev/null
+++ b/drivers/leds/leds-ss4200.c
@@ -0,0 +1,556 @@
1/*
2 * SS4200-E Hardware API
3 * Copyright (c) 2009, Intel Corporation.
4 * Copyright IBM Corporation, 2009
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Author: Dave Hansen <dave@sr71.net>
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dmi.h>
25#include <linux/init.h>
26#include <linux/ioport.h>
27#include <linux/kernel.h>
28#include <linux/leds.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/types.h>
32#include <linux/uaccess.h>
33
34MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>");
35MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver");
36MODULE_LICENSE("GPL");
37
38/*
39 * ICH7 LPC/GPIO PCI Config register offsets
40 */
41#define PMBASE 0x040
42#define GPIO_BASE 0x048
43#define GPIO_CTRL 0x04c
44#define GPIO_EN 0x010
45
46/*
47 * The ICH7 GPIO register block is 64 bytes in size.
48 */
49#define ICH7_GPIO_SIZE 64
50
51/*
52 * Define register offsets within the ICH7 register block.
53 */
54#define GPIO_USE_SEL 0x000
55#define GP_IO_SEL 0x004
56#define GP_LVL 0x00c
57#define GPO_BLINK 0x018
58#define GPI_INV 0x030
59#define GPIO_USE_SEL2 0x034
60#define GP_IO_SEL2 0x038
61#define GP_LVL2 0x03c
62
63/*
64 * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
65 */
66static struct pci_device_id ich7_lpc_pci_id[] =
67{
68 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
69 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
70 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
71 { } /* NULL entry */
72};
73
74MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id);
75
76static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
77{
78 pr_info("detected '%s'\n", id->ident);
79 return 1;
80}
81
82static unsigned int __initdata nodetect;
83module_param_named(nodetect, nodetect, bool, 0);
84MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
85
86/*
87 * struct nas_led_whitelist - List of known good models
88 *
89 * Contains the known good models this driver is compatible with.
90 * When adding a new model try to be as strict as possible. This
91 * makes it possible to keep the false positives (the model is
92 * detected as working, but in reality it is not) as low as
93 * possible.
94 */
95static struct dmi_system_id __initdata nas_led_whitelist[] = {
96 {
97 .callback = ss4200_led_dmi_callback,
98 .ident = "Intel SS4200-E",
99 .matches = {
100 DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
101 DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"),
102 DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
103 }
104 },
105};
106
107/*
108 * Base I/O address assigned to the Power Management register block
109 */
110static u32 g_pm_io_base;
111
112/*
113 * Base I/O address assigned to the ICH7 GPIO register block
114 */
115static u32 nas_gpio_io_base;
116
117/*
118 * When we successfully register a region, we are returned a resource.
119 * We use these to identify which regions we need to release on our way
120 * back out.
121 */
122static struct resource *gp_gpio_resource;
123
124struct nasgpio_led {
125 char *name;
126 u32 gpio_bit;
127 struct led_classdev led_cdev;
128};
129
130/*
131 * gpio_bit(s) are the ICH7 GPIO bit assignments
132 */
133static struct nasgpio_led nasgpio_leds[] = {
134 { .name = "hdd1:blue:sata", .gpio_bit = 0 },
135 { .name = "hdd1:amber:sata", .gpio_bit = 1 },
136 { .name = "hdd2:blue:sata", .gpio_bit = 2 },
137 { .name = "hdd2:amber:sata", .gpio_bit = 3 },
138 { .name = "hdd3:blue:sata", .gpio_bit = 4 },
139 { .name = "hdd3:amber:sata", .gpio_bit = 5 },
140 { .name = "hdd4:blue:sata", .gpio_bit = 6 },
141 { .name = "hdd4:amber:sata", .gpio_bit = 7 },
142 { .name = "power:blue:power", .gpio_bit = 27},
143 { .name = "power:amber:power", .gpio_bit = 28},
144};
145
146#define NAS_RECOVERY 0x00000400 /* GPIO10 */
147
148static struct nasgpio_led *
149led_classdev_to_nasgpio_led(struct led_classdev *led_cdev)
150{
151 return container_of(led_cdev, struct nasgpio_led, led_cdev);
152}
153
154static struct nasgpio_led *get_led_named(char *name)
155{
156 int i;
157 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
158 if (strcmp(nasgpio_leds[i].name, name))
159 continue;
160 return &nasgpio_leds[i];
161 }
162 return NULL;
163}
164
165/*
166 * This protects access to the gpio ports.
167 */
168static DEFINE_SPINLOCK(nasgpio_gpio_lock);
169
170/*
171 * There are two gpio ports, one for blinking and the other
172 * for power. @port tells us if we're doing blinking or
173 * power control.
174 *
175 * Caller must hold nasgpio_gpio_lock
176 */
177static void __nasgpio_led_set_attr(struct led_classdev *led_cdev,
178 u32 port, u32 value)
179{
180 struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
181 u32 gpio_out;
182
183 gpio_out = inl(nas_gpio_io_base + port);
184 if (value)
185 gpio_out |= (1<<led->gpio_bit);
186 else
187 gpio_out &= ~(1<<led->gpio_bit);
188
189 outl(gpio_out, nas_gpio_io_base + port);
190}
191
192static void nasgpio_led_set_attr(struct led_classdev *led_cdev,
193 u32 port, u32 value)
194{
195 spin_lock(&nasgpio_gpio_lock);
196 __nasgpio_led_set_attr(led_cdev, port, value);
197 spin_unlock(&nasgpio_gpio_lock);
198}
199
200u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
201{
202 struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
203 u32 gpio_in;
204
205 spin_lock(&nasgpio_gpio_lock);
206 gpio_in = inl(nas_gpio_io_base + port);
207 spin_unlock(&nasgpio_gpio_lock);
208 if (gpio_in & (1<<led->gpio_bit))
209 return 1;
210 return 0;
211}
212
213/*
214 * There is actual brightness control in the hardware,
215 * but it is via smbus commands and not implemented
216 * in this driver.
217 */
218static void nasgpio_led_set_brightness(struct led_classdev *led_cdev,
219 enum led_brightness brightness)
220{
221 u32 setting = 0;
222 if (brightness >= LED_HALF)
223 setting = 1;
224 /*
225 * Hold the lock across both operations. This ensures
226 * consistency so that both the "turn off blinking"
227 * and "turn light off" operations complete as a set.
228 */
229 spin_lock(&nasgpio_gpio_lock);
230 /*
231 * LED class documentation asks that past blink state
232 * be disabled when brightness is turned to zero.
233 */
234 if (brightness == 0)
235 __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0);
236 __nasgpio_led_set_attr(led_cdev, GP_LVL, setting);
237 spin_unlock(&nasgpio_gpio_lock);
238}
239
240static int nasgpio_led_set_blink(struct led_classdev *led_cdev,
241 unsigned long *delay_on,
242 unsigned long *delay_off)
243{
244 u32 setting = 1;
245 if (!(*delay_on == 0 && *delay_off == 0) &&
246 !(*delay_on == 500 && *delay_off == 500))
247 return -EINVAL;
248 /*
249 * These are very approximate.
250 */
251 *delay_on = 500;
252 *delay_off = 500;
253
254 nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting);
255
256 return 0;
257}
258
259
260/*
261 * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have
 262 * already taken care of this, but we will do so in a non-destructive manner
263 * so that we have what we need whether the BIOS did it or not.
264 */
265static int __devinit ich7_gpio_init(struct device *dev)
266{
267 int i;
268 u32 config_data = 0;
269 u32 all_nas_led = 0;
270
271 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
272 all_nas_led |= (1<<nasgpio_leds[i].gpio_bit);
273
274 spin_lock(&nasgpio_gpio_lock);
275 /*
276 * We need to enable all of the GPIO lines used by the NAS box,
277 * so we will read the current Use Selection and add our usage
278 * to it. This should be benign with regard to the original
279 * BIOS configuration.
280 */
281 config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
282 dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data);
283 config_data |= all_nas_led + NAS_RECOVERY;
284 outl(config_data, nas_gpio_io_base + GPIO_USE_SEL);
285 config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
286 dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data);
287
288 /*
289 * The LED GPIO outputs need to be configured for output, so we
290 * will ensure that all LED lines are cleared for output and the
291 * RECOVERY line ready for input. This too should be benign with
292 * regard to BIOS configuration.
293 */
294 config_data = inl(nas_gpio_io_base + GP_IO_SEL);
295 dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n",
296 config_data);
297 config_data &= ~all_nas_led;
298 config_data |= NAS_RECOVERY;
299 outl(config_data, nas_gpio_io_base + GP_IO_SEL);
300 config_data = inl(nas_gpio_io_base + GP_IO_SEL);
301 dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data);
302
303 /*
304 * In our final system, the BIOS will initialize the state of all
305 * of the LEDs. For now, we turn them all off (or Low).
306 */
307 config_data = inl(nas_gpio_io_base + GP_LVL);
308 dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data);
309 /*
310 * In our final system, the BIOS will initialize the blink state of all
311 * of the LEDs. For now, we turn blink off for all of them.
312 */
313 config_data = inl(nas_gpio_io_base + GPO_BLINK);
314 dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data);
315
316 /*
317 * At this moment, I am unsure if anything needs to happen with GPI_INV
318 */
319 config_data = inl(nas_gpio_io_base + GPI_INV);
320 dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data);
321
322 spin_unlock(&nasgpio_gpio_lock);
323 return 0;
324}
325
326static void ich7_lpc_cleanup(struct device *dev)
327{
328 /*
329 * If we were given exclusive use of the GPIO
330 * I/O Address range, we must return it.
331 */
332 if (gp_gpio_resource) {
333 dev_dbg(dev, ": Releasing GPIO I/O addresses\n");
334 release_region(nas_gpio_io_base, ICH7_GPIO_SIZE);
335 gp_gpio_resource = NULL;
336 }
337}
338
339/*
340 * The OS has determined that the LPC of the Intel ICH7 Southbridge is present
 341 * so we can retrieve the required operational information and prepare the GPIO.
342 */
343static struct pci_dev *nas_gpio_pci_dev;
344static int __devinit ich7_lpc_probe(struct pci_dev *dev,
345 const struct pci_device_id *id)
346{
347 int status;
348 u32 gc = 0;
349
350 status = pci_enable_device(dev);
351 if (status) {
352 dev_err(&dev->dev, "pci_enable_device failed\n");
353 return -EIO;
354 }
355
356 nas_gpio_pci_dev = dev;
357 status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
358 if (status)
359 goto out;
360 g_pm_io_base &= 0x00000ff80;
361
362 status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
363 if (!(GPIO_EN & gc)) {
364 status = -EEXIST;
365 dev_info(&dev->dev,
366 "ERROR: The LPC GPIO Block has not been enabled.\n");
367 goto out;
368 }
369
370 status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
371 if (0 > status) {
372 dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
373 goto out;
374 }
375 dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
376 nas_gpio_io_base &= 0x00000ffc0;
377
378 /*
 379 * Ensure that we have exclusive access to the GPIO I/O address range.
380 */
381 gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE,
382 KBUILD_MODNAME);
383 if (NULL == gp_gpio_resource) {
384 dev_info(&dev->dev,
385 "ERROR Unable to register GPIO I/O addresses.\n");
386 status = -1;
387 goto out;
388 }
389
390 /*
391 * Initialize the GPIO for NAS/Home Server Use
392 */
393 ich7_gpio_init(&dev->dev);
394
395out:
396 if (status) {
397 ich7_lpc_cleanup(&dev->dev);
398 pci_disable_device(dev);
399 }
400 return status;
401}
402
403static void ich7_lpc_remove(struct pci_dev *dev)
404{
405 ich7_lpc_cleanup(&dev->dev);
406 pci_disable_device(dev);
407}
408
409/*
410 * pci_driver structure passed to the PCI modules
411 */
412static struct pci_driver nas_gpio_pci_driver = {
413 .name = KBUILD_MODNAME,
414 .id_table = ich7_lpc_pci_id,
415 .probe = ich7_lpc_probe,
416 .remove = ich7_lpc_remove,
417};
418
419static struct led_classdev *get_classdev_for_led_nr(int nr)
420{
421 struct nasgpio_led *nas_led = &nasgpio_leds[nr];
422 struct led_classdev *led = &nas_led->led_cdev;
423 return led;
424}
425
426
427static void set_power_light_amber_noblink(void)
428{
429 struct nasgpio_led *amber = get_led_named("power:amber:power");
430 struct nasgpio_led *blue = get_led_named("power:blue:power");
431
432 if (!amber || !blue)
433 return;
434 /*
435 * LED_OFF implies disabling future blinking
436 */
437 pr_debug("setting blue off and amber on\n");
438
439 nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF);
440 nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
441}
442
443static ssize_t nas_led_blink_show(struct device *dev,
444 struct device_attribute *attr, char *buf)
445{
446 struct led_classdev *led = dev_get_drvdata(dev);
447 int blinking = 0;
448 if (nasgpio_led_get_attr(led, GPO_BLINK))
449 blinking = 1;
450 return sprintf(buf, "%u\n", blinking);
451}
452
453static ssize_t nas_led_blink_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
456{
457 int ret;
458 struct led_classdev *led = dev_get_drvdata(dev);
459 unsigned long blink_state;
460
461 ret = strict_strtoul(buf, 10, &blink_state);
462 if (ret)
463 return ret;
464
465 nasgpio_led_set_attr(led, GPO_BLINK, blink_state);
466
467 return size;
468}
469
470static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
471
472static int register_nasgpio_led(int led_nr)
473{
474 int ret;
475 struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
476 struct led_classdev *led = get_classdev_for_led_nr(led_nr);
477
478 led->name = nas_led->name;
479 led->brightness = LED_OFF;
480 if (nasgpio_led_get_attr(led, GP_LVL))
481 led->brightness = LED_FULL;
482 led->brightness_set = nasgpio_led_set_brightness;
483 led->blink_set = nasgpio_led_set_blink;
484 ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
485 if (ret)
486 return ret;
487 ret = device_create_file(led->dev, &dev_attr_blink);
488 if (ret)
489 led_classdev_unregister(led);
490 return ret;
491}
492
493static void unregister_nasgpio_led(int led_nr)
494{
495 struct led_classdev *led = get_classdev_for_led_nr(led_nr);
496 led_classdev_unregister(led);
497 device_remove_file(led->dev, &dev_attr_blink);
498}
499/*
500 * module load/initialization
501 */
502static int __init nas_gpio_init(void)
503{
504 int i;
505 int ret = 0;
506 int nr_devices = 0;
507
508 nr_devices = dmi_check_system(nas_led_whitelist);
509 if (nodetect) {
510 pr_info("skipping hardware autodetection\n");
511 pr_info("Please send 'dmidecode' output to dave@sr71.net\n");
512 nr_devices++;
513 }
514
515 if (nr_devices <= 0) {
516 pr_info("no LED devices found\n");
517 return -ENODEV;
518 }
519
520 pr_info("registering PCI driver\n");
521 ret = pci_register_driver(&nas_gpio_pci_driver);
522 if (ret)
523 return ret;
524 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
525 ret = register_nasgpio_led(i);
526 if (ret)
527 goto out_err;
528 }
529 /*
530 * When the system powers on, the BIOS leaves the power
531 * light blue and blinking. This will turn it solid
532 * amber once the driver is loaded.
533 */
534 set_power_light_amber_noblink();
535 return 0;
536out_err:
537 for (; i >= 0; i--)
538 unregister_nasgpio_led(i);
539 pci_unregister_driver(&nas_gpio_pci_driver);
540 return ret;
541}
542
543/*
544 * module unload
545 */
546static void __exit nas_gpio_exit(void)
547{
548 int i;
549 pr_info("Unregistering driver\n");
550 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
551 unregister_nasgpio_led(i);
552 pci_unregister_driver(&nas_gpio_pci_driver);
553}
554
555module_init(nas_gpio_init);
556module_exit(nas_gpio_exit);
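Every LED operation above is a read-modify-write of a shared 32-bit register (GP_LVL for level, GPO_BLINK for blink), one bit per LED, which is why nasgpio_led_set_brightness() holds the spinlock across the blink-clear and level-write pair. The bit manipulation in isolation, with a made-up starting register value:

#include <stdio.h>

int main(void)
{
        unsigned int gp_lvl = 0x00000005;       /* hypothetical GP_LVL contents */
        int gpio_bit = 27;                      /* power:blue:power */

        gp_lvl |= 1u << gpio_bit;               /* turn the LED on */
        printf("after set:   0x%08x\n", gp_lvl);
        gp_lvl &= ~(1u << gpio_bit);            /* turn it back off */
        printf("after clear: 0x%08x\n", gp_lvl);
        return 0;
}
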
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 610e914abe6c..85bc6a685e36 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1587{ 1587{
1588 u8 __iomem *mem; 1588 u8 __iomem *mem;
1589 int ii; 1589 int ii;
1590 unsigned long mem_phys; 1590 resource_size_t mem_phys;
1591 unsigned long port; 1591 unsigned long port;
1592 u32 msize; 1592 u32 msize;
1593 u32 psize; 1593 u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1677 return -EINVAL; 1677 return -EINVAL;
1678 } 1678 }
1679 ioc->memmap = mem; 1679 ioc->memmap = mem;
1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n", 1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
1681 ioc->name, mem, mem_phys)); 1681 ioc->name, mem, (unsigned long long)mem_phys));
1682 1682
1683 ioc->mem_phys = mem_phys; 1683 ioc->mem_phys = mem_phys;
1684 ioc->chip = (SYSIF_REGS __iomem *)mem; 1684 ioc->chip = (SYSIF_REGS __iomem *)mem;
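resource_size_t follows phys_addr_t and is 32 or 64 bits wide depending on the kernel configuration, so the hunk above prints it through an explicit (unsigned long long) cast with %llx rather than %lx. The same pattern in a tiny userspace analogue:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mem_phys = 0x1f0000000ULL;     /* example BAR address above 4 GiB */

        /* cast so that %llx is correct whatever the underlying width is */
        printf("mem_phys = %llx\n", (unsigned long long)mem_phys);
        return 0;
}
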
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1a7a9fc50ea1..e3551d20464f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -203,6 +203,7 @@ config CS5535_MFGPT
203 203
204config CS5535_MFGPT_DEFAULT_IRQ 204config CS5535_MFGPT_DEFAULT_IRQ
205 int 205 int
206 depends on CS5535_MFGPT
206 default 7 207 default 7
207 help 208 help
208 MFGPTs on the CS5535 require an interrupt. The selected IRQ 209 MFGPTs on the CS5535 require an interrupt. The selected IRQ
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index e9eae4a78402..1eac626e710a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", 391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown", 392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", 393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
394 [ENCLOSURE_STATUS_MAX] = NULL,
394}; 395};
395 396
396static const char *const enclosure_type [] = { 397static const char *const enclosure_type [] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index cdb845b68ab5..06b64085a355 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
516 * The number of functions on the card is encoded inside 516 * The number of functions on the card is encoded inside
517 * the ocr. 517 * the ocr.
518 */ 518 */
519 card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28; 519 funcs = (ocr & 0x70000000) >> 28;
520 card->sdio_funcs = 0;
520 521
521 /* 522 /*
522 * If needed, disconnect card detection pull-up resistor. 523 * If needed, disconnect card detection pull-up resistor.
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
528 /* 529 /*
529 * Initialize (but don't add) all present functions. 530 * Initialize (but don't add) all present functions.
530 */ 531 */
531 for (i = 0;i < funcs;i++) { 532 for (i = 0; i < funcs; i++, card->sdio_funcs++) {
532 err = sdio_init_func(host->card, i + 1); 533 err = sdio_init_func(host->card, i + 1);
533 if (err) 534 if (err)
534 goto remove; 535 goto remove;
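The OCR returned in the R4 response encodes the number of I/O functions in bits 30:28; with the change above, card->sdio_funcs only counts functions that actually initialized, so the error path never touches ones that were never set up. Decoding the field in isolation, with a made-up OCR value:

#include <stdio.h>

int main(void)
{
        unsigned int ocr = 0x20ffff00;  /* hypothetical R4 response: 2 functions */
        unsigned int funcs = (ocr & 0x70000000) >> 28;

        printf("card reports %u I/O function(s)\n", funcs);
        return 0;
}
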
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d37464e296a5..9e060c87e64d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func)
248/* 248/*
249 * Unregister a SDIO function with the driver model, and 249 * Unregister a SDIO function with the driver model, and
250 * (eventually) free it. 250 * (eventually) free it.
251 * This function can be called through error paths where sdio_add_func() was
252 * never executed (because a failure occurred at an earlier point).
251 */ 253 */
252void sdio_remove_func(struct sdio_func *func) 254void sdio_remove_func(struct sdio_func *func)
253{ 255{
254 if (sdio_func_present(func)) 256 if (!sdio_func_present(func))
255 device_del(&func->dev); 257 return;
256 258
259 device_del(&func->dev);
257 put_device(&func->dev); 260 put_device(&func->dev);
258} 261}
259 262
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9d405b181781..ce1d28884e29 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS
44 This is silent Kconfig symbol that is selected by the drivers that 44 This is silent Kconfig symbol that is selected by the drivers that
45 need to overwrite SDHCI IO memory accessors. 45 need to overwrite SDHCI IO memory accessors.
46 46
47config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
48 bool
49 select MMC_SDHCI_IO_ACCESSORS
50 help
51 This option is selected by drivers running on big endian hosts
52 and performing I/O to a SDHCI controller through a bus that
53 implements a hardware byte swapper using a 32-bit datum.
54 This endian mapping mode is called "data invariance" and
55 has the effect of scrambling the addresses and formats of data
56 accessed in sizes other than the datum size.
57
58 This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
59
47config MMC_SDHCI_PCI 60config MMC_SDHCI_PCI
48 tristate "SDHCI support on PCI bus" 61 tristate "SDHCI support on PCI bus"
49 depends on MMC_SDHCI && PCI 62 depends on MMC_SDHCI && PCI
@@ -75,11 +88,29 @@ config MMC_RICOH_MMC
75config MMC_SDHCI_OF 88config MMC_SDHCI_OF
76 tristate "SDHCI support on OpenFirmware platforms" 89 tristate "SDHCI support on OpenFirmware platforms"
77 depends on MMC_SDHCI && PPC_OF 90 depends on MMC_SDHCI && PPC_OF
78 select MMC_SDHCI_IO_ACCESSORS
79 help 91 help
80 This selects the OF support for Secure Digital Host Controller 92 This selects the OF support for Secure Digital Host Controller
81 Interfaces. So far, only the Freescale eSDHC controller is known 93 Interfaces.
82 to exist on OF platforms. 94
95 If unsure, say N.
96
97config MMC_SDHCI_OF_ESDHC
98 bool "SDHCI OF support for the Freescale eSDHC controller"
99 depends on MMC_SDHCI_OF
100 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
101 help
102 This selects the Freescale eSDHC controller support.
103
104 If unsure, say N.
105
106config MMC_SDHCI_OF_HLWD
107 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
108 depends on MMC_SDHCI_OF
109 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
110 help
111 This selects the Secure Digital Host Controller Interface (SDHCI)
112 found in the "Hollywood" chipset of the Nintendo Wii video game
113 console.
83 114
84 If unsure, say N. 115 If unsure, say N.
85 116
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ded4d8cdd9d7..3d253dd4240f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
13obj-$(CONFIG_MMC_SDHCI) += sdhci.o 13obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 16obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
18obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 17obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
19obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -37,6 +36,11 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
37obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
38obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
39 38
39obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
40sdhci-of-y := sdhci-of-core.o
41sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
42sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
43
40ifeq ($(CONFIG_CB710_DEBUG),y) 44ifeq ($(CONFIG_CB710_DEBUG),y)
41 CFLAGS-cb710-mmc += -DDEBUG 45 CFLAGS-cb710-mmc += -DDEBUG
42endif 46endif
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c
index 01ab916c2802..55e33135edb4 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -22,62 +22,37 @@
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include "sdhci-of.h"
25#include "sdhci.h" 26#include "sdhci.h"
26 27
27struct sdhci_of_data { 28#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
28 unsigned int quirks;
29 struct sdhci_ops ops;
30};
31
32struct sdhci_of_host {
33 unsigned int clock;
34 u16 xfer_mode_shadow;
35};
36 29
37/* 30/*
38 * Ops and quirks for the Freescale eSDHC controller. 31 * These accessors are designed for big endian hosts doing I/O to
32 * little endian controllers incorporating a 32-bit hardware byte swapper.
39 */ 33 */
40 34
41#define ESDHC_DMA_SYSCTL 0x40c 35u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
42#define ESDHC_DMA_SNOOP 0x00000040
43
44#define ESDHC_SYSTEM_CONTROL 0x2c
45#define ESDHC_CLOCK_MASK 0x0000fff0
46#define ESDHC_PREDIV_SHIFT 8
47#define ESDHC_DIVIDER_SHIFT 4
48#define ESDHC_CLOCK_PEREN 0x00000004
49#define ESDHC_CLOCK_HCKEN 0x00000002
50#define ESDHC_CLOCK_IPGEN 0x00000001
51
52#define ESDHC_HOST_CONTROL_RES 0x05
53
54static u32 esdhc_readl(struct sdhci_host *host, int reg)
55{ 36{
56 return in_be32(host->ioaddr + reg); 37 return in_be32(host->ioaddr + reg);
57} 38}
58 39
59static u16 esdhc_readw(struct sdhci_host *host, int reg) 40u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
60{ 41{
61 u16 ret; 42 return in_be16(host->ioaddr + (reg ^ 0x2));
62
63 if (unlikely(reg == SDHCI_HOST_VERSION))
64 ret = in_be16(host->ioaddr + reg);
65 else
66 ret = in_be16(host->ioaddr + (reg ^ 0x2));
67 return ret;
68} 43}
69 44
70static u8 esdhc_readb(struct sdhci_host *host, int reg) 45u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
71{ 46{
72 return in_8(host->ioaddr + (reg ^ 0x3)); 47 return in_8(host->ioaddr + (reg ^ 0x3));
73} 48}
74 49
75static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) 50void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
76{ 51{
77 out_be32(host->ioaddr + reg, val); 52 out_be32(host->ioaddr + reg, val);
78} 53}
79 54
80static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) 55void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
81{ 56{
82 struct sdhci_of_host *of_host = sdhci_priv(host); 57 struct sdhci_of_host *of_host = sdhci_priv(host);
83 int base = reg & ~0x3; 58 int base = reg & ~0x3;
@@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
92 of_host->xfer_mode_shadow = val; 67 of_host->xfer_mode_shadow = val;
93 return; 68 return;
94 case SDHCI_COMMAND: 69 case SDHCI_COMMAND:
95 esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow, 70 sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
96 SDHCI_TRANSFER_MODE); 71 SDHCI_TRANSFER_MODE);
97 return; 72 return;
98 case SDHCI_BLOCK_SIZE:
99 /*
100 * Two last DMA bits are reserved, and first one is used for
101 * non-standard blksz of 4096 bytes that we don't support
102 * yet. So clear the DMA boundary bits.
103 */
104 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
105 /* fall through */
106 } 73 }
107 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); 74 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
108} 75}
109 76
110static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) 77void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
111{ 78{
112 int base = reg & ~0x3; 79 int base = reg & ~0x3;
113 int shift = (reg & 0x3) * 8; 80 int shift = (reg & 0x3) * 8;
114 81
115 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
116 if (reg == SDHCI_HOST_CONTROL)
117 val &= ~ESDHC_HOST_CONTROL_RES;
118
119 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); 82 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
120} 83}
121 84#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
122static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
123{
124 int pre_div = 2;
125 int div = 1;
126
127 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
128 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
129
130 if (clock == 0)
131 goto out;
132
133 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
134 pre_div *= 2;
135
136 while (host->max_clk / pre_div / div > clock && div < 16)
137 div++;
138
139 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
140 clock, host->max_clk / pre_div / div);
141
142 pre_div >>= 1;
143 div--;
144
145 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
146 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
147 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
148 mdelay(100);
149out:
150 host->clock = clock;
151}
152
153static int esdhc_enable_dma(struct sdhci_host *host)
154{
155 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
156 return 0;
157}
158
159static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
160{
161 struct sdhci_of_host *of_host = sdhci_priv(host);
162
163 return of_host->clock;
164}
165
166static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
167{
168 struct sdhci_of_host *of_host = sdhci_priv(host);
169
170 return of_host->clock / 256 / 16;
171}
172
173static struct sdhci_of_data sdhci_esdhc = {
174 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
175 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
176 SDHCI_QUIRK_NO_BUSY_IRQ |
177 SDHCI_QUIRK_NONSTANDARD_CLOCK |
178 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
179 SDHCI_QUIRK_PIO_NEEDS_DELAY |
180 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
181 SDHCI_QUIRK_NO_CARD_NO_RESET,
182 .ops = {
183 .readl = esdhc_readl,
184 .readw = esdhc_readw,
185 .readb = esdhc_readb,
186 .writel = esdhc_writel,
187 .writew = esdhc_writew,
188 .writeb = esdhc_writeb,
189 .set_clock = esdhc_set_clock,
190 .enable_dma = esdhc_enable_dma,
191 .get_max_clock = esdhc_get_max_clock,
192 .get_min_clock = esdhc_get_min_clock,
193 },
194};
195 85
196#ifdef CONFIG_PM 86#ifdef CONFIG_PM
197 87
@@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
301} 191}
302 192
303static const struct of_device_id sdhci_of_match[] = { 193static const struct of_device_id sdhci_of_match[] = {
194#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
304 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, 195 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
305 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, 196 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
306 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, 197 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
198#endif
199#ifdef CONFIG_MMC_SDHCI_OF_HLWD
200 { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
201#endif
307 { .compatible = "generic-sdhci", }, 202 { .compatible = "generic-sdhci", },
308 {}, 203 {},
309}; 204};
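On a bus that byte-swaps with a 32-bit datum ("data invariance"), full 32-bit accesses arrive intact but 16- and 8-bit accesses hit the wrong byte lanes, so the sdhci_be32bs_* helpers XOR the register offset to land on the right lane. The resulting offset mapping, shown standalone:

#include <stdio.h>

int main(void)
{
        int reg;

        for (reg = 0; reg < 8; reg += 2)        /* 16-bit registers sit at even offsets */
                printf("16-bit reg 0x%02x -> bus offset 0x%02x\n", reg, reg ^ 0x2);
        for (reg = 0; reg < 4; reg++)
                printf(" 8-bit reg 0x%02x -> bus offset 0x%02x\n", reg, reg ^ 0x3);
        return 0;
}
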
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 000000000000..d5b11a17e648
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,143 @@
1/*
2 * Freescale eSDHC controller driver.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#include <linux/io.h>
17#include <linux/delay.h>
18#include <linux/mmc/host.h>
19#include "sdhci-of.h"
20#include "sdhci.h"
21
22/*
23 * Ops and quirks for the Freescale eSDHC controller.
24 */
25
26#define ESDHC_DMA_SYSCTL 0x40c
27#define ESDHC_DMA_SNOOP 0x00000040
28
29#define ESDHC_SYSTEM_CONTROL 0x2c
30#define ESDHC_CLOCK_MASK 0x0000fff0
31#define ESDHC_PREDIV_SHIFT 8
32#define ESDHC_DIVIDER_SHIFT 4
33#define ESDHC_CLOCK_PEREN 0x00000004
34#define ESDHC_CLOCK_HCKEN 0x00000002
35#define ESDHC_CLOCK_IPGEN 0x00000001
36
37#define ESDHC_HOST_CONTROL_RES 0x05
38
39static u16 esdhc_readw(struct sdhci_host *host, int reg)
40{
41 u16 ret;
42
43 if (unlikely(reg == SDHCI_HOST_VERSION))
44 ret = in_be16(host->ioaddr + reg);
45 else
46 ret = sdhci_be32bs_readw(host, reg);
47 return ret;
48}
49
50static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
51{
52 if (reg == SDHCI_BLOCK_SIZE) {
53 /*
54 * Two last DMA bits are reserved, and first one is used for
55 * non-standard blksz of 4096 bytes that we don't support
56 * yet. So clear the DMA boundary bits.
57 */
58 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
59 }
60 sdhci_be32bs_writew(host, val, reg);
61}
62
63static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
64{
65 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
66 if (reg == SDHCI_HOST_CONTROL)
67 val &= ~ESDHC_HOST_CONTROL_RES;
68 sdhci_be32bs_writeb(host, val, reg);
69}
70
71static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
72{
73 int pre_div = 2;
74 int div = 1;
75
76 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
77 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
78
79 if (clock == 0)
80 goto out;
81
82 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
83 pre_div *= 2;
84
85 while (host->max_clk / pre_div / div > clock && div < 16)
86 div++;
87
88 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
89 clock, host->max_clk / pre_div / div);
90
91 pre_div >>= 1;
92 div--;
93
94 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
95 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
96 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
97 mdelay(100);
98out:
99 host->clock = clock;
100}
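/*
 * Worked example (illustrative only, assuming host->max_clk is 133 MHz and
 * a 25 MHz SD clock is requested): the first loop leaves pre_div at 2,
 * because 133 MHz / 2 / 16 is already below 25 MHz; the second loop stops
 * at div = 3, since 133 MHz / 2 / 3 ~= 22.2 MHz no longer exceeds the
 * request. After the shift/decrement the register fields become
 * pre_div = 1 and div = 2, i.e. the hardware encodings of "divide by 2"
 * and "divide by 3".
 */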
101
102static int esdhc_enable_dma(struct sdhci_host *host)
103{
104 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
105 return 0;
106}
107
108static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
109{
110 struct sdhci_of_host *of_host = sdhci_priv(host);
111
112 return of_host->clock;
113}
114
115static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
116{
117 struct sdhci_of_host *of_host = sdhci_priv(host);
118
119 return of_host->clock / 256 / 16;
120}
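/*
 * This is the slowest rate the divider pair above can produce: the maximum
 * prescaler is 256 and the maximum divisor is 16, so, for example, a
 * 133 MHz input clock bottoms out around 133000000 / 256 / 16 ~= 32 kHz.
 */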
121
122struct sdhci_of_data sdhci_esdhc = {
123 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
124 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
125 SDHCI_QUIRK_NO_BUSY_IRQ |
126 SDHCI_QUIRK_NONSTANDARD_CLOCK |
127 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
128 SDHCI_QUIRK_PIO_NEEDS_DELAY |
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = {
132 .readl = sdhci_be32bs_readl,
133 .readw = esdhc_readw,
134 .readb = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel,
136 .writew = esdhc_writew,
137 .writeb = esdhc_writeb,
138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock,
141 .get_min_clock = esdhc_get_min_clock,
142 },
143};
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 000000000000..35117f3ed757
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
1/*
2 * drivers/mmc/host/sdhci-of-hlwd.c
3 *
4 * Nintendo Wii Secure Digital Host Controller Interface.
5 * Copyright (C) 2009 The GameCube Linux Team
6 * Copyright (C) 2009 Albert Herranz
7 *
8 * Based on sdhci-of-esdhc.c
9 *
10 * Copyright (c) 2007 Freescale Semiconductor, Inc.
11 * Copyright (c) 2009 MontaVista Software, Inc.
12 *
13 * Authors: Xiaobo Xie <X.Xie@freescale.com>
14 * Anton Vorontsov <avorontsov@ru.mvista.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or (at
19 * your option) any later version.
20 */
21
22#include <linux/delay.h>
23#include <linux/mmc/host.h>
24#include "sdhci-of.h"
25#include "sdhci.h"
26
27/*
28 * Ops and quirks for the Nintendo Wii SDHCI controllers.
29 */
30
31/*
32 * We need a small delay after each write, or things go horribly wrong.
33 */
34#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
35
36static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
37{
38 sdhci_be32bs_writel(host, val, reg);
39 udelay(SDHCI_HLWD_WRITE_DELAY);
40}
41
42static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
43{
44 sdhci_be32bs_writew(host, val, reg);
45 udelay(SDHCI_HLWD_WRITE_DELAY);
46}
47
48static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
49{
50 sdhci_be32bs_writeb(host, val, reg);
51 udelay(SDHCI_HLWD_WRITE_DELAY);
52}
53
54struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = {
58 .readl = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb,
64 },
65};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 000000000000..ad09ad9915d8
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
1/*
2 * OpenFirmware bindings for Secure Digital Host Controller Interface.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#ifndef __SDHCI_OF_H
17#define __SDHCI_OF_H
18
19#include <linux/types.h>
20#include "sdhci.h"
21
22struct sdhci_of_data {
23 unsigned int quirks;
24 struct sdhci_ops ops;
25};
26
27struct sdhci_of_host {
28 unsigned int clock;
29 u16 xfer_mode_shadow;
30};
31
32extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
33extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
34extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
35extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
36extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
37extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
38
39extern struct sdhci_of_data sdhci_esdhc;
40extern struct sdhci_of_data sdhci_hlwd;
41
42#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d73dc04..842f46f94284 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
8 * the Free Software Foundation; either version 2 of the License, or (at 8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11#ifndef __SDHCI_H
12#define __SDHCI_H
11 13
12#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
13#include <linux/compiler.h> 15#include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
408extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 410extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
409extern int sdhci_resume_host(struct sdhci_host *host); 411extern int sdhci_resume_host(struct sdhci_host *host);
410#endif 412#endif
413
414#endif /* __SDHCI_H */
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 74fa075c838a..b13f6417b5b2 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <mach/hardware.h> 22#include <mach/hardware.h>
23#include <asm/cacheflush.h>
24 23
25#include <asm/mach/flash.h> 24#include <asm/mach/flash.h>
26 25
26#define CACHELINESIZE 32
27
27static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, 28static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
28 ssize_t len) 29 ssize_t len)
29{ 30{
30 flush_ioremap_region(map->phys, map->cached, from, len); 31 unsigned long start = (unsigned long)map->cached + from;
32 unsigned long end = start + len;
33
34 start &= ~(CACHELINESIZE - 1);
35 while (start < end) {
36 /* invalidate D cache line */
37 asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
38 start += CACHELINESIZE;
39 }
31} 40}
32 41
33struct pxa2xx_flash_info { 42struct pxa2xx_flash_info {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7678538344f4..677cd53f18c3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -291,14 +291,6 @@ config MTD_NAND_SHARPSL
291 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 291 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
292 depends on ARCH_PXA 292 depends on ARCH_PXA
293 293
294config MTD_NAND_BASLER_EXCITE
295 tristate "Support for NAND Flash on Basler eXcite"
296 depends on BASLER_EXCITE
297 help
298 This enables the driver for the NAND flash device found on the
299 Basler eXcite Smart Camera. If built as a module, the driver
300 will be named excite_nandflash.
301
302config MTD_NAND_CAFE 294config MTD_NAND_CAFE
303 tristate "NAND support for OLPC CAFÉ chip" 295 tristate "NAND support for OLPC CAFÉ chip"
304 depends on PCI 296 depends on PCI
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 460a1f39a8d1..1407bd144015 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o 27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
28obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o 28obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
29obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 29obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
30obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
31obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 30obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
32obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o 31obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
33obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 32obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
deleted file mode 100644
index af6a6a5399e1..000000000000
--- a/drivers/mtd/nand/excite_nandflash.c
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2* Copyright (C) 2005 - 2007 by Basler Vision Technologies AG
3* Author: Thomas Koeller <thomas.koeller.qbaslerweb.com>
4* Original code by Thies Moeller <thies.moeller@baslerweb.com>
5*
6* This program is free software; you can redistribute it and/or modify
7* it under the terms of the GNU General Public License as published by
8* the Free Software Foundation; either version 2 of the License, or
9* (at your option) any later version.
10*
11* This program is distributed in the hope that it will be useful,
12* but WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14* GNU General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/string.h>
26#include <linux/ioport.h>
27#include <linux/platform_device.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30
31#include <linux/mtd/mtd.h>
32#include <linux/mtd/nand.h>
33#include <linux/mtd/nand_ecc.h>
34#include <linux/mtd/partitions.h>
35
36#include <asm/io.h>
37#include <asm/rm9k-ocd.h>
38
39#include <excite_nandflash.h>
40
41#define EXCITE_NANDFLASH_VERSION "0.1"
42
43/* I/O register offsets */
44#define EXCITE_NANDFLASH_DATA_BYTE 0x00
45#define EXCITE_NANDFLASH_STATUS_BYTE 0x0c
46#define EXCITE_NANDFLASH_ADDR_BYTE 0x10
47#define EXCITE_NANDFLASH_CMD_BYTE 0x14
48
49/* prefix for debug output */
50static const char module_id[] = "excite_nandflash";
51
52/*
53 * partition definition
54 */
55static const struct mtd_partition partition_info[] = {
56 {
57 .name = "eXcite RootFS",
58 .offset = 0,
59 .size = MTDPART_SIZ_FULL
60 }
61};
62
63static inline const struct resource *
64excite_nand_get_resource(struct platform_device *d, unsigned long flags,
65 const char *basename)
66{
67 char buf[80];
68
69 if (snprintf(buf, sizeof buf, "%s_%u", basename, d->id) >= sizeof buf)
70 return NULL;
71 return platform_get_resource_byname(d, flags, buf);
72}
73
74static inline void __iomem *
75excite_nand_map_regs(struct platform_device *d, const char *basename)
76{
77 void *result = NULL;
78 const struct resource *const r =
79 excite_nand_get_resource(d, IORESOURCE_MEM, basename);
80
81 if (r)
82 result = ioremap_nocache(r->start, r->end + 1 - r->start);
83 return result;
84}
85
86/* controller and mtd information */
87struct excite_nand_drvdata {
88 struct mtd_info board_mtd;
89 struct nand_chip board_chip;
90 void __iomem *regs;
91 void __iomem *tgt;
92};
93
94/* Control function */
95static void excite_nand_control(struct mtd_info *mtd, int cmd,
96 unsigned int ctrl)
97{
98 struct excite_nand_drvdata * const d =
99 container_of(mtd, struct excite_nand_drvdata, board_mtd);
100
101 switch (ctrl) {
102 case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
103 d->tgt = d->regs + EXCITE_NANDFLASH_CMD_BYTE;
104 break;
105 case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
106 d->tgt = d->regs + EXCITE_NANDFLASH_ADDR_BYTE;
107 break;
108 case NAND_CTRL_CHANGE | NAND_NCE:
109 d->tgt = d->regs + EXCITE_NANDFLASH_DATA_BYTE;
110 break;
111 }
112
113 if (cmd != NAND_CMD_NONE)
114 __raw_writeb(cmd, d->tgt);
115}
116
117/* Return 0 if flash is busy, 1 if ready */
118static int excite_nand_devready(struct mtd_info *mtd)
119{
120 struct excite_nand_drvdata * const drvdata =
121 container_of(mtd, struct excite_nand_drvdata, board_mtd);
122
123 return __raw_readb(drvdata->regs + EXCITE_NANDFLASH_STATUS_BYTE);
124}
125
126/*
127 * Called by device layer to remove the driver.
128 * The binding to the mtd and all allocated
129 * resources are released.
130 */
131static int __devexit excite_nand_remove(struct platform_device *dev)
132{
133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
134
135 platform_set_drvdata(dev, NULL);
136
137 if (unlikely(!this)) {
138 printk(KERN_ERR "%s: called %s without private data!!",
139 module_id, __func__);
140 return -EINVAL;
141 }
142
143 /* first thing we need to do is release our mtd
144 * then go through freeing the resource used
145 */
146 nand_release(&this->board_mtd);
147
148 /* free the common resources */
149 iounmap(this->regs);
150 kfree(this);
151
152 DEBUG(MTD_DEBUG_LEVEL1, "%s: removed\n", module_id);
153 return 0;
154}
155
156/*
157 * Called by device layer when it finds a device matching
158 * one our driver can handle. This code checks to see if
159 * it can allocate all necessary resources then calls the
160 * nand layer to look for devices.
161*/
162static int __init excite_nand_probe(struct platform_device *pdev)
163{
164 struct excite_nand_drvdata *drvdata; /* private driver data */
165 struct nand_chip *board_chip; /* private flash chip data */
166 struct mtd_info *board_mtd; /* mtd info for this board */
167 int scan_res;
168
169 drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
170 if (unlikely(!drvdata)) {
171 printk(KERN_ERR "%s: no memory for drvdata\n",
172 module_id);
173 return -ENOMEM;
174 }
175
176 /* bind private data into driver */
177 platform_set_drvdata(pdev, drvdata);
178
179 /* allocate and map the resource */
180 drvdata->regs =
181 excite_nand_map_regs(pdev, EXCITE_NANDFLASH_RESOURCE_REGS);
182
183 if (unlikely(!drvdata->regs)) {
184 printk(KERN_ERR "%s: cannot reserve register region\n",
185 module_id);
186 kfree(drvdata);
187 return -ENXIO;
188 }
189
190 drvdata->tgt = drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
191
192 /* initialise our chip */
193 board_chip = &drvdata->board_chip;
194 board_chip->IO_ADDR_R = board_chip->IO_ADDR_W =
195 drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
196 board_chip->cmd_ctrl = excite_nand_control;
197 board_chip->dev_ready = excite_nand_devready;
198 board_chip->chip_delay = 25;
199 board_chip->ecc.mode = NAND_ECC_SOFT;
200
201 /* link chip to mtd */
202 board_mtd = &drvdata->board_mtd;
203 board_mtd->priv = board_chip;
204
205 DEBUG(MTD_DEBUG_LEVEL2, "%s: device scan\n", module_id);
206 scan_res = nand_scan(&drvdata->board_mtd, 1);
207
208 if (likely(!scan_res)) {
209 DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
210 add_mtd_partitions(&drvdata->board_mtd, partition_info,
211 ARRAY_SIZE(partition_info));
212 } else {
213 iounmap(drvdata->regs);
214 kfree(drvdata);
215 printk(KERN_ERR "%s: device scan failed\n", module_id);
216 return -EIO;
217 }
218 return 0;
219}
220
221static struct platform_driver excite_nand_driver = {
222 .driver = {
223 .name = "excite_nand",
224 .owner = THIS_MODULE,
225 },
226 .probe = excite_nand_probe,
227 .remove = __devexit_p(excite_nand_remove)
228};
229
230static int __init excite_nand_init(void)
231{
232 pr_info("Basler eXcite nand flash driver Version "
233 EXCITE_NANDFLASH_VERSION "\n");
234 return platform_driver_register(&excite_nand_driver);
235}
236
237static void __exit excite_nand_exit(void)
238{
239 platform_driver_unregister(&excite_nand_driver);
240}
241
242module_init(excite_nand_init);
243module_exit(excite_nand_exit);
244
245MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
246MODULE_DESCRIPTION("Basler eXcite NAND-Flash driver");
247MODULE_LICENSE("GPL");
248MODULE_VERSION(EXCITE_NANDFLASH_VERSION)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a5be9ac6405c..e58a65391ad2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1953,6 +1953,8 @@ config BCM63XX_ENET
1953 1953
1954source "drivers/net/fs_enet/Kconfig" 1954source "drivers/net/fs_enet/Kconfig"
1955 1955
1956source "drivers/net/octeon/Kconfig"
1957
1956endif # NET_ETHERNET 1958endif # NET_ETHERNET
1957 1959
1958# 1960#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 246323d7f161..ad1346dd9da9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
285obj-$(CONFIG_SFC) += sfc/ 285obj-$(CONFIG_SFC) += sfc/
286 286
287obj-$(CONFIG_WIMAX) += wimax/ 287obj-$(CONFIG_WIMAX) += wimax/
288
289obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 000000000000..1e56bbf3f5c0
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
1config OCTEON_MGMT_ETHERNET
2 tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
3 depends on CPU_CAVIUM_OCTEON
4 select PHYLIB
5 select MDIO_OCTEON
6 default y
7 help
8 This option enables the ethernet driver for the management
9 port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
10 CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 000000000000..906edecacfd3
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
1
2obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 000000000000..050538bf155a
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1176 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#include <linux/capability.h>
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/platform_device.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/if_vlan.h>
16#include <linux/phy.h>
17#include <linux/spinlock.h>
18
19#include <asm/octeon/octeon.h>
20#include <asm/octeon/cvmx-mixx-defs.h>
21#include <asm/octeon/cvmx-agl-defs.h>
22
23#define DRV_NAME "octeon_mgmt"
24#define DRV_VERSION "2.0"
25#define DRV_DESCRIPTION \
26 "Cavium Networks Octeon MII (management) port Network Driver"
27
28#define OCTEON_MGMT_NAPI_WEIGHT 16
29
30/*
31 * Ring sizes that are powers of two allow for more efficient modulo
32 * opertions.
33 */
34#define OCTEON_MGMT_RX_RING_SIZE 512
35#define OCTEON_MGMT_TX_RING_SIZE 128
36
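/*
 * For example, the wrap-around used below, (rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE,
 * reduces to a cheap bit mask, (rx_next + 1) & 511, because 512 is a power
 * of two; the compiler typically performs this strength reduction itself.
 */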
37/* Allow 8 bytes for vlan and FCS. */
38#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
39
40union mgmt_port_ring_entry {
41 u64 d64;
42 struct {
43 u64 reserved_62_63:2;
44 /* Length of the buffer/packet in bytes */
45 u64 len:14;
46 /* For TX, signals that the packet should be timestamped */
47 u64 tstamp:1;
48 /* The RX error code */
49 u64 code:7;
50#define RING_ENTRY_CODE_DONE 0xf
51#define RING_ENTRY_CODE_MORE 0x10
52 /* Physical address of the buffer */
53 u64 addr:40;
54 } s;
55};
56
57struct octeon_mgmt {
58 struct net_device *netdev;
59 int port;
60 int irq;
61 u64 *tx_ring;
62 dma_addr_t tx_ring_handle;
63 unsigned int tx_next;
64 unsigned int tx_next_clean;
65 unsigned int tx_current_fill;
66 /* The tx_list lock also protects the ring related variables */
67 struct sk_buff_head tx_list;
68
69 /* RX variables only touched in napi_poll. No locking necessary. */
70 u64 *rx_ring;
71 dma_addr_t rx_ring_handle;
72 unsigned int rx_next;
73 unsigned int rx_next_fill;
74 unsigned int rx_current_fill;
75 struct sk_buff_head rx_list;
76
77 spinlock_t lock;
78 unsigned int last_duplex;
79 unsigned int last_link;
80 struct device *dev;
81 struct napi_struct napi;
82 struct tasklet_struct tx_clean_tasklet;
83 struct phy_device *phydev;
84};
85
86static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
87{
88 int port = p->port;
89 union cvmx_mixx_intena mix_intena;
90 unsigned long flags;
91
92 spin_lock_irqsave(&p->lock, flags);
93 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
94 mix_intena.s.ithena = enable ? 1 : 0;
95 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
96 spin_unlock_irqrestore(&p->lock, flags);
97}
98
99static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
100{
101 int port = p->port;
102 union cvmx_mixx_intena mix_intena;
103 unsigned long flags;
104
105 spin_lock_irqsave(&p->lock, flags);
106 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
107 mix_intena.s.othena = enable ? 1 : 0;
108 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
109 spin_unlock_irqrestore(&p->lock, flags);
110}
111
112static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
113{
114 octeon_mgmt_set_rx_irq(p, 1);
115}
116
117static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
118{
119 octeon_mgmt_set_rx_irq(p, 0);
120}
121
122static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
123{
124 octeon_mgmt_set_tx_irq(p, 1);
125}
126
127static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
128{
129 octeon_mgmt_set_tx_irq(p, 0);
130}
131
132static unsigned int ring_max_fill(unsigned int ring_size)
133{
134 return ring_size - 8;
135}
136
137static unsigned int ring_size_to_bytes(unsigned int ring_size)
138{
139 return ring_size * sizeof(union mgmt_port_ring_entry);
140}
141
142static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
143{
144 struct octeon_mgmt *p = netdev_priv(netdev);
145 int port = p->port;
146
147 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
148 unsigned int size;
149 union mgmt_port_ring_entry re;
150 struct sk_buff *skb;
151
152 /* CN56XX pass 1 needs 8 bytes of padding. */
153 size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
154
155 skb = netdev_alloc_skb(netdev, size);
156 if (!skb)
157 break;
158 skb_reserve(skb, NET_IP_ALIGN);
159 __skb_queue_tail(&p->rx_list, skb);
160
161 re.d64 = 0;
162 re.s.len = size;
163 re.s.addr = dma_map_single(p->dev, skb->data,
164 size,
165 DMA_FROM_DEVICE);
166
167 /* Put it in the ring. */
168 p->rx_ring[p->rx_next_fill] = re.d64;
169 dma_sync_single_for_device(p->dev, p->rx_ring_handle,
170 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
171 DMA_BIDIRECTIONAL);
172 p->rx_next_fill =
173 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
174 p->rx_current_fill++;
175 /* Ring the bell. */
176 cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
177 }
178}
179
180static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
181{
182 int port = p->port;
183 union cvmx_mixx_orcnt mix_orcnt;
184 union mgmt_port_ring_entry re;
185 struct sk_buff *skb;
186 int cleaned = 0;
187 unsigned long flags;
188
189 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
190 while (mix_orcnt.s.orcnt) {
191 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
192 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
193 DMA_BIDIRECTIONAL);
194
195 spin_lock_irqsave(&p->tx_list.lock, flags);
196
197 re.d64 = p->tx_ring[p->tx_next_clean];
198 p->tx_next_clean =
199 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
200 skb = __skb_dequeue(&p->tx_list);
201
202 mix_orcnt.u64 = 0;
203 mix_orcnt.s.orcnt = 1;
204
205 /* Acknowledge to hardware that we have the buffer. */
206 cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
207 p->tx_current_fill--;
208
209 spin_unlock_irqrestore(&p->tx_list.lock, flags);
210
211 dma_unmap_single(p->dev, re.s.addr, re.s.len,
212 DMA_TO_DEVICE);
213 dev_kfree_skb_any(skb);
214 cleaned++;
215
216 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
217 }
218
219 if (cleaned && netif_queue_stopped(p->netdev))
220 netif_wake_queue(p->netdev);
221}
222
223static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
224{
225 struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
226 octeon_mgmt_clean_tx_buffers(p);
227 octeon_mgmt_enable_tx_irq(p);
228}
229
230static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
231{
232 struct octeon_mgmt *p = netdev_priv(netdev);
233 int port = p->port;
234 unsigned long flags;
235 u64 drop, bad;
236
237 /* These reads also clear the count registers. */
238 drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
239 bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
240
241 if (drop || bad) {
242 /* Do an atomic update. */
243 spin_lock_irqsave(&p->lock, flags);
244 netdev->stats.rx_errors += bad;
245 netdev->stats.rx_dropped += drop;
246 spin_unlock_irqrestore(&p->lock, flags);
247 }
248}
249
250static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
251{
252 struct octeon_mgmt *p = netdev_priv(netdev);
253 int port = p->port;
254 unsigned long flags;
255
256 union cvmx_agl_gmx_txx_stat0 s0;
257 union cvmx_agl_gmx_txx_stat1 s1;
258
259 /* These reads also clear the count registers. */
260 s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
261 s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
262
263 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
264 /* Do an atomic update. */
265 spin_lock_irqsave(&p->lock, flags);
266 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
267 netdev->stats.collisions += s1.s.scol + s1.s.mcol;
268 spin_unlock_irqrestore(&p->lock, flags);
269 }
270}
271
272/*
273 * Dequeue a receive skb and its corresponding ring entry. The ring
274 * entry is returned, *pskb is updated to point to the skb.
275 */
276static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
277 struct sk_buff **pskb)
278{
279 union mgmt_port_ring_entry re;
280
281 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
282 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
283 DMA_BIDIRECTIONAL);
284
285 re.d64 = p->rx_ring[p->rx_next];
286 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
287 p->rx_current_fill--;
288 *pskb = __skb_dequeue(&p->rx_list);
289
290 dma_unmap_single(p->dev, re.s.addr,
291 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
292 DMA_FROM_DEVICE);
293
294 return re.d64;
295}
296
297
298static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
299{
300 int port = p->port;
301 struct net_device *netdev = p->netdev;
302 union cvmx_mixx_ircnt mix_ircnt;
303 union mgmt_port_ring_entry re;
304 struct sk_buff *skb;
305 struct sk_buff *skb2;
306 struct sk_buff *skb_new;
307 union mgmt_port_ring_entry re2;
308 int rc = 1;
309
310
311 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
312 if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
313 /* A good packet, send it up. */
314 skb_put(skb, re.s.len);
315good:
316 skb->protocol = eth_type_trans(skb, netdev);
317 netdev->stats.rx_packets++;
318 netdev->stats.rx_bytes += skb->len;
319 netdev->last_rx = jiffies;
320 netif_receive_skb(skb);
321 rc = 0;
322 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
323 /*
324 * Packet split across skbs. This can happen if we
325 * increase the MTU. Buffers that are already in the
326 * rx ring can then end up being too small. As the rx
327 * ring is refilled, buffers sized for the new MTU
328 * will be used and we should go back to the normal
329 * non-split case.
330 */
331 skb_put(skb, re.s.len);
332 do {
333 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
334 if (re2.s.code != RING_ENTRY_CODE_MORE
335 && re2.s.code != RING_ENTRY_CODE_DONE)
336 goto split_error;
337 skb_put(skb2, re2.s.len);
338 skb_new = skb_copy_expand(skb, 0, skb2->len,
339 GFP_ATOMIC);
340 if (!skb_new)
341 goto split_error;
342 if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
343 skb2->len))
344 goto split_error;
345 skb_put(skb_new, skb2->len);
346 dev_kfree_skb_any(skb);
347 dev_kfree_skb_any(skb2);
348 skb = skb_new;
349 } while (re2.s.code == RING_ENTRY_CODE_MORE);
350 goto good;
351 } else {
352 /* Some other error, discard it. */
353 dev_kfree_skb_any(skb);
354 /*
355 * Error statistics are accumulated in
356 * octeon_mgmt_update_rx_stats.
357 */
358 }
359 goto done;
360split_error:
361 /* Discard the whole mess. */
362 dev_kfree_skb_any(skb);
363 dev_kfree_skb_any(skb2);
364 while (re2.s.code == RING_ENTRY_CODE_MORE) {
365 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
366 dev_kfree_skb_any(skb2);
367 }
368 netdev->stats.rx_errors++;
369
370done:
371 /* Tell the hardware we processed a packet. */
372 mix_ircnt.u64 = 0;
373 mix_ircnt.s.ircnt = 1;
374 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
375 return rc;
376
377}
378
379static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
380{
381 int port = p->port;
382 unsigned int work_done = 0;
383 union cvmx_mixx_ircnt mix_ircnt;
384 int rc;
385
386
387 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
388 while (work_done < budget && mix_ircnt.s.ircnt) {
389
390 rc = octeon_mgmt_receive_one(p);
391 if (!rc)
392 work_done++;
393
394 /* Check for more packets. */
395 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
396 }
397
398 octeon_mgmt_rx_fill_ring(p->netdev);
399
400 return work_done;
401}
402
403static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
404{
405 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
406 struct net_device *netdev = p->netdev;
407 unsigned int work_done = 0;
408
409 work_done = octeon_mgmt_receive_packets(p, budget);
410
411 if (work_done < budget) {
412 /* We stopped because no more packets were available. */
413 napi_complete(napi);
414 octeon_mgmt_enable_rx_irq(p);
415 }
416 octeon_mgmt_update_rx_stats(netdev);
417
418 return work_done;
419}
420
421/* Reset the hardware to clean state. */
422static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
423{
424 union cvmx_mixx_ctl mix_ctl;
425 union cvmx_mixx_bist mix_bist;
426 union cvmx_agl_gmx_bist agl_gmx_bist;
427
428 mix_ctl.u64 = 0;
429 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
430 do {
431 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
432 } while (mix_ctl.s.busy);
433 mix_ctl.s.reset = 1;
434 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
435 cvmx_read_csr(CVMX_MIXX_CTL(p->port));
436 cvmx_wait(64);
437
438 mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
439 if (mix_bist.u64)
440 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
441 (unsigned long long)mix_bist.u64);
442
443 agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
444 if (agl_gmx_bist.u64)
445 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
446 (unsigned long long)agl_gmx_bist.u64);
447}
448
449struct octeon_mgmt_cam_state {
450 u64 cam[6];
451 u64 cam_mask;
452 int cam_index;
453};
454
455static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
456 unsigned char *addr)
457{
458 int i;
459
460 for (i = 0; i < 6; i++)
461 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
462 cs->cam_mask |= (1ULL << cs->cam_index);
463 cs->cam_index++;
464}
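/*
 * Layout sketch: each of the six cam[] words collects one byte of every
 * address, one byte lane per CAM entry. The first address added lands in
 * bits 7:0 of cam[0]..cam[5], the second in bits 15:8, and so on, with one
 * bit in cam_mask enabling each populated entry.
 */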
465
466static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
467{
468 struct octeon_mgmt *p = netdev_priv(netdev);
469 int port = p->port;
470 int i;
471 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
472 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
473 unsigned long flags;
474 unsigned int prev_packet_enable;
475 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
476 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
477 struct octeon_mgmt_cam_state cam_state;
478 struct dev_addr_list *list;
479 struct list_head *pos;
480 int available_cam_entries;
481
482 memset(&cam_state, 0, sizeof(cam_state));
483
484 if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
485 cam_mode = 0;
486 available_cam_entries = 8;
487 } else {
488 /*
 489 * One CAM entry is used for the primary address, which leaves
 490 * seven for the secondary addresses.
491 */
492 available_cam_entries = 7 - netdev->dev_addrs.count;
493 }
494
495 if (netdev->flags & IFF_MULTICAST) {
496 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
497 || netdev->mc_count > available_cam_entries)
 498 multicast_mode = 2; /* 2 - Accept all multicast. */
499 else
500 multicast_mode = 0; /* 0 - Use CAM. */
501 }
502
503 if (cam_mode == 1) {
504 /* Add primary address. */
505 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
506 list_for_each(pos, &netdev->dev_addrs.list) {
507 struct netdev_hw_addr *hw_addr;
508 hw_addr = list_entry(pos, struct netdev_hw_addr, list);
509 octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
510 list = list->next;
511 }
512 }
513 if (multicast_mode == 0) {
514 i = netdev->mc_count;
515 list = netdev->mc_list;
516 while (i--) {
517 octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
518 list = list->next;
519 }
520 }
521
522
523 spin_lock_irqsave(&p->lock, flags);
524
525 /* Disable packet I/O. */
526 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
527 prev_packet_enable = agl_gmx_prtx.s.en;
528 agl_gmx_prtx.s.en = 0;
529 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
530
531
532 adr_ctl.u64 = 0;
533 adr_ctl.s.cam_mode = cam_mode;
534 adr_ctl.s.mcst = multicast_mode;
535 adr_ctl.s.bcst = 1; /* Allow broadcast */
536
537 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
538
539 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
540 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
541 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
542 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
543 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
544 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
545 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
546
547 /* Restore packet I/O. */
548 agl_gmx_prtx.s.en = prev_packet_enable;
549 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
550
551 spin_unlock_irqrestore(&p->lock, flags);
552}
553
554static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
555{
556 struct sockaddr *sa = addr;
557
558 if (!is_valid_ether_addr(sa->sa_data))
559 return -EADDRNOTAVAIL;
560
561 memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
562
563 octeon_mgmt_set_rx_filtering(netdev);
564
565 return 0;
566}
567
568static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
569{
570 struct octeon_mgmt *p = netdev_priv(netdev);
571 int port = p->port;
572 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
573
574 /*
575 * Limit the MTU to make sure the ethernet packets are between
576 * 64 bytes and 16383 bytes.
577 */
578 if (size_without_fcs < 64 || size_without_fcs > 16383) {
579 dev_warn(p->dev, "MTU must be between %d and %d.\n",
580 64 - OCTEON_MGMT_RX_HEADROOM,
581 16383 - OCTEON_MGMT_RX_HEADROOM);
582 return -EINVAL;
583 }
584
585 netdev->mtu = new_mtu;
586
587 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
588 cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
589 (size_without_fcs + 7) & 0xfff8);
590
591 return 0;
592}
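/*
 * Concretely, OCTEON_MGMT_RX_HEADROOM is ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 22 bytes, so the check above accepts MTUs from 42 up to
 * 16361 and programs FRM_MAX with the MTU plus that 22-byte overhead.
 */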
593
594static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
595{
596 struct net_device *netdev = dev_id;
597 struct octeon_mgmt *p = netdev_priv(netdev);
598 int port = p->port;
599 union cvmx_mixx_isr mixx_isr;
600
601 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
602
603 /* Clear any pending interrupts */
604 cvmx_write_csr(CVMX_MIXX_ISR(port),
605 cvmx_read_csr(CVMX_MIXX_ISR(port)));
606 cvmx_read_csr(CVMX_MIXX_ISR(port));
607
608 if (mixx_isr.s.irthresh) {
609 octeon_mgmt_disable_rx_irq(p);
610 napi_schedule(&p->napi);
611 }
612 if (mixx_isr.s.orthresh) {
613 octeon_mgmt_disable_tx_irq(p);
614 tasklet_schedule(&p->tx_clean_tasklet);
615 }
616
617 return IRQ_HANDLED;
618}
619
620static int octeon_mgmt_ioctl(struct net_device *netdev,
621 struct ifreq *rq, int cmd)
622{
623 struct octeon_mgmt *p = netdev_priv(netdev);
624
625 if (!netif_running(netdev))
626 return -EINVAL;
627
628 if (!p->phydev)
629 return -EINVAL;
630
631 return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
632}
633
634static void octeon_mgmt_adjust_link(struct net_device *netdev)
635{
636 struct octeon_mgmt *p = netdev_priv(netdev);
637 int port = p->port;
638 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
639 unsigned long flags;
640 int link_changed = 0;
641
642 spin_lock_irqsave(&p->lock, flags);
643 if (p->phydev->link) {
644 if (!p->last_link)
645 link_changed = 1;
646 if (p->last_duplex != p->phydev->duplex) {
647 p->last_duplex = p->phydev->duplex;
648 prtx_cfg.u64 =
649 cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
650 prtx_cfg.s.duplex = p->phydev->duplex;
651 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
652 prtx_cfg.u64);
653 }
654 } else {
655 if (p->last_link)
656 link_changed = -1;
657 }
658 p->last_link = p->phydev->link;
659 spin_unlock_irqrestore(&p->lock, flags);
660
661 if (link_changed != 0) {
662 if (link_changed > 0) {
663 netif_carrier_on(netdev);
664 pr_info("%s: Link is up - %d/%s\n", netdev->name,
665 p->phydev->speed,
666 DUPLEX_FULL == p->phydev->duplex ?
667 "Full" : "Half");
668 } else {
669 netif_carrier_off(netdev);
670 pr_info("%s: Link is down\n", netdev->name);
671 }
672 }
673}
674
675static int octeon_mgmt_init_phy(struct net_device *netdev)
676{
677 struct octeon_mgmt *p = netdev_priv(netdev);
678 char phy_id[20];
679
680 if (octeon_is_simulation()) {
681 /* No PHYs in the simulator. */
682 netif_carrier_on(netdev);
683 return 0;
684 }
685
686 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
687
688 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
689 PHY_INTERFACE_MODE_MII);
690
691 if (IS_ERR(p->phydev)) {
692 p->phydev = NULL;
693 return -1;
694 }
695
696 phy_start_aneg(p->phydev);
697
698 return 0;
699}
700
701static int octeon_mgmt_open(struct net_device *netdev)
702{
703 struct octeon_mgmt *p = netdev_priv(netdev);
704 int port = p->port;
705 union cvmx_mixx_ctl mix_ctl;
706 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
707 union cvmx_mixx_oring1 oring1;
708 union cvmx_mixx_iring1 iring1;
709 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
710 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
711 union cvmx_mixx_irhwm mix_irhwm;
712 union cvmx_mixx_orhwm mix_orhwm;
713 union cvmx_mixx_intena mix_intena;
714 struct sockaddr sa;
715
716 /* Allocate ring buffers. */
717 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
718 GFP_KERNEL);
719 if (!p->tx_ring)
720 return -ENOMEM;
721 p->tx_ring_handle =
722 dma_map_single(p->dev, p->tx_ring,
723 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
724 DMA_BIDIRECTIONAL);
725 p->tx_next = 0;
726 p->tx_next_clean = 0;
727 p->tx_current_fill = 0;
728
729
730 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
731 GFP_KERNEL);
732 if (!p->rx_ring)
733 goto err_nomem;
734 p->rx_ring_handle =
735 dma_map_single(p->dev, p->rx_ring,
736 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
737 DMA_BIDIRECTIONAL);
738
739 p->rx_next = 0;
740 p->rx_next_fill = 0;
741 p->rx_current_fill = 0;
742
743 octeon_mgmt_reset_hw(p);
744
745 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
746
747 /* Bring it out of reset if needed. */
748 if (mix_ctl.s.reset) {
749 mix_ctl.s.reset = 0;
750 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
751 do {
752 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
753 } while (mix_ctl.s.reset);
754 }
755
756 agl_gmx_inf_mode.u64 = 0;
757 agl_gmx_inf_mode.s.en = 1;
758 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
759
760 oring1.u64 = 0;
761 oring1.s.obase = p->tx_ring_handle >> 3;
762 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
763 cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
764
765 iring1.u64 = 0;
766 iring1.s.ibase = p->rx_ring_handle >> 3;
767 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
768 cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
769
770 /* Disable packet I/O. */
771 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
772 prtx_cfg.s.en = 0;
773 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
774
775 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
776 octeon_mgmt_set_mac_address(netdev, &sa);
777
778 octeon_mgmt_change_mtu(netdev, netdev->mtu);
779
780 /*
781 * Enable the port HW. Packets are not allowed until
782 * cvmx_mgmt_port_enable() is called.
783 */
784 mix_ctl.u64 = 0;
785 mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
786 mix_ctl.s.en = 1; /* Enable the port */
787 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
788 /* MII CB-request FIFO programmable high watermark */
789 mix_ctl.s.mrq_hwm = 1;
790 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
791
792 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
793 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
794 /*
795 * Force compensation values, as they are not
796 * determined properly by HW
797 */
798 union cvmx_agl_gmx_drv_ctl drv_ctl;
799
800 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
801 if (port) {
802 drv_ctl.s.byp_en1 = 1;
803 drv_ctl.s.nctl1 = 6;
804 drv_ctl.s.pctl1 = 6;
805 } else {
806 drv_ctl.s.byp_en = 1;
807 drv_ctl.s.nctl = 6;
808 drv_ctl.s.pctl = 6;
809 }
810 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
811 }
812
813 octeon_mgmt_rx_fill_ring(netdev);
814
815 /* Clear statistics. */
816 /* Clear on read. */
817 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
818 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
819 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
820
821 cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
822 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
823 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
824
825 /* Clear any pending interrupts */
826 cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
827
828 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
829 netdev)) {
830 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
831 goto err_noirq;
832 }
833
834 /* Interrupt every single RX packet */
835 mix_irhwm.u64 = 0;
836 mix_irhwm.s.irhwm = 0;
837 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
838
839 /* Interrupt when we have 5 or more packets to clean. */
840 mix_orhwm.u64 = 0;
841 mix_orhwm.s.orhwm = 5;
842 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
843
844 /* Enable receive and transmit interrupts */
845 mix_intena.u64 = 0;
846 mix_intena.s.ithena = 1;
847 mix_intena.s.othena = 1;
848 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
849
850
851 /* Enable packet I/O. */
852
853 rxx_frm_ctl.u64 = 0;
854 rxx_frm_ctl.s.pre_align = 1;
855 /*
856 * When set, disables the length check for non-min sized pkts
857 * with padding in the client data.
858 */
859 rxx_frm_ctl.s.pad_len = 1;
860 /* When set, disables the length check for VLAN pkts */
861 rxx_frm_ctl.s.vlan_len = 1;
862 /* When set, PREAMBLE checking is less strict */
863 rxx_frm_ctl.s.pre_free = 1;
864 /* Control Pause Frames can match station SMAC */
865 rxx_frm_ctl.s.ctl_smac = 0;
866 /* Control Pause Frames can match globally assign Multicast address */
867 rxx_frm_ctl.s.ctl_mcst = 1;
868 /* Forward pause information to TX block */
869 rxx_frm_ctl.s.ctl_bck = 1;
870 /* Drop Control Pause Frames */
871 rxx_frm_ctl.s.ctl_drp = 1;
872 /* Strip off the preamble */
873 rxx_frm_ctl.s.pre_strp = 1;
874 /*
875 * This port is configured to send PREAMBLE+SFD to begin every
876 * frame. GMX checks that the PREAMBLE is sent correctly.
877 */
878 rxx_frm_ctl.s.pre_chk = 1;
879 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
880
881 /* Enable the AGL block */
882 agl_gmx_inf_mode.u64 = 0;
883 agl_gmx_inf_mode.s.en = 1;
884 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
885
886 /* Configure the port duplex and enables */
887 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
888 prtx_cfg.s.tx_en = 1;
889 prtx_cfg.s.rx_en = 1;
890 prtx_cfg.s.en = 1;
891 p->last_duplex = 1;
892 prtx_cfg.s.duplex = p->last_duplex;
893 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
894
895 p->last_link = 0;
896 netif_carrier_off(netdev);
897
898 if (octeon_mgmt_init_phy(netdev)) {
899 dev_err(p->dev, "Cannot initialize PHY.\n");
900 goto err_noirq;
901 }
902
903 netif_wake_queue(netdev);
904 napi_enable(&p->napi);
905
906 return 0;
907err_noirq:
908 octeon_mgmt_reset_hw(p);
909 dma_unmap_single(p->dev, p->rx_ring_handle,
910 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
911 DMA_BIDIRECTIONAL);
912 kfree(p->rx_ring);
913err_nomem:
914 dma_unmap_single(p->dev, p->tx_ring_handle,
915 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
916 DMA_BIDIRECTIONAL);
917 kfree(p->tx_ring);
918 return -ENOMEM;
919}
920
921static int octeon_mgmt_stop(struct net_device *netdev)
922{
923 struct octeon_mgmt *p = netdev_priv(netdev);
924
925 napi_disable(&p->napi);
926 netif_stop_queue(netdev);
927
928 if (p->phydev)
929 phy_disconnect(p->phydev);
930
931 netif_carrier_off(netdev);
932
933 octeon_mgmt_reset_hw(p);
934
935
936 free_irq(p->irq, netdev);
937
938 /* dma_unmap is a nop on Octeon, so just free everything. */
939 skb_queue_purge(&p->tx_list);
940 skb_queue_purge(&p->rx_list);
941
942 dma_unmap_single(p->dev, p->rx_ring_handle,
943 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
944 DMA_BIDIRECTIONAL);
945 kfree(p->rx_ring);
946
947 dma_unmap_single(p->dev, p->tx_ring_handle,
948 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
949 DMA_BIDIRECTIONAL);
950 kfree(p->tx_ring);
951
952
953 return 0;
954}
955
956static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
957{
958 struct octeon_mgmt *p = netdev_priv(netdev);
959 int port = p->port;
960 union mgmt_port_ring_entry re;
961 unsigned long flags;
962
963 re.d64 = 0;
964 re.s.len = skb->len;
965 re.s.addr = dma_map_single(p->dev, skb->data,
966 skb->len,
967 DMA_TO_DEVICE);
968
969 spin_lock_irqsave(&p->tx_list.lock, flags);
970
971 if (unlikely(p->tx_current_fill >=
972 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
973 spin_unlock_irqrestore(&p->tx_list.lock, flags);
974
975 dma_unmap_single(p->dev, re.s.addr, re.s.len,
976 DMA_TO_DEVICE);
977
978 netif_stop_queue(netdev);
979 return NETDEV_TX_BUSY;
980 }
981
982 __skb_queue_tail(&p->tx_list, skb);
983
984 /* Put it in the ring. */
985 p->tx_ring[p->tx_next] = re.d64;
986 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
987 p->tx_current_fill++;
988
989 spin_unlock_irqrestore(&p->tx_list.lock, flags);
990
991 dma_sync_single_for_device(p->dev, p->tx_ring_handle,
992 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
993 DMA_BIDIRECTIONAL);
994
995 netdev->stats.tx_packets++;
996 netdev->stats.tx_bytes += skb->len;
997
998 /* Ring the bell. */
999 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
1000
1001 netdev->trans_start = jiffies;
1002 octeon_mgmt_clean_tx_buffers(p);
1003 octeon_mgmt_update_tx_stats(netdev);
1004 return NETDEV_TX_OK;
1005}
1006
1007#ifdef CONFIG_NET_POLL_CONTROLLER
1008static void octeon_mgmt_poll_controller(struct net_device *netdev)
1009{
1010 struct octeon_mgmt *p = netdev_priv(netdev);
1011
1012 octeon_mgmt_receive_packets(p, 16);
1013 octeon_mgmt_update_rx_stats(netdev);
1014 return;
1015}
1016#endif
1017
1018static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1019 struct ethtool_drvinfo *info)
1020{
1021 strncpy(info->driver, DRV_NAME, sizeof(info->driver));
1022 strncpy(info->version, DRV_VERSION, sizeof(info->version));
1023 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
1024 strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
1025 info->n_stats = 0;
1026 info->testinfo_len = 0;
1027 info->regdump_len = 0;
1028 info->eedump_len = 0;
1029}
1030
1031static int octeon_mgmt_get_settings(struct net_device *netdev,
1032 struct ethtool_cmd *cmd)
1033{
1034 struct octeon_mgmt *p = netdev_priv(netdev);
1035
1036 if (p->phydev)
1037 return phy_ethtool_gset(p->phydev, cmd);
1038
1039 return -EINVAL;
1040}
1041
1042static int octeon_mgmt_set_settings(struct net_device *netdev,
1043 struct ethtool_cmd *cmd)
1044{
1045 struct octeon_mgmt *p = netdev_priv(netdev);
1046
1047 if (!capable(CAP_NET_ADMIN))
1048 return -EPERM;
1049
1050 if (p->phydev)
1051 return phy_ethtool_sset(p->phydev, cmd);
1052
1053 return -EINVAL;
1054}
1055
1056static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1057 .get_drvinfo = octeon_mgmt_get_drvinfo,
1058 .get_link = ethtool_op_get_link,
1059 .get_settings = octeon_mgmt_get_settings,
1060 .set_settings = octeon_mgmt_set_settings
1061};
1062
1063static const struct net_device_ops octeon_mgmt_ops = {
1064 .ndo_open = octeon_mgmt_open,
1065 .ndo_stop = octeon_mgmt_stop,
1066 .ndo_start_xmit = octeon_mgmt_xmit,
1067 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1068 .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
1069 .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1070 .ndo_do_ioctl = octeon_mgmt_ioctl,
1071 .ndo_change_mtu = octeon_mgmt_change_mtu,
1072#ifdef CONFIG_NET_POLL_CONTROLLER
1073 .ndo_poll_controller = octeon_mgmt_poll_controller,
1074#endif
1075};
1076
1077static int __init octeon_mgmt_probe(struct platform_device *pdev)
1078{
1079 struct resource *res_irq;
1080 struct net_device *netdev;
1081 struct octeon_mgmt *p;
1082 int i;
1083
1084 netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1085 if (netdev == NULL)
1086 return -ENOMEM;
1087
1088 dev_set_drvdata(&pdev->dev, netdev);
1089 p = netdev_priv(netdev);
1090 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1091 OCTEON_MGMT_NAPI_WEIGHT);
1092
1093 p->netdev = netdev;
1094 p->dev = &pdev->dev;
1095
1096 p->port = pdev->id;
1097 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1098
1099 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1100 if (!res_irq)
1101 goto err;
1102
1103 p->irq = res_irq->start;
1104 spin_lock_init(&p->lock);
1105
1106 skb_queue_head_init(&p->tx_list);
1107 skb_queue_head_init(&p->rx_list);
1108 tasklet_init(&p->tx_clean_tasklet,
1109 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1110
1111 netdev->netdev_ops = &octeon_mgmt_ops;
1112 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1113
1114
1115 /* The mgmt ports get the first N MACs. */
1116 for (i = 0; i < 6; i++)
1117 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
1118 netdev->dev_addr[5] += p->port;
1119
1120 if (p->port >= octeon_bootinfo->mac_addr_count)
1121 dev_err(&pdev->dev,
1122 "Error %s: Using MAC outside of the assigned range: "
1123 "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
1124 netdev->dev_addr[0], netdev->dev_addr[1],
1125 netdev->dev_addr[2], netdev->dev_addr[3],
1126 netdev->dev_addr[4], netdev->dev_addr[5]);
1127
1128 if (register_netdev(netdev))
1129 goto err;
1130
1131 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1132 return 0;
1133err:
1134 free_netdev(netdev);
1135 return -ENOENT;
1136}
1137
1138static int __exit octeon_mgmt_remove(struct platform_device *pdev)
1139{
1140 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1141
1142 unregister_netdev(netdev);
1143 free_netdev(netdev);
1144 return 0;
1145}
1146
1147static struct platform_driver octeon_mgmt_driver = {
1148 .driver = {
1149 .name = "octeon_mgmt",
1150 .owner = THIS_MODULE,
1151 },
1152 .probe = octeon_mgmt_probe,
1153 .remove = __exit_p(octeon_mgmt_remove),
1154};
1155
1156extern void octeon_mdiobus_force_mod_depencency(void);
1157
1158static int __init octeon_mgmt_mod_init(void)
1159{
1160 /* Force our mdiobus driver module to be loaded first. */
1161 octeon_mdiobus_force_mod_depencency();
1162 return platform_driver_register(&octeon_mgmt_driver);
1163}
1164
1165static void __exit octeon_mgmt_mod_exit(void)
1166{
1167 platform_driver_unregister(&octeon_mgmt_driver);
1168}
1169
1170module_init(octeon_mgmt_mod_init);
1171module_exit(octeon_mgmt_mod_exit);
1172
1173MODULE_DESCRIPTION(DRV_DESCRIPTION);
1174MODULE_AUTHOR("David Daney");
1175MODULE_LICENSE("GPL");
1176MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d5d8e1c5bc91..fc5938ba3d78 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -115,4 +115,15 @@ config MDIO_GPIO
115 To compile this driver as a module, choose M here: the module 115 To compile this driver as a module, choose M here: the module
116 will be called mdio-gpio. 116 will be called mdio-gpio.
117 117
118config MDIO_OCTEON
119 tristate "Support for MDIO buses on Octeon SOCs"
120 depends on CPU_CAVIUM_OCTEON
121 default y
122 help
123
 124 This module provides a driver for the Octeon MDIO buses.
125 It is required by the Octeon Ethernet device drivers.
126
127 If in doubt, say Y.
128
118endif # PHYLIB 129endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index edfaac48cbd5..1342585af381 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 22obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
new file mode 100644
index 000000000000..61a4461cbda5
--- /dev/null
+++ b/drivers/net/phy/mdio-octeon.c
@@ -0,0 +1,180 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/phy.h>
13
14#include <asm/octeon/octeon.h>
15#include <asm/octeon/cvmx-smix-defs.h>
16
17#define DRV_VERSION "1.0"
18#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
19
20struct octeon_mdiobus {
21 struct mii_bus *mii_bus;
22 int unit;
23 int phy_irq[PHY_MAX_ADDR];
24};
25
26static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
27{
28 struct octeon_mdiobus *p = bus->priv;
29 union cvmx_smix_cmd smi_cmd;
30 union cvmx_smix_rd_dat smi_rd;
31 int timeout = 1000;
32
33 smi_cmd.u64 = 0;
34 smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
35 smi_cmd.s.phy_adr = phy_id;
36 smi_cmd.s.reg_adr = regnum;
37 cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
38
39 do {
40 /*
41 * Wait 1000 clocks so we don't saturate the RSL bus
42 * doing reads.
43 */
44 cvmx_wait(1000);
45 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
46 } while (smi_rd.s.pending && --timeout);
47
48 if (smi_rd.s.val)
49 return smi_rd.s.dat;
50 else
51 return -EIO;
52}
53
54static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
55 int regnum, u16 val)
56{
57 struct octeon_mdiobus *p = bus->priv;
58 union cvmx_smix_cmd smi_cmd;
59 union cvmx_smix_wr_dat smi_wr;
60 int timeout = 1000;
61
62 smi_wr.u64 = 0;
63 smi_wr.s.dat = val;
64 cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);
65
66 smi_cmd.u64 = 0;
67 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
68 smi_cmd.s.phy_adr = phy_id;
69 smi_cmd.s.reg_adr = regnum;
70 cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
71
72 do {
73 /*
74 * Wait 1000 clocks so we don't saturate the RSL bus
75 * doing reads.
76 */
77 cvmx_wait(1000);
78 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
79 } while (smi_wr.s.pending && --timeout);
80
81 if (timeout <= 0)
82 return -EIO;
83
84 return 0;
85}
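/*
 * For reference, phylib drives these callbacks through the generic bus
 * helpers; a minimal sketch (assuming a registered bus and a PHY at
 * phy_addr) would be:
 *
 *	int id1 = mdiobus_read(bus->mii_bus, phy_addr, MII_PHYSID1);
 *	mdiobus_write(bus->mii_bus, phy_addr, MII_BMCR, BMCR_RESET);
 */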
86
87static int __init octeon_mdiobus_probe(struct platform_device *pdev)
88{
89 struct octeon_mdiobus *bus;
90 int i;
91 int err = -ENOENT;
92
93 bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
94 if (!bus)
95 return -ENOMEM;
96
97 /* The platform_device id is our unit number. */
98 bus->unit = pdev->id;
99
100 bus->mii_bus = mdiobus_alloc();
101
102 if (!bus->mii_bus)
103 goto err;
104
105 /*
106 * Standard Octeon evaluation boards don't support phy
 107 * interrupts, so we need to poll.
108 */
109 for (i = 0; i < PHY_MAX_ADDR; i++)
110 bus->phy_irq[i] = PHY_POLL;
111
112 bus->mii_bus->priv = bus;
113 bus->mii_bus->irq = bus->phy_irq;
114 bus->mii_bus->name = "mdio-octeon";
115 snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
116 bus->mii_bus->parent = &pdev->dev;
117
118 bus->mii_bus->read = octeon_mdiobus_read;
119 bus->mii_bus->write = octeon_mdiobus_write;
120
121 dev_set_drvdata(&pdev->dev, bus);
122
123 err = mdiobus_register(bus->mii_bus);
124 if (err)
125 goto err_register;
126
127 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
128
129 return 0;
130err_register:
131 mdiobus_free(bus->mii_bus);
132
133err:
134 devm_kfree(&pdev->dev, bus);
135 return err;
136}
137
138static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
139{
140 struct octeon_mdiobus *bus;
141
142 bus = dev_get_drvdata(&pdev->dev);
143
144 mdiobus_unregister(bus->mii_bus);
145 mdiobus_free(bus->mii_bus);
146 return 0;
147}
148
149static struct platform_driver octeon_mdiobus_driver = {
150 .driver = {
151 .name = "mdio-octeon",
152 .owner = THIS_MODULE,
153 },
154 .probe = octeon_mdiobus_probe,
155 .remove = __exit_p(octeon_mdiobus_remove),
156};
157
158void octeon_mdiobus_force_mod_depencency(void)
159{
160 /* Let ethernet drivers force us to be loaded. */
161}
162EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
163
164static int __init octeon_mdiobus_mod_init(void)
165{
166 return platform_driver_register(&octeon_mdiobus_driver);
167}
168
169static void __exit octeon_mdiobus_mod_exit(void)
170{
171 platform_driver_unregister(&octeon_mdiobus_driver);
172}
173
174module_init(octeon_mdiobus_mod_init);
175module_exit(octeon_mdiobus_mod_exit);
176
177MODULE_DESCRIPTION(DRV_DESCRIPTION);
178MODULE_VERSION(DRV_VERSION);
179MODULE_AUTHOR("David Daney");
180MODULE_LICENSE("GPL");
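
The probe above binds purely by name: the platform bus matches a device called "mdio-octeon" against octeon_mdiobus_driver, and pdev->id becomes the SMI unit number. As a rough sketch of that contract (the helper below is hypothetical; real Octeon platform setup code may create these devices differently), board code could register one unit like this:

#include <linux/err.h>
#include <linux/platform_device.h>

/*
 * Hypothetical board-setup helper, shown only to illustrate how the
 * driver above gets bound: the name must match octeon_mdiobus_driver
 * ("mdio-octeon"), and the id ends up in bus->unit during probe.
 */
static int __init octeon_register_smi_unit(int unit)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("mdio-octeon", unit, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
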
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 675b7df632fc..27ca859e7453 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,7 +63,7 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include <linux/utsrelease.h> 66#include <generated/utsrelease.h>
67 67
68/************************ 68/************************
69 * forward declarations * 69 * forward declarations *
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3aabf1e37988..76e640bccde8 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -291,7 +291,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
291 skt->nr = ops->first + i; 291 skt->nr = ops->first + i;
292 skt->ops = ops; 292 skt->ops = ops;
293 skt->socket.owner = ops->owner; 293 skt->socket.owner = ops->owner;
294 skt->socket.dev.parent = dev; 294 skt->socket.dev.parent = &dev->dev;
295 skt->socket.pci_irq = NO_IRQ; 295 skt->socket.pci_irq = NO_IRQ;
296 296
297 ret = pxa2xx_drv_pcmcia_add_one(skt); 297 ret = pxa2xx_drv_pcmcia_add_one(skt);
@@ -304,8 +304,8 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
304 soc_pcmcia_remove_one(&sinfo->skt[i]); 304 soc_pcmcia_remove_one(&sinfo->skt[i]);
305 kfree(sinfo); 305 kfree(sinfo);
306 } else { 306 } else {
307 pxa2xx_configure_sockets(dev); 307 pxa2xx_configure_sockets(&dev->dev);
308 dev_set_drvdata(dev, sinfo); 308 dev_set_drvdata(&dev->dev, sinfo);
309 } 309 }
310 310
311 return ret; 311 return ret;
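
The pxa2xx hunks above work because struct platform_device embeds a generic struct device; APIs such as dev_set_drvdata() and the socket core want that embedded object, not the platform_device pointer itself. A minimal illustration (the function name is made up for the example):

#include <linux/platform_device.h>

/* Illustration only: pass the embedded generic device to driver-model APIs. */
static void pxa2xx_example_drvdata(struct platform_device *pdev, void *data)
{
	struct device *dev = &pdev->dev;	/* what the hunks switch to */

	dev_set_drvdata(dev, data);
}
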
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 11003bba10d3..1a387e79f719 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -51,7 +51,6 @@
51#include <linux/dmi.h> 51#include <linux/dmi.h>
52#include <linux/backlight.h> 52#include <linux/backlight.h>
53#include <linux/platform_device.h> 53#include <linux/platform_device.h>
54#include <linux/autoconf.h>
55 54
56#define COMPAL_DRIVER_VERSION "0.2.6" 55#define COMPAL_DRIVER_VERSION "0.2.6"
57 56
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
new file mode 100644
index 000000000000..04719551381b
--- /dev/null
+++ b/drivers/regulator/88pm8607.c
@@ -0,0 +1,685 @@
1/*
2 * Regulators driver for Marvell 88PM8607
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/regulator/driver.h>
16#include <linux/regulator/machine.h>
17#include <linux/mfd/88pm8607.h>
18
19struct pm8607_regulator_info {
20 struct regulator_desc desc;
21 struct pm8607_chip *chip;
22 struct regulator_dev *regulator;
23
24 int min_uV;
25 int max_uV;
26 int step_uV;
27 int vol_reg;
28 int vol_shift;
29 int vol_nbits;
30 int update_reg;
31 int update_bit;
32 int enable_reg;
33 int enable_bit;
34 int slope_double;
35};
36
37static inline int check_range(struct pm8607_regulator_info *info,
38 int min_uV, int max_uV)
39{
40 if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
41 return -EINVAL;
42
43 return 0;
44}
45
46static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
47{
48 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
49 uint8_t chip_id = info->chip->chip_id;
50 int ret = -EINVAL;
51
52 switch (info->desc.id) {
53 case PM8607_ID_BUCK1:
54 ret = (index < 0x1d) ? (index * 25000 + 800000) :
55 ((index < 0x20) ? 1500000 :
56 ((index < 0x40) ? ((index - 0x20) * 25000) :
57 -EINVAL));
58 break;
59 case PM8607_ID_BUCK3:
60 ret = (index < 0x3d) ? (index * 25000) :
61 ((index < 0x40) ? 1500000 : -EINVAL);
62 if (ret < 0)
63 break;
64 if (info->slope_double)
65 ret <<= 1;
66 break;
67 case PM8607_ID_LDO1:
68 ret = (index == 0) ? 1800000 :
69 ((index == 1) ? 1200000 :
70 ((index == 2) ? 2800000 : -EINVAL));
71 break;
72 case PM8607_ID_LDO5:
73 ret = (index == 0) ? 2900000 :
74 ((index == 1) ? 3000000 :
75 ((index == 2) ? 3100000 : 3300000));
76 break;
77 case PM8607_ID_LDO7:
78 case PM8607_ID_LDO8:
79 ret = (index < 3) ? (index * 50000 + 1800000) :
80 ((index < 8) ? (index * 50000 + 2550000) :
81 -EINVAL);
82 break;
83 case PM8607_ID_LDO12:
84 ret = (index < 2) ? (index * 100000 + 1800000) :
85 ((index < 7) ? (index * 100000 + 2500000) :
86 ((index == 7) ? 3300000 : 1200000));
87 break;
88 case PM8607_ID_LDO2:
89 case PM8607_ID_LDO3:
90 case PM8607_ID_LDO9:
91 switch (chip_id) {
92 case PM8607_CHIP_A0:
93 case PM8607_CHIP_A1:
94 ret = (index < 3) ? (index * 50000 + 1800000) :
95 ((index < 8) ? (index * 50000 + 2550000) :
96 -EINVAL);
97 break;
98 case PM8607_CHIP_B0:
99 ret = (index < 3) ? (index * 50000 + 1800000) :
100 ((index < 7) ? (index * 50000 + 2550000) :
101 3300000);
102 break;
103 }
104 break;
105 case PM8607_ID_LDO4:
106 switch (chip_id) {
107 case PM8607_CHIP_A0:
108 case PM8607_CHIP_A1:
109 ret = (index < 3) ? (index * 50000 + 1800000) :
110 ((index < 8) ? (index * 50000 + 2550000) :
111 -EINVAL);
112 break;
113 case PM8607_CHIP_B0:
114 ret = (index < 3) ? (index * 50000 + 1800000) :
115 ((index < 6) ? (index * 50000 + 2550000) :
116 ((index == 6) ? 2900000 : 3300000));
117 break;
118 }
119 break;
120 case PM8607_ID_LDO6:
121 switch (chip_id) {
122 case PM8607_CHIP_A0:
123 case PM8607_CHIP_A1:
124 ret = (index < 3) ? (index * 50000 + 1800000) :
125 ((index < 8) ? (index * 50000 + 2450000) :
126 -EINVAL);
127 break;
128 case PM8607_CHIP_B0:
129 ret = (index < 2) ? (index * 50000 + 1800000) :
130 ((index < 7) ? (index * 50000 + 2500000) :
131 3300000);
132 break;
133 }
134 break;
135 case PM8607_ID_LDO10:
136 switch (chip_id) {
137 case PM8607_CHIP_A0:
138 case PM8607_CHIP_A1:
139 ret = (index < 3) ? (index * 50000 + 1800000) :
140 ((index < 8) ? (index * 50000 + 2550000) :
141 1200000);
142 break;
143 case PM8607_CHIP_B0:
144 ret = (index < 3) ? (index * 50000 + 1800000) :
145 ((index < 7) ? (index * 50000 + 2550000) :
146 ((index == 7) ? 3300000 : 1200000));
147 break;
148 }
149 break;
150 case PM8607_ID_LDO14:
151 switch (chip_id) {
152 case PM8607_CHIP_A0:
153 case PM8607_CHIP_A1:
154 ret = (index < 3) ? (index * 50000 + 1800000) :
155 ((index < 8) ? (index * 50000 + 2550000) :
156 -EINVAL);
157 break;
158 case PM8607_CHIP_B0:
159 ret = (index < 2) ? (index * 50000 + 1800000) :
160 ((index < 7) ? (index * 50000 + 2600000) :
161 3300000);
162 break;
163 }
164 break;
165 }
166 return ret;
167}
168
169static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
170{
171 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
172 uint8_t chip_id = info->chip->chip_id;
173 int val = -ENOENT;
174 int ret;
175
176 switch (info->desc.id) {
177 case PM8607_ID_BUCK1:
178 if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
179 val = (min_uV - 775001) / 25000;
180 else { /* 25mV ~ 775mV / 25mV */
181 val = (min_uV + 249999) / 25000;
182 val += 32;
183 }
184 break;
185 case PM8607_ID_BUCK3:
186 if (info->slope_double)
187 min_uV = min_uV >> 1;
188 val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
189
190 break;
191 case PM8607_ID_LDO1:
192 if (min_uV > 1800000)
193 val = 2;
194 else if (min_uV > 1200000)
195 val = 0;
196 else
197 val = 1;
198 break;
199 case PM8607_ID_LDO5:
200 if (min_uV > 3100000)
201 val = 3;
202 else /* 2900mV ~ 3100mV / 100mV */
203 val = (min_uV - 2800001) / 100000;
204 break;
205 case PM8607_ID_LDO7:
206 case PM8607_ID_LDO8:
207 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
208 if (min_uV <= 1800000)
209			val = 0;	/* 1800mV */
210 else if (min_uV <= 1900000)
211 val = (min_uV - 1750001) / 50000;
212 else
213 val = 3; /* 2700mV */
214 } else { /* 2700mV ~ 2900mV / 50mV */
215 if (min_uV <= 2900000) {
216 val = (min_uV - 2650001) / 50000;
217 val += 3;
218 } else
219 val = -EINVAL;
220 }
221 break;
222 case PM8607_ID_LDO10:
223 if (min_uV > 2850000)
224 val = 7;
225 else if (min_uV <= 1200000)
226 val = 8;
227 else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
228 val = (min_uV - 1750001) / 50000;
229 else { /* 2700mV ~ 2850mV / 50mV */
230 val = (min_uV - 2650001) / 50000;
231 val += 3;
232 }
233 break;
234 case PM8607_ID_LDO12:
235 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
236 if (min_uV <= 1200000)
237 val = 8; /* 1200mV */
238 else if (min_uV <= 1800000)
239 val = 0; /* 1800mV */
240 else if (min_uV <= 1900000)
241 val = (min_uV - 1700001) / 100000;
242 else
243 val = 2; /* 2700mV */
244 } else { /* 2700mV ~ 3100mV / 100mV */
245 if (min_uV <= 3100000) {
246 val = (min_uV - 2600001) / 100000;
247 val += 2;
248 } else if (min_uV <= 3300000)
249 val = 7;
250 else
251 val = -EINVAL;
252 }
253 break;
254 case PM8607_ID_LDO2:
255 case PM8607_ID_LDO3:
256 case PM8607_ID_LDO9:
257 switch (chip_id) {
258 case PM8607_CHIP_A0:
259 case PM8607_CHIP_A1:
260 if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
261 if (min_uV <= 1800000)
262 val = 0;
263 else if (min_uV <= 1900000)
264 val = (min_uV - 1750001) / 50000;
265 else
266 val = 3; /* 2700mV */
267 else { /* 2700mV ~ 2900mV / 50mV */
268 if (min_uV <= 2900000) {
269 val = (min_uV - 2650001) / 50000;
270 val += 3;
271 } else
272 val = -EINVAL;
273 }
274 break;
275 case PM8607_CHIP_B0:
276 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
277 if (min_uV <= 1800000)
278 val = 0;
279 else if (min_uV <= 1900000)
280 val = (min_uV - 1750001) / 50000;
281 else
282 val = 3; /* 2700mV */
283 } else { /* 2700mV ~ 2850mV / 50mV */
284 if (min_uV <= 2850000) {
285 val = (min_uV - 2650001) / 50000;
286 val += 3;
287 } else if (min_uV <= 3300000)
288 val = 7;
289 else
290 val = -EINVAL;
291 }
292 break;
293 }
294 break;
295 case PM8607_ID_LDO4:
296 switch (chip_id) {
297 case PM8607_CHIP_A0:
298 case PM8607_CHIP_A1:
299 if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
300 if (min_uV <= 1800000)
301 val = 0;
302 else if (min_uV <= 1900000)
303 val = (min_uV - 1750001) / 50000;
304 else
305 val = 3; /* 2700mV */
306 else { /* 2700mV ~ 2900mV / 50mV */
307 if (min_uV <= 2900000) {
308 val = (min_uV - 2650001) / 50000;
309 val += 3;
310 } else
311 val = -EINVAL;
312 }
313 break;
314 case PM8607_CHIP_B0:
315 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
316 if (min_uV <= 1800000)
317 val = 0;
318 else if (min_uV <= 1900000)
319 val = (min_uV - 1750001) / 50000;
320 else
321 val = 3; /* 2700mV */
322 } else { /* 2700mV ~ 2800mV / 50mV */
323 if (min_uV <= 2850000) {
324 val = (min_uV - 2650001) / 50000;
325 val += 3;
326 } else if (min_uV <= 2900000)
327 val = 6;
328 else if (min_uV <= 3300000)
329 val = 7;
330 else
331 val = -EINVAL;
332 }
333 break;
334 }
335 break;
336 case PM8607_ID_LDO6:
337 switch (chip_id) {
338 case PM8607_CHIP_A0:
339 case PM8607_CHIP_A1:
340 if (min_uV < 2600000) { /* 1800mV ~ 1900mV / 50mV */
341 if (min_uV <= 1800000)
342 val = 0;
343 else if (min_uV <= 1900000)
344 val = (min_uV - 1750001) / 50000;
345 else
346 val = 3; /* 2600mV */
347 } else { /* 2600mV ~ 2800mV / 50mV */
348 if (min_uV <= 2800000) {
349 val = (min_uV - 2550001) / 50000;
350 val += 3;
351 } else
352 val = -EINVAL;
353 }
354 break;
355 case PM8607_CHIP_B0:
356 if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
357 if (min_uV <= 1800000)
358 val = 0;
359 else if (min_uV <= 1850000)
360 val = (min_uV - 1750001) / 50000;
361 else
362 val = 2; /* 2600mV */
363 } else { /* 2600mV ~ 2800mV / 50mV */
364 if (min_uV <= 2800000) {
365 val = (min_uV - 2550001) / 50000;
366 val += 2;
367 } else if (min_uV <= 3300000)
368 val = 7;
369 else
370 val = -EINVAL;
371 }
372 break;
373 }
374 break;
375 case PM8607_ID_LDO14:
376 switch (chip_id) {
377 case PM8607_CHIP_A0:
378 case PM8607_CHIP_A1:
379 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
380 if (min_uV <= 1800000)
381 val = 0;
382 else if (min_uV <= 1900000)
383 val = (min_uV - 1750001) / 50000;
384 else
385 val = 3; /* 2700mV */
386 } else { /* 2700mV ~ 2900mV / 50mV */
387 if (min_uV <= 2900000) {
388 val = (min_uV - 2650001) / 50000;
389 val += 3;
390 } else
391 val = -EINVAL;
392 }
393 break;
394 case PM8607_CHIP_B0:
395 if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
396 if (min_uV <= 1800000)
397 val = 0;
398 else if (min_uV <= 1850000)
399 val = (min_uV - 1750001) / 50000;
400 else
401 val = 2; /* 2700mV */
402 } else { /* 2700mV ~ 2900mV / 50mV */
403 if (min_uV <= 2900000) {
404 val = (min_uV - 2650001) / 50000;
405 val += 2;
406 } else if (min_uV <= 3300000)
407 val = 7;
408 else
409 val = -EINVAL;
410 }
411 break;
412 }
413 break;
414 }
415 if (val >= 0) {
416 ret = pm8607_list_voltage(rdev, val);
417 if (ret > max_uV) {
418			pr_err("requested voltage exceeds range (%d, %d) uV\n",
419				min_uV, max_uV);
420 return -EINVAL;
421 }
422 } else
423		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
424 return val;
425}
426
427static int pm8607_set_voltage(struct regulator_dev *rdev,
428 int min_uV, int max_uV)
429{
430 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
431 struct pm8607_chip *chip = info->chip;
432 uint8_t val, mask;
433 int ret;
434
435 if (check_range(info, min_uV, max_uV)) {
436 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
437 return -EINVAL;
438 }
439
440 ret = choose_voltage(rdev, min_uV, max_uV);
441 if (ret < 0)
442 return -EINVAL;
443 val = (uint8_t)(ret << info->vol_shift);
444 mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
445
446 ret = pm8607_set_bits(chip, info->vol_reg, mask, val);
447 if (ret)
448 return ret;
449 switch (info->desc.id) {
450 case PM8607_ID_BUCK1:
451 case PM8607_ID_BUCK3:
452 ret = pm8607_set_bits(chip, info->update_reg,
453 1 << info->update_bit,
454 1 << info->update_bit);
455 break;
456 }
457 return ret;
458}
459
460static int pm8607_get_voltage(struct regulator_dev *rdev)
461{
462 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
463 struct pm8607_chip *chip = info->chip;
464 uint8_t val, mask;
465 int ret;
466
467 ret = pm8607_reg_read(chip, info->vol_reg);
468 if (ret < 0)
469 return ret;
470
471 mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
472 val = ((unsigned char)ret & mask) >> info->vol_shift;
473
474 return pm8607_list_voltage(rdev, val);
475}
476
477static int pm8607_enable(struct regulator_dev *rdev)
478{
479 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
480 struct pm8607_chip *chip = info->chip;
481
482 return pm8607_set_bits(chip, info->enable_reg,
483 1 << info->enable_bit,
484 1 << info->enable_bit);
485}
486
487static int pm8607_disable(struct regulator_dev *rdev)
488{
489 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
490 struct pm8607_chip *chip = info->chip;
491
492 return pm8607_set_bits(chip, info->enable_reg,
493 1 << info->enable_bit, 0);
494}
495
496static int pm8607_is_enabled(struct regulator_dev *rdev)
497{
498 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
499 struct pm8607_chip *chip = info->chip;
500 int ret;
501
502 ret = pm8607_reg_read(chip, info->enable_reg);
503 if (ret < 0)
504 return ret;
505
506 return !!((unsigned char)ret & (1 << info->enable_bit));
507}
508
509static struct regulator_ops pm8607_regulator_ops = {
510 .set_voltage = pm8607_set_voltage,
511 .get_voltage = pm8607_get_voltage,
512 .enable = pm8607_enable,
513 .disable = pm8607_disable,
514 .is_enabled = pm8607_is_enabled,
515};
516
517#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
518{ \
519 .desc = { \
520 .name = "BUCK" #_id, \
521 .ops = &pm8607_regulator_ops, \
522 .type = REGULATOR_VOLTAGE, \
523 .id = PM8607_ID_BUCK##_id, \
524 .owner = THIS_MODULE, \
525 }, \
526 .min_uV = (min) * 1000, \
527 .max_uV = (max) * 1000, \
528 .step_uV = (step) * 1000, \
529 .vol_reg = PM8607_##vreg, \
530 .vol_shift = (0), \
531 .vol_nbits = (nbits), \
532 .update_reg = PM8607_##ureg, \
533 .update_bit = (ubit), \
534 .enable_reg = PM8607_##ereg, \
535 .enable_bit = (ebit), \
536 .slope_double = (0), \
537}
538
539#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
540{ \
541 .desc = { \
542 .name = "LDO" #_id, \
543 .ops = &pm8607_regulator_ops, \
544 .type = REGULATOR_VOLTAGE, \
545 .id = PM8607_ID_LDO##_id, \
546 .owner = THIS_MODULE, \
547 }, \
548 .min_uV = (min) * 1000, \
549 .max_uV = (max) * 1000, \
550 .step_uV = (step) * 1000, \
551 .vol_reg = PM8607_##vreg, \
552 .vol_shift = (shift), \
553 .vol_nbits = (nbits), \
554 .enable_reg = PM8607_##ereg, \
555 .enable_bit = (ebit), \
556 .slope_double = (0), \
557}
558
559static struct pm8607_regulator_info pm8607_regulator_info[] = {
560 PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
561 PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
562
563 PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
564 PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
565 PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
566 PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
567 PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
568 PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
569 PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
570 PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
571 PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
572 PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
573 PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
574 PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
575};
576
577static inline struct pm8607_regulator_info *find_regulator_info(int id)
578{
579 struct pm8607_regulator_info *info;
580 int i;
581
582 for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
583 info = &pm8607_regulator_info[i];
584 if (info->desc.id == id)
585 return info;
586 }
587 return NULL;
588}
589
590static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
591{
592 struct pm8607_chip *chip = dev_get_drvdata(pdev->dev.parent);
593 struct pm8607_platform_data *pdata = chip->dev->platform_data;
594 struct pm8607_regulator_info *info = NULL;
595
596 info = find_regulator_info(pdev->id);
597 if (info == NULL) {
598 dev_err(&pdev->dev, "invalid regulator ID specified\n");
599 return -EINVAL;
600 }
601
602 info->chip = chip;
603
604 info->regulator = regulator_register(&info->desc, &pdev->dev,
605 pdata->regulator[pdev->id], info);
606 if (IS_ERR(info->regulator)) {
607 dev_err(&pdev->dev, "failed to register regulator %s\n",
608 info->desc.name);
609 return PTR_ERR(info->regulator);
610 }
611
612 /* check DVC ramp slope double */
613 if (info->desc.id == PM8607_ID_BUCK3)
614 if (info->chip->buck3_double)
615 info->slope_double = 1;
616
617 platform_set_drvdata(pdev, info);
618 return 0;
619}
620
621static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
622{
623 struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
624
625 regulator_unregister(info->regulator);
626 return 0;
627}
628
629#define PM8607_REGULATOR_DRIVER(_name) \
630{ \
631 .driver = { \
632 .name = "88pm8607-" #_name, \
633 .owner = THIS_MODULE, \
634 }, \
635 .probe = pm8607_regulator_probe, \
636 .remove = __devexit_p(pm8607_regulator_remove), \
637}
638
639static struct platform_driver pm8607_regulator_driver[] = {
640 PM8607_REGULATOR_DRIVER(buck1),
641 PM8607_REGULATOR_DRIVER(buck2),
642 PM8607_REGULATOR_DRIVER(buck3),
643 PM8607_REGULATOR_DRIVER(ldo1),
644 PM8607_REGULATOR_DRIVER(ldo2),
645 PM8607_REGULATOR_DRIVER(ldo3),
646 PM8607_REGULATOR_DRIVER(ldo4),
647 PM8607_REGULATOR_DRIVER(ldo5),
648 PM8607_REGULATOR_DRIVER(ldo6),
649 PM8607_REGULATOR_DRIVER(ldo7),
650 PM8607_REGULATOR_DRIVER(ldo8),
651 PM8607_REGULATOR_DRIVER(ldo9),
652 PM8607_REGULATOR_DRIVER(ldo10),
653 PM8607_REGULATOR_DRIVER(ldo12),
654 PM8607_REGULATOR_DRIVER(ldo14),
655};
656
657static int __init pm8607_regulator_init(void)
658{
659 int i, count, ret;
660
661 count = ARRAY_SIZE(pm8607_regulator_driver);
662 for (i = 0; i < count; i++) {
663 ret = platform_driver_register(&pm8607_regulator_driver[i]);
664 if (ret != 0)
665 pr_err("Failed to register regulator driver: %d\n",
666 ret);
667 }
668 return 0;
669}
670subsys_initcall(pm8607_regulator_init);
671
672static void __exit pm8607_regulator_exit(void)
673{
674 int i, count;
675
676 count = ARRAY_SIZE(pm8607_regulator_driver);
677 for (i = 0; i < count; i++)
678 platform_driver_unregister(&pm8607_regulator_driver[i]);
679}
680module_exit(pm8607_regulator_exit);
681
682MODULE_LICENSE("GPL");
683MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
684MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC");
685MODULE_ALIAS("platform:88pm8607-regulator");
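
A note on the selector arithmetic used throughout choose_voltage() above: expressions such as (min_uV - 775001) / 25000 are integer divisions arranged so the result is the smallest selector whose voltage is at least min_uV (775001 is 800000 - 25000 + 1 for BUCK1's 800 mV base and 25 mV step). A standalone sketch with worked values; the helper name is illustrative and not part of the driver:

#include <assert.h>

/*
 * Smallest selector n with base_uV + n * step_uV >= min_uV, assuming
 * min_uV already lies within the regulator's supported range.
 */
static unsigned int uv_to_selector(int min_uV, int base_uV, int step_uV)
{
	return (min_uV - (base_uV - step_uV + 1)) / step_uV;
}

int main(void)
{
	/* BUCK1 numbers from above: 800 mV base, 25 mV steps. */
	assert(uv_to_selector(800000, 800000, 25000) == 0);
	assert(uv_to_selector(800001, 800000, 25000) == 1);	/* rounds up */
	assert(uv_to_selector(1200000, 800000, 25000) == 16);	/* 800 mV + 16 * 25 mV */
	return 0;
}
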
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 7cfdd65bebb4..262f62eec837 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -69,6 +69,13 @@ config REGULATOR_MAX1586
69 regulator via I2C bus. The provided regulator is suitable 69 regulator via I2C bus. The provided regulator is suitable
70 for PXA27x chips to control VCC_CORE and VCC_USIM voltages. 70 for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
71 71
72config REGULATOR_MAX8660
73 tristate "Maxim 8660/8661 voltage regulator"
74 depends on I2C
75 help
76 This driver controls a Maxim 8660/8661 voltage output
77 regulator via I2C bus.
78
72config REGULATOR_TWL4030 79config REGULATOR_TWL4030
73 bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC" 80 bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
74 depends on TWL4030_CORE 81 depends on TWL4030_CORE
@@ -157,5 +164,11 @@ config REGULATOR_TPS6507X
157 three step-down converters and two general-purpose LDO voltage regulators. 164 three step-down converters and two general-purpose LDO voltage regulators.
158 It supports TI's software based Class-2 SmartReflex implementation. 165 It supports TI's software based Class-2 SmartReflex implementation.
159 166
167config REGULATOR_88PM8607
168 bool "Marvell 88PM8607 Power regulators"
169 depends on MFD_88PM8607=y
170 help
171 This driver supports 88PM8607 voltage regulator chips.
172
160endif 173endif
161 174
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9ae3cc44e668..b3c806c79415 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
12obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o 12obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
13obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o 13obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
14obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o 14obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
15obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
15obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o 16obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
16obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o 17obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
17obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o 18obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
@@ -20,10 +21,11 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
20obj-$(CONFIG_REGULATOR_DA903X) += da903x.o 21obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
21obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o 22obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
22obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o 23obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
23obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o 24obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
24obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o 25obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
25 26
26obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o 27obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
27obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o 28obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
29obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
28 30
29ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 31ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 49aeee823a25..b349db4504b7 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -81,7 +81,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
81#define LDO_C_VOLTAGE 2650000 81#define LDO_C_VOLTAGE 2650000
82#define LDO_D_VOLTAGE 2650000 82#define LDO_D_VOLTAGE 2650000
83 83
84static const int const ldo_e_buck_typ_voltages[] = { 84static const int ldo_e_buck_typ_voltages[] = {
85 1800000, 85 1800000,
86 1400000, 86 1400000,
87 1300000, 87 1300000,
@@ -91,7 +91,7 @@ static const int const ldo_e_buck_typ_voltages[] = {
91 900000, 91 900000,
92}; 92};
93 93
94static const int const ldo_f_typ_voltages[] = { 94static const int ldo_f_typ_voltages[] = {
95 1800000, 95 1800000,
96 1400000, 96 1400000,
97 1300000, 97 1300000,
@@ -102,21 +102,21 @@ static const int const ldo_f_typ_voltages[] = {
102 2650000, 102 2650000,
103}; 103};
104 104
105static const int const ldo_g_typ_voltages[] = { 105static const int ldo_g_typ_voltages[] = {
106 2850000, 106 2850000,
107 2750000, 107 2750000,
108 1800000, 108 1800000,
109 1500000, 109 1500000,
110}; 110};
111 111
112static const int const ldo_h_typ_voltages[] = { 112static const int ldo_h_typ_voltages[] = {
113 2750000, 113 2750000,
114 1800000, 114 1800000,
115 1500000, 115 1500000,
116 1200000, 116 1200000,
117}; 117};
118 118
119static const int const ldo_k_typ_voltages[] = { 119static const int ldo_k_typ_voltages[] = {
120 2750000, 120 2750000,
121 1800000, 121 1800000,
122}; 122};
@@ -241,24 +241,12 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
241 * LDO D is a special regulator. When it is disabled, the entire 241 * LDO D is a special regulator. When it is disabled, the entire
242 * system is shut down. So this is handled specially. 242 * system is shut down. So this is handled specially.
243 */ 243 */
244 pr_info("Called ab3100_disable_regulator\n");
244 if (abreg->regreg == AB3100_LDO_D) { 245 if (abreg->regreg == AB3100_LDO_D) {
245 int i;
246
247 dev_info(&reg->dev, "disabling LDO D - shut down system\n"); 246 dev_info(&reg->dev, "disabling LDO D - shut down system\n");
248 /*
249 * Set regulators to default values, ignore any errors,
250 * we're going DOWN
251 */
252 for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
253 (void) ab3100_set_register_interruptible(abreg->ab3100,
254 ab3100_reg_init_order[i],
255 abreg->plfdata->reg_initvals[i]);
256 }
257
258 /* Setting LDO D to 0x00 cuts the power to the SoC */ 247 /* Setting LDO D to 0x00 cuts the power to the SoC */
259 return ab3100_set_register_interruptible(abreg->ab3100, 248 return ab3100_set_register_interruptible(abreg->ab3100,
260 AB3100_LDO_D, 0x00U); 249 AB3100_LDO_D, 0x00U);
261
262 } 250 }
263 251
264 /* 252 /*
@@ -607,13 +595,6 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
607 } 595 }
608 } 596 }
609 597
610 if (err) {
611 dev_err(&pdev->dev,
612 "LDO D regulator initialization failed with error %d\n",
613 err);
614 return err;
615 }
616
617 /* Register the regulators */ 598 /* Register the regulators */
618 for (i = 0; i < AB3100_NUM_REGULATORS; i++) { 599 for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
619 struct ab3100_regulator *reg = &ab3100_regulators[i]; 600 struct ab3100_regulator *reg = &ab3100_regulators[i];
@@ -688,7 +669,7 @@ static __init int ab3100_regulators_init(void)
688 669
689static __exit void ab3100_regulators_exit(void) 670static __exit void ab3100_regulators_exit(void)
690{ 671{
691 platform_driver_register(&ab3100_regulators_driver); 672 platform_driver_unregister(&ab3100_regulators_driver);
692} 673}
693 674
694subsys_initcall(ab3100_regulators_init); 675subsys_initcall(ab3100_regulators_init);
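
Two small things the ab3100 hunks above clean up are worth spelling out: the voltage tables carried a duplicated const qualifier, and the module exit path called platform_driver_register() instead of unregister(). What the arrays now declare is simply a file-scope read-only table; the array below is only an example:

/*
 * Equivalent to what ab3100.c now uses.  Writing "const int const"
 * repeats the qualifier; compilers accept it, but it adds nothing and
 * sparse warns about the duplication.
 */
static const int example_typ_voltages[] = {
	1800000,
	1400000,
	1300000,
};
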
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index efe568deda12..686ef270ecf7 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -66,6 +66,16 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
66static void _notifier_call_chain(struct regulator_dev *rdev, 66static void _notifier_call_chain(struct regulator_dev *rdev,
67 unsigned long event, void *data); 67 unsigned long event, void *data);
68 68
69static const char *rdev_get_name(struct regulator_dev *rdev)
70{
71 if (rdev->constraints && rdev->constraints->name)
72 return rdev->constraints->name;
73 else if (rdev->desc->name)
74 return rdev->desc->name;
75 else
76 return "";
77}
78
69/* gets the regulator for a given consumer device */ 79/* gets the regulator for a given consumer device */
70static struct regulator *get_device_regulator(struct device *dev) 80static struct regulator *get_device_regulator(struct device *dev)
71{ 81{
@@ -96,12 +106,12 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
96 106
97 if (!rdev->constraints) { 107 if (!rdev->constraints) {
98 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 108 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
99 rdev->desc->name); 109 rdev_get_name(rdev));
100 return -ENODEV; 110 return -ENODEV;
101 } 111 }
102 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { 112 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
103 printk(KERN_ERR "%s: operation not allowed for %s\n", 113 printk(KERN_ERR "%s: operation not allowed for %s\n",
104 __func__, rdev->desc->name); 114 __func__, rdev_get_name(rdev));
105 return -EPERM; 115 return -EPERM;
106 } 116 }
107 117
@@ -124,12 +134,12 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
124 134
125 if (!rdev->constraints) { 135 if (!rdev->constraints) {
126 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 136 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
127 rdev->desc->name); 137 rdev_get_name(rdev));
128 return -ENODEV; 138 return -ENODEV;
129 } 139 }
130 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) { 140 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
131 printk(KERN_ERR "%s: operation not allowed for %s\n", 141 printk(KERN_ERR "%s: operation not allowed for %s\n",
132 __func__, rdev->desc->name); 142 __func__, rdev_get_name(rdev));
133 return -EPERM; 143 return -EPERM;
134 } 144 }
135 145
@@ -159,17 +169,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
159 169
160 if (!rdev->constraints) { 170 if (!rdev->constraints) {
161 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 171 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
162 rdev->desc->name); 172 rdev_get_name(rdev));
163 return -ENODEV; 173 return -ENODEV;
164 } 174 }
165 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) { 175 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
166 printk(KERN_ERR "%s: operation not allowed for %s\n", 176 printk(KERN_ERR "%s: operation not allowed for %s\n",
167 __func__, rdev->desc->name); 177 __func__, rdev_get_name(rdev));
168 return -EPERM; 178 return -EPERM;
169 } 179 }
170 if (!(rdev->constraints->valid_modes_mask & mode)) { 180 if (!(rdev->constraints->valid_modes_mask & mode)) {
171 printk(KERN_ERR "%s: invalid mode %x for %s\n", 181 printk(KERN_ERR "%s: invalid mode %x for %s\n",
172 __func__, mode, rdev->desc->name); 182 __func__, mode, rdev_get_name(rdev));
173 return -EINVAL; 183 return -EINVAL;
174 } 184 }
175 return 0; 185 return 0;
@@ -180,12 +190,12 @@ static int regulator_check_drms(struct regulator_dev *rdev)
180{ 190{
181 if (!rdev->constraints) { 191 if (!rdev->constraints) {
182 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 192 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
183 rdev->desc->name); 193 rdev_get_name(rdev));
184 return -ENODEV; 194 return -ENODEV;
185 } 195 }
186 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { 196 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
187 printk(KERN_ERR "%s: operation not allowed for %s\n", 197 printk(KERN_ERR "%s: operation not allowed for %s\n",
188 __func__, rdev->desc->name); 198 __func__, rdev_get_name(rdev));
189 return -EPERM; 199 return -EPERM;
190 } 200 }
191 return 0; 201 return 0;
@@ -230,16 +240,8 @@ static ssize_t regulator_name_show(struct device *dev,
230 struct device_attribute *attr, char *buf) 240 struct device_attribute *attr, char *buf)
231{ 241{
232 struct regulator_dev *rdev = dev_get_drvdata(dev); 242 struct regulator_dev *rdev = dev_get_drvdata(dev);
233 const char *name;
234 243
235 if (rdev->constraints && rdev->constraints->name) 244 return sprintf(buf, "%s\n", rdev_get_name(rdev));
236 name = rdev->constraints->name;
237 else if (rdev->desc->name)
238 name = rdev->desc->name;
239 else
240 name = "";
241
242 return sprintf(buf, "%s\n", name);
243} 245}
244 246
245static ssize_t regulator_print_opmode(char *buf, int mode) 247static ssize_t regulator_print_opmode(char *buf, int mode)
@@ -388,7 +390,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
388 390
389 mutex_lock(&rdev->mutex); 391 mutex_lock(&rdev->mutex);
390 list_for_each_entry(regulator, &rdev->consumer_list, list) 392 list_for_each_entry(regulator, &rdev->consumer_list, list)
391 uA += regulator->uA_load; 393 uA += regulator->uA_load;
392 mutex_unlock(&rdev->mutex); 394 mutex_unlock(&rdev->mutex);
393 return sprintf(buf, "%d\n", uA); 395 return sprintf(buf, "%d\n", uA);
394} 396}
@@ -563,7 +565,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
563 565
564 /* calc total requested load */ 566 /* calc total requested load */
565 list_for_each_entry(sibling, &rdev->consumer_list, list) 567 list_for_each_entry(sibling, &rdev->consumer_list, list)
566 current_uA += sibling->uA_load; 568 current_uA += sibling->uA_load;
567 569
568 /* now get the optimum mode for our new total regulator load */ 570 /* now get the optimum mode for our new total regulator load */
569 mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, 571 mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -579,10 +581,29 @@ static int suspend_set_state(struct regulator_dev *rdev,
579 struct regulator_state *rstate) 581 struct regulator_state *rstate)
580{ 582{
581 int ret = 0; 583 int ret = 0;
584 bool can_set_state;
582 585
583 /* enable & disable are mandatory for suspend control */ 586 can_set_state = rdev->desc->ops->set_suspend_enable &&
584 if (!rdev->desc->ops->set_suspend_enable || 587 rdev->desc->ops->set_suspend_disable;
585 !rdev->desc->ops->set_suspend_disable) { 588
589	/* If we have no suspend mode configuration, don't set anything;
590 * only warn if the driver actually makes the suspend mode
591 * configurable.
592 */
593 if (!rstate->enabled && !rstate->disabled) {
594 if (can_set_state)
595 printk(KERN_WARNING "%s: No configuration for %s\n",
596 __func__, rdev_get_name(rdev));
597 return 0;
598 }
599
600 if (rstate->enabled && rstate->disabled) {
601 printk(KERN_ERR "%s: invalid configuration for %s\n",
602 __func__, rdev_get_name(rdev));
603 return -EINVAL;
604 }
605
606 if (!can_set_state) {
586 printk(KERN_ERR "%s: no way to set suspend state\n", 607 printk(KERN_ERR "%s: no way to set suspend state\n",
587 __func__); 608 __func__);
588 return -EINVAL; 609 return -EINVAL;
@@ -641,25 +662,43 @@ static void print_constraints(struct regulator_dev *rdev)
641{ 662{
642 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
643 char buf[80]; 664 char buf[80];
644 int count; 665 int count = 0;
666 int ret;
645 667
646 if (rdev->desc->type == REGULATOR_VOLTAGE) { 668 if (constraints->min_uV && constraints->max_uV) {
647 if (constraints->min_uV == constraints->max_uV) 669 if (constraints->min_uV == constraints->max_uV)
648 count = sprintf(buf, "%d mV ", 670 count += sprintf(buf + count, "%d mV ",
649 constraints->min_uV / 1000); 671 constraints->min_uV / 1000);
650 else 672 else
651 count = sprintf(buf, "%d <--> %d mV ", 673 count += sprintf(buf + count, "%d <--> %d mV ",
652 constraints->min_uV / 1000, 674 constraints->min_uV / 1000,
653 constraints->max_uV / 1000); 675 constraints->max_uV / 1000);
654 } else { 676 }
677
678 if (!constraints->min_uV ||
679 constraints->min_uV != constraints->max_uV) {
680 ret = _regulator_get_voltage(rdev);
681 if (ret > 0)
682 count += sprintf(buf + count, "at %d mV ", ret / 1000);
683 }
684
685 if (constraints->min_uA && constraints->max_uA) {
655 if (constraints->min_uA == constraints->max_uA) 686 if (constraints->min_uA == constraints->max_uA)
656 count = sprintf(buf, "%d mA ", 687 count += sprintf(buf + count, "%d mA ",
657 constraints->min_uA / 1000); 688 constraints->min_uA / 1000);
658 else 689 else
659 count = sprintf(buf, "%d <--> %d mA ", 690 count += sprintf(buf + count, "%d <--> %d mA ",
660 constraints->min_uA / 1000, 691 constraints->min_uA / 1000,
661 constraints->max_uA / 1000); 692 constraints->max_uA / 1000);
662 } 693 }
694
695 if (!constraints->min_uA ||
696 constraints->min_uA != constraints->max_uA) {
697 ret = _regulator_get_current_limit(rdev);
698 if (ret > 0)
699 count += sprintf(buf + count, "at %d uA ", ret / 1000);
700 }
701
663 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) 702 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
664 count += sprintf(buf + count, "fast "); 703 count += sprintf(buf + count, "fast ");
665 if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) 704 if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
@@ -669,33 +708,30 @@ static void print_constraints(struct regulator_dev *rdev)
669 if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) 708 if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
670 count += sprintf(buf + count, "standby"); 709 count += sprintf(buf + count, "standby");
671 710
672 printk(KERN_INFO "regulator: %s: %s\n", rdev->desc->name, buf); 711 printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
673} 712}
674 713
675/** 714static int machine_constraints_voltage(struct regulator_dev *rdev,
676 * set_machine_constraints - sets regulator constraints
677 * @rdev: regulator source
678 * @constraints: constraints to apply
679 *
680 * Allows platform initialisation code to define and constrain
681 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
682 * Constraints *must* be set by platform code in order for some
683 * regulator operations to proceed i.e. set_voltage, set_current_limit,
684 * set_mode.
685 */
686static int set_machine_constraints(struct regulator_dev *rdev,
687 struct regulation_constraints *constraints) 715 struct regulation_constraints *constraints)
688{ 716{
689 int ret = 0;
690 const char *name;
691 struct regulator_ops *ops = rdev->desc->ops; 717 struct regulator_ops *ops = rdev->desc->ops;
718 const char *name = rdev_get_name(rdev);
719 int ret;
692 720
693 if (constraints->name) 721 /* do we need to apply the constraint voltage */
694 name = constraints->name; 722 if (rdev->constraints->apply_uV &&
695 else if (rdev->desc->name) 723 rdev->constraints->min_uV == rdev->constraints->max_uV &&
696 name = rdev->desc->name; 724 ops->set_voltage) {
697 else 725 ret = ops->set_voltage(rdev,
698 name = "regulator"; 726 rdev->constraints->min_uV, rdev->constraints->max_uV);
727 if (ret < 0) {
728 printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
729 __func__,
730 rdev->constraints->min_uV, name);
731 rdev->constraints = NULL;
732 return ret;
733 }
734 }
699 735
700 /* constrain machine-level voltage specs to fit 736 /* constrain machine-level voltage specs to fit
701 * the actual range supported by this regulator. 737 * the actual range supported by this regulator.
@@ -719,14 +755,13 @@ static int set_machine_constraints(struct regulator_dev *rdev,
719 755
720 /* voltage constraints are optional */ 756 /* voltage constraints are optional */
721 if ((cmin == 0) && (cmax == 0)) 757 if ((cmin == 0) && (cmax == 0))
722 goto out; 758 return 0;
723 759
724 /* else require explicit machine-level constraints */ 760 /* else require explicit machine-level constraints */
725 if (cmin <= 0 || cmax <= 0 || cmax < cmin) { 761 if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
726 pr_err("%s: %s '%s' voltage constraints\n", 762 pr_err("%s: %s '%s' voltage constraints\n",
727 __func__, "invalid", name); 763 __func__, "invalid", name);
728 ret = -EINVAL; 764 return -EINVAL;
729 goto out;
730 } 765 }
731 766
732 /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ 767 /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
@@ -748,8 +783,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
748 if (max_uV < min_uV) { 783 if (max_uV < min_uV) {
749 pr_err("%s: %s '%s' voltage constraints\n", 784 pr_err("%s: %s '%s' voltage constraints\n",
750 __func__, "unsupportable", name); 785 __func__, "unsupportable", name);
751 ret = -EINVAL; 786 return -EINVAL;
752 goto out;
753 } 787 }
754 788
755 /* use regulator's subset of machine constraints */ 789 /* use regulator's subset of machine constraints */
@@ -767,22 +801,34 @@ static int set_machine_constraints(struct regulator_dev *rdev,
767 } 801 }
768 } 802 }
769 803
804 return 0;
805}
806
807/**
808 * set_machine_constraints - sets regulator constraints
809 * @rdev: regulator source
810 * @constraints: constraints to apply
811 *
812 * Allows platform initialisation code to define and constrain
813 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
814 * Constraints *must* be set by platform code in order for some
815 * regulator operations to proceed i.e. set_voltage, set_current_limit,
816 * set_mode.
817 */
818static int set_machine_constraints(struct regulator_dev *rdev,
819 struct regulation_constraints *constraints)
820{
821 int ret = 0;
822 const char *name;
823 struct regulator_ops *ops = rdev->desc->ops;
824
770 rdev->constraints = constraints; 825 rdev->constraints = constraints;
771 826
772 /* do we need to apply the constraint voltage */ 827 name = rdev_get_name(rdev);
773 if (rdev->constraints->apply_uV && 828
774 rdev->constraints->min_uV == rdev->constraints->max_uV && 829 ret = machine_constraints_voltage(rdev, constraints);
775 ops->set_voltage) { 830 if (ret != 0)
776 ret = ops->set_voltage(rdev, 831 goto out;
777 rdev->constraints->min_uV, rdev->constraints->max_uV);
778 if (ret < 0) {
779 printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
780 __func__,
781 rdev->constraints->min_uV, name);
782 rdev->constraints = NULL;
783 goto out;
784 }
785 }
786 832
787 /* do we need to setup our suspend state */ 833 /* do we need to setup our suspend state */
788 if (constraints->initial_state) { 834 if (constraints->initial_state) {
@@ -903,7 +949,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
903 dev_name(&node->regulator->dev), 949 dev_name(&node->regulator->dev),
904 node->regulator->desc->name, 950 node->regulator->desc->name,
905 supply, 951 supply,
906 dev_name(&rdev->dev), rdev->desc->name); 952 dev_name(&rdev->dev), rdev_get_name(rdev));
907 return -EBUSY; 953 return -EBUSY;
908 } 954 }
909 955
@@ -1212,7 +1258,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
1212 ret = _regulator_enable(rdev->supply); 1258 ret = _regulator_enable(rdev->supply);
1213 if (ret < 0) { 1259 if (ret < 0) {
1214 printk(KERN_ERR "%s: failed to enable %s: %d\n", 1260 printk(KERN_ERR "%s: failed to enable %s: %d\n",
1215 __func__, rdev->desc->name, ret); 1261 __func__, rdev_get_name(rdev), ret);
1216 return ret; 1262 return ret;
1217 } 1263 }
1218 } 1264 }
@@ -1238,7 +1284,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
1238 } 1284 }
1239 } else if (ret < 0) { 1285 } else if (ret < 0) {
1240 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", 1286 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
1241 __func__, rdev->desc->name, ret); 1287 __func__, rdev_get_name(rdev), ret);
1242 return ret; 1288 return ret;
1243 } 1289 }
1244 /* Fallthrough on positive return values - already enabled */ 1290 /* Fallthrough on positive return values - already enabled */
@@ -1279,7 +1325,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
1279 1325
1280 if (WARN(rdev->use_count <= 0, 1326 if (WARN(rdev->use_count <= 0,
1281 "unbalanced disables for %s\n", 1327 "unbalanced disables for %s\n",
1282 rdev->desc->name)) 1328 rdev_get_name(rdev)))
1283 return -EIO; 1329 return -EIO;
1284 1330
1285 /* are we the last user and permitted to disable ? */ 1331 /* are we the last user and permitted to disable ? */
@@ -1292,7 +1338,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
1292 ret = rdev->desc->ops->disable(rdev); 1338 ret = rdev->desc->ops->disable(rdev);
1293 if (ret < 0) { 1339 if (ret < 0) {
1294 printk(KERN_ERR "%s: failed to disable %s\n", 1340 printk(KERN_ERR "%s: failed to disable %s\n",
1295 __func__, rdev->desc->name); 1341 __func__, rdev_get_name(rdev));
1296 return ret; 1342 return ret;
1297 } 1343 }
1298 } 1344 }
@@ -1349,7 +1395,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
1349 ret = rdev->desc->ops->disable(rdev); 1395 ret = rdev->desc->ops->disable(rdev);
1350 if (ret < 0) { 1396 if (ret < 0) {
1351 printk(KERN_ERR "%s: failed to force disable %s\n", 1397 printk(KERN_ERR "%s: failed to force disable %s\n",
1352 __func__, rdev->desc->name); 1398 __func__, rdev_get_name(rdev));
1353 return ret; 1399 return ret;
1354 } 1400 }
1355 /* notify other consumers that power has been forced off */ 1401 /* notify other consumers that power has been forced off */
@@ -1766,7 +1812,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1766 output_uV = rdev->desc->ops->get_voltage(rdev); 1812 output_uV = rdev->desc->ops->get_voltage(rdev);
1767 if (output_uV <= 0) { 1813 if (output_uV <= 0) {
1768 printk(KERN_ERR "%s: invalid output voltage found for %s\n", 1814 printk(KERN_ERR "%s: invalid output voltage found for %s\n",
1769 __func__, rdev->desc->name); 1815 __func__, rdev_get_name(rdev));
1770 goto out; 1816 goto out;
1771 } 1817 }
1772 1818
@@ -1777,13 +1823,13 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1777 input_uV = rdev->constraints->input_uV; 1823 input_uV = rdev->constraints->input_uV;
1778 if (input_uV <= 0) { 1824 if (input_uV <= 0) {
1779 printk(KERN_ERR "%s: invalid input voltage found for %s\n", 1825 printk(KERN_ERR "%s: invalid input voltage found for %s\n",
1780 __func__, rdev->desc->name); 1826 __func__, rdev_get_name(rdev));
1781 goto out; 1827 goto out;
1782 } 1828 }
1783 1829
1784 /* calc total requested load for this regulator */ 1830 /* calc total requested load for this regulator */
1785 list_for_each_entry(consumer, &rdev->consumer_list, list) 1831 list_for_each_entry(consumer, &rdev->consumer_list, list)
1786 total_uA_load += consumer->uA_load; 1832 total_uA_load += consumer->uA_load;
1787 1833
1788 mode = rdev->desc->ops->get_optimum_mode(rdev, 1834 mode = rdev->desc->ops->get_optimum_mode(rdev,
1789 input_uV, output_uV, 1835 input_uV, output_uV,
@@ -1791,7 +1837,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1791 ret = regulator_check_mode(rdev, mode); 1837 ret = regulator_check_mode(rdev, mode);
1792 if (ret < 0) { 1838 if (ret < 0) {
1793 printk(KERN_ERR "%s: failed to get optimum mode for %s @" 1839 printk(KERN_ERR "%s: failed to get optimum mode for %s @"
1794 " %d uA %d -> %d uV\n", __func__, rdev->desc->name, 1840 " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
1795 total_uA_load, input_uV, output_uV); 1841 total_uA_load, input_uV, output_uV);
1796 goto out; 1842 goto out;
1797 } 1843 }
@@ -1799,7 +1845,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1799 ret = rdev->desc->ops->set_mode(rdev, mode); 1845 ret = rdev->desc->ops->set_mode(rdev, mode);
1800 if (ret < 0) { 1846 if (ret < 0) {
1801 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n", 1847 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
1802 __func__, mode, rdev->desc->name); 1848 __func__, mode, rdev_get_name(rdev));
1803 goto out; 1849 goto out;
1804 } 1850 }
1805 ret = mode; 1851 ret = mode;
@@ -1852,9 +1898,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
1852 1898
1853 /* now notify regulator we supply */ 1899 /* now notify regulator we supply */
1854 list_for_each_entry(_rdev, &rdev->supply_list, slist) { 1900 list_for_each_entry(_rdev, &rdev->supply_list, slist) {
1855 mutex_lock(&_rdev->mutex); 1901 mutex_lock(&_rdev->mutex);
1856 _notifier_call_chain(_rdev, event, data); 1902 _notifier_call_chain(_rdev, event, data);
1857 mutex_unlock(&_rdev->mutex); 1903 mutex_unlock(&_rdev->mutex);
1858 } 1904 }
1859} 1905}
1860 1906
@@ -1885,9 +1931,9 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
1885 consumers[i].consumer = regulator_get(dev, 1931 consumers[i].consumer = regulator_get(dev,
1886 consumers[i].supply); 1932 consumers[i].supply);
1887 if (IS_ERR(consumers[i].consumer)) { 1933 if (IS_ERR(consumers[i].consumer)) {
1888 dev_err(dev, "Failed to get supply '%s'\n",
1889 consumers[i].supply);
1890 ret = PTR_ERR(consumers[i].consumer); 1934 ret = PTR_ERR(consumers[i].consumer);
1935 dev_err(dev, "Failed to get supply '%s': %d\n",
1936 consumers[i].supply, ret);
1891 consumers[i].consumer = NULL; 1937 consumers[i].consumer = NULL;
1892 goto err; 1938 goto err;
1893 } 1939 }
@@ -1930,8 +1976,8 @@ int regulator_bulk_enable(int num_consumers,
1930 return 0; 1976 return 0;
1931 1977
1932err: 1978err:
1933 printk(KERN_ERR "Failed to enable %s\n", consumers[i].supply); 1979 printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
1934 for (i = 0; i < num_consumers; i++) 1980 for (--i; i >= 0; --i)
1935 regulator_disable(consumers[i].consumer); 1981 regulator_disable(consumers[i].consumer);
1936 1982
1937 return ret; 1983 return ret;
@@ -1965,8 +2011,9 @@ int regulator_bulk_disable(int num_consumers,
1965 return 0; 2011 return 0;
1966 2012
1967err: 2013err:
1968 printk(KERN_ERR "Failed to disable %s\n", consumers[i].supply); 2014 printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
1969 for (i = 0; i < num_consumers; i++) 2015 ret);
2016 for (--i; i >= 0; --i)
1970 regulator_enable(consumers[i].consumer); 2017 regulator_enable(consumers[i].consumer);
1971 2018
1972 return ret; 2019 return ret;
@@ -2316,7 +2363,7 @@ int regulator_suspend_prepare(suspend_state_t state)
2316 2363
2317 if (ret < 0) { 2364 if (ret < 0) {
2318 printk(KERN_ERR "%s: failed to prepare %s\n", 2365 printk(KERN_ERR "%s: failed to prepare %s\n",
2319 __func__, rdev->desc->name); 2366 __func__, rdev_get_name(rdev));
2320 goto out; 2367 goto out;
2321 } 2368 }
2322 } 2369 }
@@ -2429,12 +2476,7 @@ static int __init regulator_init_complete(void)
2429 ops = rdev->desc->ops; 2476 ops = rdev->desc->ops;
2430 c = rdev->constraints; 2477 c = rdev->constraints;
2431 2478
2432 if (c && c->name) 2479 name = rdev_get_name(rdev);
2433 name = c->name;
2434 else if (rdev->desc->name)
2435 name = rdev->desc->name;
2436 else
2437 name = "regulator";
2438 2480
2439 if (!ops->disable || (c && c->always_on)) 2481 if (!ops->disable || (c && c->always_on))
2440 continue; 2482 continue;
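
Among the core.c changes above, the regulator_bulk_enable()/regulator_bulk_disable() error paths now unwind only the consumers that were already processed, walking backwards from the one that failed, instead of re-walking the whole array and touching consumers that were never enabled. A generic sketch of that partial-unwind pattern; enable_one()/disable_one() are stand-ins, not regulator API:

/* Stand-ins for regulator_enable()/regulator_disable() in this sketch. */
static int enable_one(int idx)
{
	(void)idx;
	return 0;
}

static void disable_one(int idx)
{
	(void)idx;
}

static int enable_all(int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = enable_one(i);
		if (ret != 0)
			goto err;
	}
	return 0;

err:
	/* Undo exactly the items that succeeded, in reverse order. */
	while (--i >= 0)
		disable_one(i);
	return ret;
}
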
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index aa224d936e0d..f8c4661a7a81 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -331,7 +331,7 @@ static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
331static int da9034_list_ldo12_voltage(struct regulator_dev *rdev, 331static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
332 unsigned selector) 332 unsigned selector)
333{ 333{
334 if (selector > ARRAY_SIZE(da9034_ldo12_data)) 334 if (selector >= ARRAY_SIZE(da9034_ldo12_data))
335 return -EINVAL; 335 return -EINVAL;
336 return da9034_ldo12_data[selector] * 1000; 336 return da9034_ldo12_data[selector] * 1000;
337} 337}
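
The da903x one-liner above is the classic off-by-one bounds check: for an N-entry table the valid selectors are 0 .. N-1, so the comparison has to be ">=", not ">". A tiny standalone illustration (the table contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int demo_mV[] = { 1700, 1750, 1800, 1850 };	/* 4 entries */

static int demo_list_voltage(unsigned int selector)
{
	/* With ">" instead of ">=", selector == 4 would read one past the end. */
	if (selector >= ARRAY_SIZE(demo_mV))
		return -1;
	return demo_mV[selector] * 1000;
}

int main(void)
{
	printf("%d\n", demo_list_voltage(3));	/* 1850000 */
	printf("%d\n", demo_list_voltage(4));	/* -1, rejected */
	return 0;
}
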
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 7803a320543b..76d08c282f9c 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -446,8 +446,8 @@ static int setup_regulators(struct lp3971 *lp3971,
446 lp3971->rdev[i] = regulator_register(&regulators[id], 446 lp3971->rdev[i] = regulator_register(&regulators[id],
447 lp3971->dev, pdata->regulators[i].initdata, lp3971); 447 lp3971->dev, pdata->regulators[i].initdata, lp3971);
448 448
449 err = IS_ERR(lp3971->rdev[i]); 449 if (IS_ERR(lp3971->rdev[i])) {
450 if (err) { 450 err = PTR_ERR(lp3971->rdev[i]);
451 dev_err(lp3971->dev, "regulator init failed: %d\n", 451 dev_err(lp3971->dev, "regulator init failed: %d\n",
452 err); 452 err);
453 goto error; 453 goto error;
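
The lp3971 fix above addresses a subtle mistake: err was assigned the truth value returned by IS_ERR(), so the log message printed that bare flag and the caller never saw the encoded errno; the real error code lives inside the ERR_PTR value and has to be pulled out with PTR_ERR(). The corrected idiom, reduced to a sketch (register_thing() is hypothetical):

#include <linux/err.h>

struct thing;
struct thing *register_thing(void);	/* hypothetical: returns a pointer or ERR_PTR(-errno) */

static int setup_one(struct thing **out)
{
	struct thing *t = register_thing();

	if (IS_ERR(t))
		return PTR_ERR(t);	/* propagate the real error code, not a boolean */

	*out = t;
	return 0;
}
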
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
new file mode 100644
index 000000000000..acc2fb7b6087
--- /dev/null
+++ b/drivers/regulator/max8660.c
@@ -0,0 +1,510 @@
1/*
2 * max8660.c -- Voltage regulation for the Maxim 8660/8661
3 *
4 * based on max1586.c and wm8400-regulator.c
5 *
6 * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Some info:
22 *
23 * Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8660-MAX8661.pdf
24 *
25 * This chip is a bit nasty because it is a write-only device. Thus, the driver
26 * uses shadow registers to keep track of its values. The main problem appears
27 * to be the initialization: When Linux boots up, we cannot know if the chip is
28 * in the default state or not, so we would have to pass such information in
29 * platform_data. As this adds a bit of complexity to the driver, this is left
30 * out for now until it is really needed.
31 *
32 * The [A|S|M]DTV1 registers are currently not used; only [A|S|M]DTV2 are.
33 *
34 * Once the driver is feature complete, it might be worth checking whether one
35 * set of functions for V3-V7 is sufficient. For maximum flexibility during
36 * development, they are separated for now.
37 *
38 */
39
40#include <linux/module.h>
41#include <linux/err.h>
42#include <linux/i2c.h>
43#include <linux/platform_device.h>
44#include <linux/regulator/driver.h>
45#include <linux/regulator/max8660.h>
46
47#define MAX8660_DCDC_MIN_UV 725000
48#define MAX8660_DCDC_MAX_UV 1800000
49#define MAX8660_DCDC_STEP 25000
50#define MAX8660_DCDC_MAX_SEL 0x2b
51
52#define MAX8660_LDO5_MIN_UV 1700000
53#define MAX8660_LDO5_MAX_UV 2000000
54#define MAX8660_LDO5_STEP 25000
55#define MAX8660_LDO5_MAX_SEL 0x0c
56
57#define MAX8660_LDO67_MIN_UV 1800000
58#define MAX8660_LDO67_MAX_UV 3300000
59#define MAX8660_LDO67_STEP 100000
60#define MAX8660_LDO67_MAX_SEL 0x0f
61
62enum {
63 MAX8660_OVER1,
64 MAX8660_OVER2,
65 MAX8660_VCC1,
66 MAX8660_ADTV1,
67 MAX8660_ADTV2,
68 MAX8660_SDTV1,
69 MAX8660_SDTV2,
70 MAX8660_MDTV1,
71 MAX8660_MDTV2,
72 MAX8660_L12VCR,
73 MAX8660_FPWM,
74 MAX8660_N_REGS, /* not a real register */
75};
76
77struct max8660 {
78 struct i2c_client *client;
79 u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
80 struct regulator_dev *rdev[];
81};
82
83static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
84{
85 static const u8 max8660_addresses[MAX8660_N_REGS] =
86 { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
87
88 int ret;
89 u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
90 dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
91 max8660_addresses[reg], reg_val);
92
93 ret = i2c_smbus_write_byte_data(max8660->client,
94 max8660_addresses[reg], reg_val);
95 if (ret == 0)
96 max8660->shadow_regs[reg] = reg_val;
97
98 return ret;
99}
100
101
102/*
103 * DCDC functions
104 */
105
106static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
107{
108 struct max8660 *max8660 = rdev_get_drvdata(rdev);
109 u8 val = max8660->shadow_regs[MAX8660_OVER1];
110 u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
111 return !!(val & mask);
112}
113
114static int max8660_dcdc_enable(struct regulator_dev *rdev)
115{
116 struct max8660 *max8660 = rdev_get_drvdata(rdev);
117 u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
118 return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
119}
120
121static int max8660_dcdc_disable(struct regulator_dev *rdev)
122{
123 struct max8660 *max8660 = rdev_get_drvdata(rdev);
124 u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
125 return max8660_write(max8660, MAX8660_OVER1, mask, 0);
126}
127
128static int max8660_dcdc_list(struct regulator_dev *rdev, unsigned selector)
129{
130 if (selector > MAX8660_DCDC_MAX_SEL)
131 return -EINVAL;
132 return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
133}
134
135static int max8660_dcdc_get(struct regulator_dev *rdev)
136{
137 struct max8660 *max8660 = rdev_get_drvdata(rdev);
138 u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
139 u8 selector = max8660->shadow_regs[reg];
140 return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
141}
142
143static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
144{
145 struct max8660 *max8660 = rdev_get_drvdata(rdev);
146 u8 reg, selector, bits;
147 int ret;
148
149 if (min_uV < MAX8660_DCDC_MIN_UV || min_uV > MAX8660_DCDC_MAX_UV)
150 return -EINVAL;
151 if (max_uV < MAX8660_DCDC_MIN_UV || max_uV > MAX8660_DCDC_MAX_UV)
152 return -EINVAL;
153
154 selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
155 / MAX8660_DCDC_STEP;
156
157 ret = max8660_dcdc_list(rdev, selector);
158 if (ret < 0 || ret > max_uV)
159 return -EINVAL;
160
161 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
162 ret = max8660_write(max8660, reg, 0, selector);
163 if (ret)
164 return ret;
165
166 /* Select target voltage register and activate regulation */
167 bits = (rdev_get_id(rdev) == MAX8660_V3) ? 0x03 : 0x30;
168 return max8660_write(max8660, MAX8660_VCC1, 0xff, bits);
169}
170
171static struct regulator_ops max8660_dcdc_ops = {
172 .is_enabled = max8660_dcdc_is_enabled,
173 .list_voltage = max8660_dcdc_list,
174 .set_voltage = max8660_dcdc_set,
175 .get_voltage = max8660_dcdc_get,
176};
177
178
179/*
180 * LDO5 functions
181 */
182
183static int max8660_ldo5_list(struct regulator_dev *rdev, unsigned selector)
184{
185 if (selector > MAX8660_LDO5_MAX_SEL)
186 return -EINVAL;
187 return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
188}
189
190static int max8660_ldo5_get(struct regulator_dev *rdev)
191{
192 struct max8660 *max8660 = rdev_get_drvdata(rdev);
193 u8 selector = max8660->shadow_regs[MAX8660_MDTV2];
194
195 return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
196}
197
198static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
199{
200 struct max8660 *max8660 = rdev_get_drvdata(rdev);
201 u8 selector;
202 int ret;
203
204 if (min_uV < MAX8660_LDO5_MIN_UV || min_uV > MAX8660_LDO5_MAX_UV)
205 return -EINVAL;
206 if (max_uV < MAX8660_LDO5_MIN_UV || max_uV > MAX8660_LDO5_MAX_UV)
207 return -EINVAL;
208
209 selector = (min_uV - (MAX8660_LDO5_MIN_UV - MAX8660_LDO5_STEP + 1))
210 / MAX8660_LDO5_STEP;
211 ret = max8660_ldo5_list(rdev, selector);
212 if (ret < 0 || ret > max_uV)
213 return -EINVAL;
214
215 ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
216 if (ret)
217 return ret;
218
219 /* Select target voltage register and activate regulation */
220 return max8660_write(max8660, MAX8660_VCC1, 0xff, 0xc0);
221}
222
223static struct regulator_ops max8660_ldo5_ops = {
224 .list_voltage = max8660_ldo5_list,
225 .set_voltage = max8660_ldo5_set,
226 .get_voltage = max8660_ldo5_get,
227};
228
229
230/*
231 * LDO67 functions
232 */
233
234static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
235{
236 struct max8660 *max8660 = rdev_get_drvdata(rdev);
237 u8 val = max8660->shadow_regs[MAX8660_OVER2];
238 u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
239 return !!(val & mask);
240}
241
242static int max8660_ldo67_enable(struct regulator_dev *rdev)
243{
244 struct max8660 *max8660 = rdev_get_drvdata(rdev);
245 u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
246 return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
247}
248
249static int max8660_ldo67_disable(struct regulator_dev *rdev)
250{
251 struct max8660 *max8660 = rdev_get_drvdata(rdev);
252 u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
253 return max8660_write(max8660, MAX8660_OVER2, mask, 0);
254}
255
256static int max8660_ldo67_list(struct regulator_dev *rdev, unsigned selector)
257{
258 if (selector > MAX8660_LDO67_MAX_SEL)
259 return -EINVAL;
260 return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
261}
262
263static int max8660_ldo67_get(struct regulator_dev *rdev)
264{
265 struct max8660 *max8660 = rdev_get_drvdata(rdev);
266 u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
267 u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
268
269 return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
270}
271
272static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
273{
274 struct max8660 *max8660 = rdev_get_drvdata(rdev);
275 u8 selector;
276 int ret;
277
278 if (min_uV < MAX8660_LDO67_MIN_UV || min_uV > MAX8660_LDO67_MAX_UV)
279 return -EINVAL;
280 if (max_uV < MAX8660_LDO67_MIN_UV || max_uV > MAX8660_LDO67_MAX_UV)
281 return -EINVAL;
282
283 selector = (min_uV - (MAX8660_LDO67_MIN_UV - MAX8660_LDO67_STEP + 1))
284 / MAX8660_LDO67_STEP;
285
286 ret = max8660_ldo67_list(rdev, selector);
287 if (ret < 0 || ret > max_uV)
288 return -EINVAL;
289
290 if (rdev_get_id(rdev) == MAX8660_V6)
291 return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
292 else
293 return max8660_write(max8660, MAX8660_L12VCR, 0x0f, selector << 4);
294}
295
296static struct regulator_ops max8660_ldo67_ops = {
297 .is_enabled = max8660_ldo67_is_enabled,
298 .enable = max8660_ldo67_enable,
299 .disable = max8660_ldo67_disable,
300 .list_voltage = max8660_ldo67_list,
301 .get_voltage = max8660_ldo67_get,
302 .set_voltage = max8660_ldo67_set,
303};
304
305static struct regulator_desc max8660_reg[] = {
306 {
307 .name = "V3(DCDC)",
308 .id = MAX8660_V3,
309 .ops = &max8660_dcdc_ops,
310 .type = REGULATOR_VOLTAGE,
311 .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
312 .owner = THIS_MODULE,
313 },
314 {
315 .name = "V4(DCDC)",
316 .id = MAX8660_V4,
317 .ops = &max8660_dcdc_ops,
318 .type = REGULATOR_VOLTAGE,
319 .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
320 .owner = THIS_MODULE,
321 },
322 {
323 .name = "V5(LDO)",
324 .id = MAX8660_V5,
325 .ops = &max8660_ldo5_ops,
326 .type = REGULATOR_VOLTAGE,
327 .n_voltages = MAX8660_LDO5_MAX_SEL + 1,
328 .owner = THIS_MODULE,
329 },
330 {
331 .name = "V6(LDO)",
332 .id = MAX8660_V6,
333 .ops = &max8660_ldo67_ops,
334 .type = REGULATOR_VOLTAGE,
335 .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
336 .owner = THIS_MODULE,
337 },
338 {
339 .name = "V7(LDO)",
340 .id = MAX8660_V7,
341 .ops = &max8660_ldo67_ops,
342 .type = REGULATOR_VOLTAGE,
343 .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
344 .owner = THIS_MODULE,
345 },
346};
347
348static int max8660_probe(struct i2c_client *client,
349 const struct i2c_device_id *i2c_id)
350{
351 struct regulator_dev **rdev;
352 struct max8660_platform_data *pdata = client->dev.platform_data;
353 struct max8660 *max8660;
354 int boot_on, i, id, ret = -EINVAL;
355
356 if (pdata->num_subdevs > MAX8660_V_END) {
357		dev_err(&client->dev, "Too many regulators found!\n");
358 goto out;
359 }
360
361 max8660 = kzalloc(sizeof(struct max8660) +
362 sizeof(struct regulator_dev *) * MAX8660_V_END,
363 GFP_KERNEL);
364 if (!max8660) {
365 ret = -ENOMEM;
366 goto out;
367 }
368
369 max8660->client = client;
370 rdev = max8660->rdev;
371
372 if (pdata->en34_is_high) {
373 /* Simulate always on */
374 max8660->shadow_regs[MAX8660_OVER1] = 5;
375 } else {
376 /* Otherwise devices can be toggled via software */
377 max8660_dcdc_ops.enable = max8660_dcdc_enable;
378 max8660_dcdc_ops.disable = max8660_dcdc_disable;
379 }
380
381 /*
382 * First, set up shadow registers to prevent glitches. As some
383 * registers are shared between regulators, everything must be properly
384 * set up for all regulators in advance.
385 */
386 max8660->shadow_regs[MAX8660_ADTV1] =
387 max8660->shadow_regs[MAX8660_ADTV2] =
388 max8660->shadow_regs[MAX8660_SDTV1] =
389 max8660->shadow_regs[MAX8660_SDTV2] = 0x1b;
390 max8660->shadow_regs[MAX8660_MDTV1] =
391 max8660->shadow_regs[MAX8660_MDTV2] = 0x04;
392
393 for (i = 0; i < pdata->num_subdevs; i++) {
394
395 if (!pdata->subdevs[i].platform_data)
396 goto err_free;
397
398 boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
399
400 switch (pdata->subdevs[i].id) {
401 case MAX8660_V3:
402 if (boot_on)
403 max8660->shadow_regs[MAX8660_OVER1] |= 1;
404 break;
405
406 case MAX8660_V4:
407 if (boot_on)
408 max8660->shadow_regs[MAX8660_OVER1] |= 4;
409 break;
410
411 case MAX8660_V5:
412 break;
413
414 case MAX8660_V6:
415 if (boot_on)
416 max8660->shadow_regs[MAX8660_OVER2] |= 2;
417 break;
418
419 case MAX8660_V7:
420 if (!strcmp(i2c_id->name, "max8661")) {
421 dev_err(&client->dev, "Regulator not on this chip!\n");
422 goto err_free;
423 }
424
425 if (boot_on)
426 max8660->shadow_regs[MAX8660_OVER2] |= 4;
427 break;
428
429 default:
430 dev_err(&client->dev, "invalid regulator %s\n",
431 pdata->subdevs[i].name);
432 goto err_free;
433 }
434 }
435
436 /* Finally register devices */
437 for (i = 0; i < pdata->num_subdevs; i++) {
438
439 id = pdata->subdevs[i].id;
440
441 rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
442 pdata->subdevs[i].platform_data,
443 max8660);
444 if (IS_ERR(rdev[i])) {
445 ret = PTR_ERR(rdev[i]);
446 dev_err(&client->dev, "failed to register %s\n",
447 max8660_reg[id].name);
448 goto err_unregister;
449 }
450 }
451
452 i2c_set_clientdata(client, rdev);
453 dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
454 return 0;
455
456err_unregister:
457 while (--i >= 0)
458 regulator_unregister(rdev[i]);
459err_free:
460 kfree(max8660);
461out:
462 return ret;
463}
464
465static int max8660_remove(struct i2c_client *client)
466{
467 struct regulator_dev **rdev = i2c_get_clientdata(client);
468 int i;
469
470 for (i = 0; i < MAX8660_V_END; i++)
471 if (rdev[i])
472 regulator_unregister(rdev[i]);
473 kfree(rdev);
474 i2c_set_clientdata(client, NULL);
475
476 return 0;
477}
478
479static const struct i2c_device_id max8660_id[] = {
480 { "max8660", 0 },
481 { "max8661", 0 },
482 { }
483};
484MODULE_DEVICE_TABLE(i2c, max8660_id);
485
486static struct i2c_driver max8660_driver = {
487 .probe = max8660_probe,
488 .remove = max8660_remove,
489 .driver = {
490 .name = "max8660",
491 },
492 .id_table = max8660_id,
493};
494
495static int __init max8660_init(void)
496{
497 return i2c_add_driver(&max8660_driver);
498}
499subsys_initcall(max8660_init);
500
501static void __exit max8660_exit(void)
502{
503 i2c_del_driver(&max8660_driver);
504}
505module_exit(max8660_exit);
506
507/* Module information */
508MODULE_DESCRIPTION("MAXIM 8660/8661 voltage regulator driver");
509MODULE_AUTHOR("Wolfram Sang");
510MODULE_LICENSE("GPL v2");
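
Since the MAX8660 cannot be read back, max8660_write() above folds every update into a shadow copy and only commits the shadow after the bus write succeeds; the mask names the bits to keep. A stripped-down sketch of that pattern with a fake bus (register addresses and default values here are illustrative, not the chip's):

#include <stdint.h>
#include <stdio.h>

#define N_REGS 3

struct wo_chip {
        uint8_t shadow[N_REGS]; /* last value believed to be in each write-only register */
};

/* pretend bus transfer; always succeeds in this sketch */
static int bus_write(uint8_t reg, uint8_t val)
{
        printf("write reg %u = 0x%02x\n", reg, val);
        return 0;
}

/* keep the bits in 'mask', overlay 'val', update the shadow only on success */
static int wo_write(struct wo_chip *chip, uint8_t reg, uint8_t mask, uint8_t val)
{
        uint8_t reg_val = (chip->shadow[reg] & mask) | val;
        int ret = bus_write(reg, reg_val);

        if (ret == 0)
                chip->shadow[reg] = reg_val;
        return ret;
}

int main(void)
{
        struct wo_chip chip = { .shadow = { 0x1b, 0x1b, 0x04 } };

        wo_write(&chip, 0, 0x00, 0x10); /* mask 0: replace the whole register */
        wo_write(&chip, 1, 0xf0, 0x05); /* keep the high nibble, set the low one */
        return 0;
}
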
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
new file mode 100644
index 000000000000..39c495300045
--- /dev/null
+++ b/drivers/regulator/mc13783-regulator.c
@@ -0,0 +1,245 @@
1/*
2 * Regulator Driver for Freescale MC13783 PMIC
3 *
4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/mfd/mc13783.h>
12#include <linux/regulator/machine.h>
13#include <linux/regulator/driver.h>
14#include <linux/platform_device.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/err.h>
18
19#define MC13783_REG_SWITCHERS4 28
20#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
21
22#define MC13783_REG_SWITCHERS5 29
23#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
24
25#define MC13783_REG_REGULATORMODE0 32
26#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
27#define MC13783_REG_REGULATORMODE0_VIOHIEN (1 << 3)
28#define MC13783_REG_REGULATORMODE0_VIOLOEN (1 << 6)
29#define MC13783_REG_REGULATORMODE0_VDIGEN (1 << 9)
30#define MC13783_REG_REGULATORMODE0_VGENEN (1 << 12)
31#define MC13783_REG_REGULATORMODE0_VRFDIGEN (1 << 15)
32#define MC13783_REG_REGULATORMODE0_VRFREFEN (1 << 18)
33#define MC13783_REG_REGULATORMODE0_VRFCPEN (1 << 21)
34
35#define MC13783_REG_REGULATORMODE1 33
36#define MC13783_REG_REGULATORMODE1_VSIMEN (1 << 0)
37#define MC13783_REG_REGULATORMODE1_VESIMEN (1 << 3)
38#define MC13783_REG_REGULATORMODE1_VCAMEN (1 << 6)
39#define MC13783_REG_REGULATORMODE1_VRFBGEN (1 << 9)
40#define MC13783_REG_REGULATORMODE1_VVIBEN (1 << 11)
41#define MC13783_REG_REGULATORMODE1_VRF1EN (1 << 12)
42#define MC13783_REG_REGULATORMODE1_VRF2EN (1 << 15)
43#define MC13783_REG_REGULATORMODE1_VMMC1EN (1 << 18)
44#define MC13783_REG_REGULATORMODE1_VMMC2EN (1 << 21)
45
46#define MC13783_REG_POWERMISC 34
47#define MC13783_REG_POWERMISC_GPO1EN (1 << 6)
48#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
49#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
50#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
51
52struct mc13783_regulator {
53 struct regulator_desc desc;
54 int reg;
55 int enable_bit;
56};
57
58static struct regulator_ops mc13783_regulator_ops;
59
60#define MC13783_DEFINE(prefix, _name, _reg) \
61 [MC13783_ ## prefix ## _ ## _name] = { \
62 .desc = { \
63 .name = #prefix "_" #_name, \
64 .ops = &mc13783_regulator_ops, \
65 .type = REGULATOR_VOLTAGE, \
66 .id = MC13783_ ## prefix ## _ ## _name, \
67 .owner = THIS_MODULE, \
68 }, \
69 .reg = MC13783_REG_ ## _reg, \
70 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
71 }
72
73#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
74#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
75
76static struct mc13783_regulator mc13783_regulators[] = {
77 MC13783_DEFINE_SW(SW3, SWITCHERS5),
78 MC13783_DEFINE_SW(PLL, SWITCHERS4),
79
80 MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
81 MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
82 MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
83 MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
84 MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
85 MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
86 MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
87 MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
88 MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
89 MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
90 MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
91 MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
92 MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
93 MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
94 MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
95 MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
96 MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
97 MC13783_DEFINE_REGU(GPO1, POWERMISC),
98 MC13783_DEFINE_REGU(GPO2, POWERMISC),
99 MC13783_DEFINE_REGU(GPO3, POWERMISC),
100 MC13783_DEFINE_REGU(GPO4, POWERMISC),
101};
102
103struct mc13783_regulator_priv {
104 struct mc13783 *mc13783;
105 struct regulator_dev *regulators[];
106};
107
108static int mc13783_regulator_enable(struct regulator_dev *rdev)
109{
110 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
111 int id = rdev_get_id(rdev);
112 int ret;
113
114 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
115
116 mc13783_lock(priv->mc13783);
117 ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
118 mc13783_regulators[id].enable_bit,
119 mc13783_regulators[id].enable_bit);
120 mc13783_unlock(priv->mc13783);
121
122 return ret;
123}
124
125static int mc13783_regulator_disable(struct regulator_dev *rdev)
126{
127 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
128 int id = rdev_get_id(rdev);
129 int ret;
130
131 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
132
133 mc13783_lock(priv->mc13783);
134 ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
135 mc13783_regulators[id].enable_bit, 0);
136 mc13783_unlock(priv->mc13783);
137
138 return ret;
139}
140
141static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
142{
143 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
144 int ret, id = rdev_get_id(rdev);
145 unsigned int val;
146
147 mc13783_lock(priv->mc13783);
148 ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
149 mc13783_unlock(priv->mc13783);
150
151 if (ret)
152 return ret;
153
154 return (val & mc13783_regulators[id].enable_bit) != 0;
155}
156
157static struct regulator_ops mc13783_regulator_ops = {
158 .enable = mc13783_regulator_enable,
159 .disable = mc13783_regulator_disable,
160 .is_enabled = mc13783_regulator_is_enabled,
161};
162
163static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
164{
165 struct mc13783_regulator_priv *priv;
166 struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
167 struct mc13783_regulator_platform_data *pdata =
168 dev_get_platdata(&pdev->dev);
169 struct mc13783_regulator_init_data *init_data;
170 int i, ret;
171
172 dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
173
174 priv = kzalloc(sizeof(*priv) +
175 pdata->num_regulators * sizeof(priv->regulators[0]),
176 GFP_KERNEL);
177 if (!priv)
178 return -ENOMEM;
179
180 priv->mc13783 = mc13783;
181
182 for (i = 0; i < pdata->num_regulators; i++) {
183 init_data = &pdata->regulators[i];
184 priv->regulators[i] = regulator_register(
185 &mc13783_regulators[init_data->id].desc,
186 &pdev->dev, init_data->init_data, priv);
187
188 if (IS_ERR(priv->regulators[i])) {
189 dev_err(&pdev->dev, "failed to register regulator %s\n",
190 mc13783_regulators[i].desc.name);
191 ret = PTR_ERR(priv->regulators[i]);
192 goto err;
193 }
194 }
195
196 platform_set_drvdata(pdev, priv);
197
198 return 0;
199err:
200 while (--i >= 0)
201 regulator_unregister(priv->regulators[i]);
202
203 kfree(priv);
204
205 return ret;
206}
207
208static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
209{
210 struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
211 struct mc13783_regulator_platform_data *pdata =
212 dev_get_platdata(&pdev->dev);
213 int i;
214
215 for (i = 0; i < pdata->num_regulators; i++)
216 regulator_unregister(priv->regulators[i]);
217
218 return 0;
219}
220
221static struct platform_driver mc13783_regulator_driver = {
222 .driver = {
223 .name = "mc13783-regulator",
224 .owner = THIS_MODULE,
225 },
226 .remove = __devexit_p(mc13783_regulator_remove),
227 .probe = mc13783_regulator_probe,
228};
229
230static int __init mc13783_regulator_init(void)
231{
232 return platform_driver_register(&mc13783_regulator_driver);
233}
234subsys_initcall(mc13783_regulator_init);
235
236static void __exit mc13783_regulator_exit(void)
237{
238 platform_driver_unregister(&mc13783_regulator_driver);
239}
240module_exit(mc13783_regulator_exit);
241
242MODULE_LICENSE("GPL v2");
243MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
244MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
245MODULE_ALIAS("platform:mc13783-regulator");
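
The MC13783_DEFINE() macros above build the descriptor table purely by token pasting, so each entry only has to name the regulator and its control register. For one entry, MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0) should expand to roughly the following initializer (hand-expanded for illustration, not extra code in the driver):

[MC13783_REGU_VAUDIO] = {
        .desc = {
                .name  = "REGU_VAUDIO",
                .ops   = &mc13783_regulator_ops,
                .type  = REGULATOR_VOLTAGE,
                .id    = MC13783_REGU_VAUDIO,
                .owner = THIS_MODULE,
        },
        .reg        = MC13783_REG_REGULATORMODE0,
        .enable_bit = MC13783_REG_REGULATORMODE0_VAUDIOEN,
},
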
diff --git a/drivers/regulator/mc13783.c b/drivers/regulator/mc13783.c
deleted file mode 100644
index 710211f67449..000000000000
--- a/drivers/regulator/mc13783.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/*
2 * Regulator Driver for Freescale MC13783 PMIC
3 *
4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/mfd/mc13783-private.h>
12#include <linux/regulator/machine.h>
13#include <linux/regulator/driver.h>
14#include <linux/platform_device.h>
15#include <linux/mfd/mc13783.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/err.h>
19
20struct mc13783_regulator {
21 struct regulator_desc desc;
22 int reg;
23 int enable_bit;
24};
25
26static struct regulator_ops mc13783_regulator_ops;
27
28static struct mc13783_regulator mc13783_regulators[] = {
29 [MC13783_SW_SW3] = {
30 .desc = {
31 .name = "SW_SW3",
32 .ops = &mc13783_regulator_ops,
33 .type = REGULATOR_VOLTAGE,
34 .id = MC13783_SW_SW3,
35 .owner = THIS_MODULE,
36 },
37 .reg = MC13783_REG_SWITCHERS_5,
38 .enable_bit = MC13783_SWCTRL_SW3_EN,
39 },
40 [MC13783_SW_PLL] = {
41 .desc = {
42 .name = "SW_PLL",
43 .ops = &mc13783_regulator_ops,
44 .type = REGULATOR_VOLTAGE,
45 .id = MC13783_SW_PLL,
46 .owner = THIS_MODULE,
47 },
48 .reg = MC13783_REG_SWITCHERS_4,
49 .enable_bit = MC13783_SWCTRL_PLL_EN,
50 },
51 [MC13783_REGU_VAUDIO] = {
52 .desc = {
53 .name = "REGU_VAUDIO",
54 .ops = &mc13783_regulator_ops,
55 .type = REGULATOR_VOLTAGE,
56 .id = MC13783_REGU_VAUDIO,
57 .owner = THIS_MODULE,
58 },
59 .reg = MC13783_REG_REGULATOR_MODE_0,
60 .enable_bit = MC13783_REGCTRL_VAUDIO_EN,
61 },
62 [MC13783_REGU_VIOHI] = {
63 .desc = {
64 .name = "REGU_VIOHI",
65 .ops = &mc13783_regulator_ops,
66 .type = REGULATOR_VOLTAGE,
67 .id = MC13783_REGU_VIOHI,
68 .owner = THIS_MODULE,
69 },
70 .reg = MC13783_REG_REGULATOR_MODE_0,
71 .enable_bit = MC13783_REGCTRL_VIOHI_EN,
72 },
73 [MC13783_REGU_VIOLO] = {
74 .desc = {
75 .name = "REGU_VIOLO",
76 .ops = &mc13783_regulator_ops,
77 .type = REGULATOR_VOLTAGE,
78 .id = MC13783_REGU_VIOLO,
79 .owner = THIS_MODULE,
80 },
81 .reg = MC13783_REG_REGULATOR_MODE_0,
82 .enable_bit = MC13783_REGCTRL_VIOLO_EN,
83 },
84 [MC13783_REGU_VDIG] = {
85 .desc = {
86 .name = "REGU_VDIG",
87 .ops = &mc13783_regulator_ops,
88 .type = REGULATOR_VOLTAGE,
89 .id = MC13783_REGU_VDIG,
90 .owner = THIS_MODULE,
91 },
92 .reg = MC13783_REG_REGULATOR_MODE_0,
93 .enable_bit = MC13783_REGCTRL_VDIG_EN,
94 },
95 [MC13783_REGU_VGEN] = {
96 .desc = {
97 .name = "REGU_VGEN",
98 .ops = &mc13783_regulator_ops,
99 .type = REGULATOR_VOLTAGE,
100 .id = MC13783_REGU_VGEN,
101 .owner = THIS_MODULE,
102 },
103 .reg = MC13783_REG_REGULATOR_MODE_0,
104 .enable_bit = MC13783_REGCTRL_VGEN_EN,
105 },
106 [MC13783_REGU_VRFDIG] = {
107 .desc = {
108 .name = "REGU_VRFDIG",
109 .ops = &mc13783_regulator_ops,
110 .type = REGULATOR_VOLTAGE,
111 .id = MC13783_REGU_VRFDIG,
112 .owner = THIS_MODULE,
113 },
114 .reg = MC13783_REG_REGULATOR_MODE_0,
115 .enable_bit = MC13783_REGCTRL_VRFDIG_EN,
116 },
117 [MC13783_REGU_VRFREF] = {
118 .desc = {
119 .name = "REGU_VRFREF",
120 .ops = &mc13783_regulator_ops,
121 .type = REGULATOR_VOLTAGE,
122 .id = MC13783_REGU_VRFREF,
123 .owner = THIS_MODULE,
124 },
125 .reg = MC13783_REG_REGULATOR_MODE_0,
126 .enable_bit = MC13783_REGCTRL_VRFREF_EN,
127 },
128 [MC13783_REGU_VRFCP] = {
129 .desc = {
130 .name = "REGU_VRFCP",
131 .ops = &mc13783_regulator_ops,
132 .type = REGULATOR_VOLTAGE,
133 .id = MC13783_REGU_VRFCP,
134 .owner = THIS_MODULE,
135 },
136 .reg = MC13783_REG_REGULATOR_MODE_0,
137 .enable_bit = MC13783_REGCTRL_VRFCP_EN,
138 },
139 [MC13783_REGU_VSIM] = {
140 .desc = {
141 .name = "REGU_VSIM",
142 .ops = &mc13783_regulator_ops,
143 .type = REGULATOR_VOLTAGE,
144 .id = MC13783_REGU_VSIM,
145 .owner = THIS_MODULE,
146 },
147 .reg = MC13783_REG_REGULATOR_MODE_1,
148 .enable_bit = MC13783_REGCTRL_VSIM_EN,
149 },
150 [MC13783_REGU_VESIM] = {
151 .desc = {
152 .name = "REGU_VESIM",
153 .ops = &mc13783_regulator_ops,
154 .type = REGULATOR_VOLTAGE,
155 .id = MC13783_REGU_VESIM,
156 .owner = THIS_MODULE,
157 },
158 .reg = MC13783_REG_REGULATOR_MODE_1,
159 .enable_bit = MC13783_REGCTRL_VESIM_EN,
160 },
161 [MC13783_REGU_VCAM] = {
162 .desc = {
163 .name = "REGU_VCAM",
164 .ops = &mc13783_regulator_ops,
165 .type = REGULATOR_VOLTAGE,
166 .id = MC13783_REGU_VCAM,
167 .owner = THIS_MODULE,
168 },
169 .reg = MC13783_REG_REGULATOR_MODE_1,
170 .enable_bit = MC13783_REGCTRL_VCAM_EN,
171 },
172 [MC13783_REGU_VRFBG] = {
173 .desc = {
174 .name = "REGU_VRFBG",
175 .ops = &mc13783_regulator_ops,
176 .type = REGULATOR_VOLTAGE,
177 .id = MC13783_REGU_VRFBG,
178 .owner = THIS_MODULE,
179 },
180 .reg = MC13783_REG_REGULATOR_MODE_1,
181 .enable_bit = MC13783_REGCTRL_VRFBG_EN,
182 },
183 [MC13783_REGU_VVIB] = {
184 .desc = {
185 .name = "REGU_VVIB",
186 .ops = &mc13783_regulator_ops,
187 .type = REGULATOR_VOLTAGE,
188 .id = MC13783_REGU_VVIB,
189 .owner = THIS_MODULE,
190 },
191 .reg = MC13783_REG_REGULATOR_MODE_1,
192 .enable_bit = MC13783_REGCTRL_VVIB_EN,
193 },
194 [MC13783_REGU_VRF1] = {
195 .desc = {
196 .name = "REGU_VRF1",
197 .ops = &mc13783_regulator_ops,
198 .type = REGULATOR_VOLTAGE,
199 .id = MC13783_REGU_VRF1,
200 .owner = THIS_MODULE,
201 },
202 .reg = MC13783_REG_REGULATOR_MODE_1,
203 .enable_bit = MC13783_REGCTRL_VRF1_EN,
204 },
205 [MC13783_REGU_VRF2] = {
206 .desc = {
207 .name = "REGU_VRF2",
208 .ops = &mc13783_regulator_ops,
209 .type = REGULATOR_VOLTAGE,
210 .id = MC13783_REGU_VRF2,
211 .owner = THIS_MODULE,
212 },
213 .reg = MC13783_REG_REGULATOR_MODE_1,
214 .enable_bit = MC13783_REGCTRL_VRF2_EN,
215 },
216 [MC13783_REGU_VMMC1] = {
217 .desc = {
218 .name = "REGU_VMMC1",
219 .ops = &mc13783_regulator_ops,
220 .type = REGULATOR_VOLTAGE,
221 .id = MC13783_REGU_VMMC1,
222 .owner = THIS_MODULE,
223 },
224 .reg = MC13783_REG_REGULATOR_MODE_1,
225 .enable_bit = MC13783_REGCTRL_VMMC1_EN,
226 },
227 [MC13783_REGU_VMMC2] = {
228 .desc = {
229 .name = "REGU_VMMC2",
230 .ops = &mc13783_regulator_ops,
231 .type = REGULATOR_VOLTAGE,
232 .id = MC13783_REGU_VMMC2,
233 .owner = THIS_MODULE,
234 },
235 .reg = MC13783_REG_REGULATOR_MODE_1,
236 .enable_bit = MC13783_REGCTRL_VMMC2_EN,
237 },
238 [MC13783_REGU_GPO1] = {
239 .desc = {
240 .name = "REGU_GPO1",
241 .ops = &mc13783_regulator_ops,
242 .type = REGULATOR_VOLTAGE,
243 .id = MC13783_REGU_GPO1,
244 .owner = THIS_MODULE,
245 },
246 .reg = MC13783_REG_POWER_MISCELLANEOUS,
247 .enable_bit = MC13783_REGCTRL_GPO1_EN,
248 },
249 [MC13783_REGU_GPO2] = {
250 .desc = {
251 .name = "REGU_GPO2",
252 .ops = &mc13783_regulator_ops,
253 .type = REGULATOR_VOLTAGE,
254 .id = MC13783_REGU_GPO2,
255 .owner = THIS_MODULE,
256 },
257 .reg = MC13783_REG_POWER_MISCELLANEOUS,
258 .enable_bit = MC13783_REGCTRL_GPO2_EN,
259 },
260 [MC13783_REGU_GPO3] = {
261 .desc = {
262 .name = "REGU_GPO3",
263 .ops = &mc13783_regulator_ops,
264 .type = REGULATOR_VOLTAGE,
265 .id = MC13783_REGU_GPO3,
266 .owner = THIS_MODULE,
267 },
268 .reg = MC13783_REG_POWER_MISCELLANEOUS,
269 .enable_bit = MC13783_REGCTRL_GPO3_EN,
270 },
271 [MC13783_REGU_GPO4] = {
272 .desc = {
273 .name = "REGU_GPO4",
274 .ops = &mc13783_regulator_ops,
275 .type = REGULATOR_VOLTAGE,
276 .id = MC13783_REGU_GPO4,
277 .owner = THIS_MODULE,
278 },
279 .reg = MC13783_REG_POWER_MISCELLANEOUS,
280 .enable_bit = MC13783_REGCTRL_GPO4_EN,
281 },
282};
283
284struct mc13783_priv {
285 struct regulator_desc desc[ARRAY_SIZE(mc13783_regulators)];
286 struct mc13783 *mc13783;
287 struct regulator_dev *regulators[0];
288};
289
290static int mc13783_enable(struct regulator_dev *rdev)
291{
292 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
293 int id = rdev_get_id(rdev);
294
295 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
296
297 return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
298 mc13783_regulators[id].enable_bit,
299 mc13783_regulators[id].enable_bit);
300}
301
302static int mc13783_disable(struct regulator_dev *rdev)
303{
304 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
305 int id = rdev_get_id(rdev);
306
307 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
308
309 return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
310 mc13783_regulators[id].enable_bit, 0);
311}
312
313static int mc13783_is_enabled(struct regulator_dev *rdev)
314{
315 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
316 int ret, id = rdev_get_id(rdev);
317 unsigned int val;
318
319 ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
320 if (ret)
321 return ret;
322
323 return (val & mc13783_regulators[id].enable_bit) != 0;
324}
325
326static struct regulator_ops mc13783_regulator_ops = {
327 .enable = mc13783_enable,
328 .disable = mc13783_disable,
329 .is_enabled = mc13783_is_enabled,
330};
331
332static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
333{
334 struct mc13783_priv *priv;
335 struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
336 struct mc13783_regulator_init_data *init_data;
337 int i, ret;
338
339 dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
340
341 priv = kzalloc(sizeof(*priv) + mc13783->num_regulators * sizeof(void *),
342 GFP_KERNEL);
343 if (!priv)
344 return -ENOMEM;
345
346 priv->mc13783 = mc13783;
347
348 for (i = 0; i < mc13783->num_regulators; i++) {
349 init_data = &mc13783->regulators[i];
350 priv->regulators[i] = regulator_register(
351 &mc13783_regulators[init_data->id].desc,
352 &pdev->dev, init_data->init_data, priv);
353
354 if (IS_ERR(priv->regulators[i])) {
355 dev_err(&pdev->dev, "failed to register regulator %s\n",
356 mc13783_regulators[i].desc.name);
357 ret = PTR_ERR(priv->regulators[i]);
358 goto err;
359 }
360 }
361
362 platform_set_drvdata(pdev, priv);
363
364 return 0;
365err:
366 while (--i >= 0)
367 regulator_unregister(priv->regulators[i]);
368
369 kfree(priv);
370
371 return ret;
372}
373
374static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
375{
376 struct mc13783_priv *priv = platform_get_drvdata(pdev);
377 struct mc13783 *mc13783 = priv->mc13783;
378 int i;
379
380 for (i = 0; i < mc13783->num_regulators; i++)
381 regulator_unregister(priv->regulators[i]);
382
383 return 0;
384}
385
386static struct platform_driver mc13783_regulator_driver = {
387 .driver = {
388 .name = "mc13783-regulator",
389 .owner = THIS_MODULE,
390 },
391 .remove = __devexit_p(mc13783_regulator_remove),
392};
393
394static int __init mc13783_regulator_init(void)
395{
396 return platform_driver_probe(&mc13783_regulator_driver,
397 mc13783_regulator_probe);
398}
399subsys_initcall(mc13783_regulator_init);
400
401static void __exit mc13783_regulator_exit(void)
402{
403 platform_driver_unregister(&mc13783_regulator_driver);
404}
405module_exit(mc13783_regulator_exit);
406
407MODULE_LICENSE("GPL");
408MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
409MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
410MODULE_ALIAS("platform:mc13783-regulator");
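
One structural difference between the deleted file and its replacement: the old private struct declared the trailing pointer array as the GNU zero-length "regulators[0]", while the rewrite uses a C99 flexible array member "regulators[]". Both rely on a single allocation sized as header plus n pointers; a userspace sketch of that idiom (calloc standing in for kzalloc):

#include <stdlib.h>

struct priv {
        int num;
        void *regulators[];     /* flexible array member, sized at allocation time */
};

static struct priv *priv_alloc(int n)
{
        /* one zeroed allocation covering the header and n trailing pointers */
        struct priv *p = calloc(1, sizeof(*p) + n * sizeof(p->regulators[0]));

        if (p)
                p->num = n;
        return p;
}

int main(void)
{
        struct priv *p = priv_alloc(4); /* however many regulators the platform registers */

        free(p);
        return 0;
}
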
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7ea1c3a31081..7e674859bd59 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/delay.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/regulator/driver.h> 17#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h> 18#include <linux/regulator/machine.h>
@@ -40,6 +41,12 @@ struct twlreg_info {
40 u8 table_len; 41 u8 table_len;
41 const u16 *table; 42 const u16 *table;
42 43
44 /* regulator specific turn-on delay */
45 u16 delay;
46
47 /* State REMAP default configuration */
48 u8 remap;
49
43 /* chip constraints on regulator behavior */ 50 /* chip constraints on regulator behavior */
44 u16 min_mV; 51 u16 min_mV;
45 52
@@ -128,6 +135,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
128{ 135{
129 struct twlreg_info *info = rdev_get_drvdata(rdev); 136 struct twlreg_info *info = rdev_get_drvdata(rdev);
130 int grp; 137 int grp;
138 int ret;
131 139
132 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); 140 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
133 if (grp < 0) 141 if (grp < 0)
@@ -138,7 +146,11 @@ static int twlreg_enable(struct regulator_dev *rdev)
138 else 146 else
139 grp |= P1_GRP_6030; 147 grp |= P1_GRP_6030;
140 148
141 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 149 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
150
151 udelay(info->delay);
152
153 return ret;
142} 154}
143 155
144static int twlreg_disable(struct regulator_dev *rdev) 156static int twlreg_disable(struct regulator_dev *rdev)
@@ -151,9 +163,9 @@ static int twlreg_disable(struct regulator_dev *rdev)
151 return grp; 163 return grp;
152 164
153 if (twl_class_is_4030()) 165 if (twl_class_is_4030())
154 grp &= ~P1_GRP_4030; 166 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
155 else 167 else
156 grp &= ~P1_GRP_6030; 168 grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
157 169
158 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 170 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
159} 171}
@@ -294,6 +306,18 @@ static const u16 VSIM_VSEL_table[] = {
294static const u16 VDAC_VSEL_table[] = { 306static const u16 VDAC_VSEL_table[] = {
295 1200, 1300, 1800, 1800, 307 1200, 1300, 1800, 1800,
296}; 308};
309static const u16 VDD1_VSEL_table[] = {
310 800, 1450,
311};
312static const u16 VDD2_VSEL_table[] = {
313 800, 1450, 1500,
314};
315static const u16 VIO_VSEL_table[] = {
316 1800, 1850,
317};
318static const u16 VINTANA2_VSEL_table[] = {
319 2500, 2750,
320};
297static const u16 VAUX1_6030_VSEL_table[] = { 321static const u16 VAUX1_6030_VSEL_table[] = {
298 1000, 1300, 1800, 2500, 322 1000, 1300, 1800, 2500,
299 2800, 2900, 3000, 3000, 323 2800, 2900, 3000, 3000,
@@ -414,20 +438,30 @@ static struct regulator_ops twlfixed_ops = {
414 438
415/*----------------------------------------------------------------------*/ 439/*----------------------------------------------------------------------*/
416 440
417#define TWL4030_ADJUSTABLE_LDO(label, offset, num) \ 441#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
418 TWL_ADJUSTABLE_LDO(label, offset, num, TWL4030) 442 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
419#define TWL4030_FIXED_LDO(label, offset, mVolts, num) \ 443 remap_conf, TWL4030)
420 TWL_FIXED_LDO(label, offset, mVolts, num, TWL4030) 444#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
421#define TWL6030_ADJUSTABLE_LDO(label, offset, num) \ 445 remap_conf) \
422 TWL_ADJUSTABLE_LDO(label, offset, num, TWL6030) 446 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
423#define TWL6030_FIXED_LDO(label, offset, mVolts, num) \ 447 remap_conf, TWL4030)
424 TWL_FIXED_LDO(label, offset, mVolts, num, TWL6030) 448#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
425 449 remap_conf) \
426#define TWL_ADJUSTABLE_LDO(label, offset, num, family) { \ 450 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
451 remap_conf, TWL6030)
452#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
453 remap_conf) \
454 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
455 remap_conf, TWL6030)
456
457#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
458 family) { \
427 .base = offset, \ 459 .base = offset, \
428 .id = num, \ 460 .id = num, \
429 .table_len = ARRAY_SIZE(label##_VSEL_table), \ 461 .table_len = ARRAY_SIZE(label##_VSEL_table), \
430 .table = label##_VSEL_table, \ 462 .table = label##_VSEL_table, \
463 .delay = turnon_delay, \
464 .remap = remap_conf, \
431 .desc = { \ 465 .desc = { \
432 .name = #label, \ 466 .name = #label, \
433 .id = family##_REG_##label, \ 467 .id = family##_REG_##label, \
@@ -438,10 +472,13 @@ static struct regulator_ops twlfixed_ops = {
438 }, \ 472 }, \
439 } 473 }
440 474
441#define TWL_FIXED_LDO(label, offset, mVolts, num, family) { \ 475#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
476 family) { \
442 .base = offset, \ 477 .base = offset, \
443 .id = num, \ 478 .id = num, \
444 .min_mV = mVolts, \ 479 .min_mV = mVolts, \
480 .delay = turnon_delay, \
481 .remap = remap_conf, \
445 .desc = { \ 482 .desc = { \
446 .name = #label, \ 483 .name = #label, \
447 .id = family##_REG_##label, \ 484 .id = family##_REG_##label, \
@@ -457,43 +494,41 @@ static struct regulator_ops twlfixed_ops = {
457 * software control over them after boot. 494 * software control over them after boot.
458 */ 495 */
459static struct twlreg_info twl_regs[] = { 496static struct twlreg_info twl_regs[] = {
460 TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1), 497 TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08),
461 TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2), 498 TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2, 100, 0x08),
462 TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2), 499 TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2, 100, 0x08),
463 TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3), 500 TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3, 100, 0x08),
464 TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4), 501 TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4, 100, 0x08),
465 TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5), 502 TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5, 100, 0x08),
466 TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6), 503 TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6, 100, 0x08),
467 /* 504 TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7, 100, 0x00),
468 TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7), 505 TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8, 100, 0x08),
469 */ 506 TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00),
470 TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8), 507 TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08),
471 TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9), 508 TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08),
472 TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10), 509 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
473 /* 510 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
474 TWL4030_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11), 511 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
475 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12), 512 TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
476 TWL4030_ADJUSTABLE_LDO(VINTDIG, 0x47, 13), 513 TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
477 TWL4030_SMPS(VIO, 0x4b, 14), 514 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
478 TWL4030_SMPS(VDD1, 0x55, 15), 515 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
479 TWL4030_SMPS(VDD2, 0x63, 16), 516 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
480 */
481 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
482 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
483 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
484 /* VUSBCP is managed *only* by the USB subchip */ 517 /* VUSBCP is managed *only* by the USB subchip */
485 518
486 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 519 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
487 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1), 520 /* Turnon-delay and remap configuration values for 6030 are not
488 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2), 521 verified since the specification is not public */
489 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3), 522 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
490 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4), 523 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
491 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5), 524 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
492 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7), 525 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
493 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15), 526 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
494 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16), 527 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
495 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17), 528 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
496 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18) 529 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
530 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
531 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
497}; 532};
498 533
499static int twlreg_probe(struct platform_device *pdev) 534static int twlreg_probe(struct platform_device *pdev)
@@ -525,6 +560,19 @@ static int twlreg_probe(struct platform_device *pdev)
525 c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE 560 c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
526 | REGULATOR_CHANGE_MODE 561 | REGULATOR_CHANGE_MODE
527 | REGULATOR_CHANGE_STATUS; 562 | REGULATOR_CHANGE_STATUS;
563 switch (pdev->id) {
564 case TWL4030_REG_VIO:
565 case TWL4030_REG_VDD1:
566 case TWL4030_REG_VDD2:
567 case TWL4030_REG_VPLL1:
568 case TWL4030_REG_VINTANA1:
569 case TWL4030_REG_VINTANA2:
570 case TWL4030_REG_VINTDIG:
571 c->always_on = true;
572 break;
573 default:
574 break;
575 }
528 576
529 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); 577 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
530 if (IS_ERR(rdev)) { 578 if (IS_ERR(rdev)) {
@@ -534,6 +582,9 @@ static int twlreg_probe(struct platform_device *pdev)
534 } 582 }
535 platform_set_drvdata(pdev, rdev); 583 platform_set_drvdata(pdev, rdev);
536 584
585 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
586 info->remap);
587
537 /* NOTE: many regulators support short-circuit IRQs (presentable 588 /* NOTE: many regulators support short-circuit IRQs (presentable
538 * as REGULATOR_OVER_CURRENT notifications?) configured via: 589 * as REGULATOR_OVER_CURRENT notifications?) configured via:
539 * - SC_CONFIG 590 * - SC_CONFIG
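
The twl changes above thread two new per-regulator fields through the descriptor macros: a turn-on delay, busy-waited after the group bits are written in twlreg_enable(), and a REMAP value written once at probe. A reduced sketch of the enable-then-wait shape (field names and the microsecond values are illustrative, not the TWL register map):

#include <stdio.h>

struct reg_info {
        unsigned int delay_us;  /* datasheet turn-on (ramp) time for this supply */
        unsigned char remap;    /* sleep/off state remap setting written at probe */
};

/* stand-in for the kernel's udelay(); a real driver would busy-wait here */
static void udelay(unsigned int us)
{
        (void)us;
}

/* stand-in for the register write that moves the supply into the active group */
static int write_group_bits(unsigned int grp)
{
        printf("group bits <- 0x%x\n", grp);
        return 0;
}

static int reg_enable(const struct reg_info *info)
{
        int ret = write_group_bits(0x1);

        /* don't return before the rail has had time to come up */
        udelay(info->delay_us);
        return ret;
}

int main(void)
{
        struct reg_info vmmc = { .delay_us = 100, .remap = 0x08 };

        return reg_enable(&vmmc);
}
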
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 2eefc1a0cf08..0a6577577e8d 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -19,6 +19,8 @@
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h> 21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/gpio.h>
22 24
23#include <linux/mfd/wm831x/core.h> 25#include <linux/mfd/wm831x/core.h>
24#include <linux/mfd/wm831x/regulator.h> 26#include <linux/mfd/wm831x/regulator.h>
@@ -39,6 +41,7 @@
39#define WM831X_DCDC_CONTROL_2 1 41#define WM831X_DCDC_CONTROL_2 1
40#define WM831X_DCDC_ON_CONFIG 2 42#define WM831X_DCDC_ON_CONFIG 2
41#define WM831X_DCDC_SLEEP_CONTROL 3 43#define WM831X_DCDC_SLEEP_CONTROL 3
44#define WM831X_DCDC_DVS_CONTROL 4
42 45
43/* 46/*
44 * Shared 47 * Shared
@@ -50,6 +53,10 @@ struct wm831x_dcdc {
50 int base; 53 int base;
51 struct wm831x *wm831x; 54 struct wm831x *wm831x;
52 struct regulator_dev *regulator; 55 struct regulator_dev *regulator;
56 int dvs_gpio;
57 int dvs_gpio_state;
58 int on_vsel;
59 int dvs_vsel;
53}; 60};
54 61
55static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev) 62static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev)
@@ -240,11 +247,9 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
240 return -EINVAL; 247 return -EINVAL;
241} 248}
242 249
243static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg, 250static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
244 int min_uV, int max_uV) 251 int min_uV, int max_uV)
245{ 252{
246 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
247 struct wm831x *wm831x = dcdc->wm831x;
248 u16 vsel; 253 u16 vsel;
249 254
250 if (min_uV < 600000) 255 if (min_uV < 600000)
@@ -257,39 +262,126 @@ static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
257 if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) 262 if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
258 return -EINVAL; 263 return -EINVAL;
259 264
260 return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_VSEL_MASK, vsel); 265 return vsel;
266}
267
268static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
269 int min_uV, int max_uV)
270{
271 u16 vsel;
272
273 if (max_uV < 600000 || max_uV > 1800000)
274 return -EINVAL;
275
276 vsel = ((max_uV - 600000) / 12500) + 8;
277
278 if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
279 wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
280 return -EINVAL;
281
282 return vsel;
283}
284
285static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
286{
287 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
288
289 if (state == dcdc->dvs_gpio_state)
290 return 0;
291
292 dcdc->dvs_gpio_state = state;
293 gpio_set_value(dcdc->dvs_gpio, state);
294
295 /* Should wait for DVS state change to be asserted if we have
296 * a GPIO for it, for now assume the device is configured
297 * for the fastest possible transition.
298 */
299
300 return 0;
261} 301}
262 302
263static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, 303static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
264 int min_uV, int max_uV) 304 int min_uV, int max_uV)
265{ 305{
266 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 306 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
267 u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; 307 struct wm831x *wm831x = dcdc->wm831x;
308 int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
309 int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
310 int vsel, ret;
311
312 vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
313 if (vsel < 0)
314 return vsel;
315
316 /* If this value is already set then do a GPIO update if we can */
317 if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
318 return wm831x_buckv_set_dvs(rdev, 0);
319
320 if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel)
321 return wm831x_buckv_set_dvs(rdev, 1);
322
323 /* Always set the ON status to the minimum voltage */
324 ret = wm831x_set_bits(wm831x, on_reg, WM831X_DC1_ON_VSEL_MASK, vsel);
325 if (ret < 0)
326 return ret;
327 dcdc->on_vsel = vsel;
328
329 if (!dcdc->dvs_gpio)
330 return ret;
331
332 /* Kick the voltage transition now */
333 ret = wm831x_buckv_set_dvs(rdev, 0);
334 if (ret < 0)
335 return ret;
336
337 /* Set the high voltage as the DVS voltage. This is optimised
338 * for CPUfreq usage, most processors will keep the maximum
339 * voltage constant and lower the minimum with the frequency. */
340 vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
341 if (vsel < 0) {
342 /* This should never happen - at worst the same vsel
343 * should be chosen */
344 WARN_ON(vsel < 0);
345 return 0;
346 }
347
348 /* Don't bother if it's the same VSEL we're already using */
349 if (vsel == dcdc->on_vsel)
350 return 0;
268 351
269 return wm831x_buckv_set_voltage_int(rdev, reg, min_uV, max_uV); 352 ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
353 if (ret == 0)
354 dcdc->dvs_vsel = vsel;
355 else
356 dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
357 ret);
358
359 return 0;
270} 360}
271 361
272static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, 362static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
273 int uV) 363 int uV)
274{ 364{
275 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 365 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
366 struct wm831x *wm831x = dcdc->wm831x;
276 u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; 367 u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
368 int vsel;
369
370 vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
371 if (vsel < 0)
372 return vsel;
277 373
278 return wm831x_buckv_set_voltage_int(rdev, reg, uV, uV); 374 return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
279} 375}
280 376
281static int wm831x_buckv_get_voltage(struct regulator_dev *rdev) 377static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
282{ 378{
283 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 379 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
284 struct wm831x *wm831x = dcdc->wm831x;
285 u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
286 int val;
287 380
288 val = wm831x_reg_read(wm831x, reg); 381 if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
289 if (val < 0) 382 return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
290 return val; 383 else
291 384 return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
292 return wm831x_buckv_list_voltage(rdev, val & WM831X_DC1_ON_VSEL_MASK);
293} 385}
294 386
295/* Current limit options */ 387/* Current limit options */
@@ -346,6 +438,64 @@ static struct regulator_ops wm831x_buckv_ops = {
346 .set_suspend_mode = wm831x_dcdc_set_suspend_mode, 438 .set_suspend_mode = wm831x_dcdc_set_suspend_mode,
347}; 439};
348 440
441/*
442 * Set up DVS control. We just log errors since we can still run
443 * (with reduced performance) if we fail.
444 */
445static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
446 struct wm831x_buckv_pdata *pdata)
447{
448 struct wm831x *wm831x = dcdc->wm831x;
449 int ret;
450 u16 ctrl;
451
452 if (!pdata || !pdata->dvs_gpio)
453 return;
454
455 switch (pdata->dvs_control_src) {
456 case 1:
457 ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
458 break;
459 case 2:
460 ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
461 break;
462 default:
463 dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
464 pdata->dvs_control_src, dcdc->name);
465 return;
466 }
467
468 ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
469 WM831X_DC1_DVS_SRC_MASK, ctrl);
470 if (ret < 0) {
471 dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
472 dcdc->name, ret);
473 return;
474 }
475
476 ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
477 if (ret < 0) {
478 dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
479 dcdc->name, ret);
480 return;
481 }
482
483 /* gpiolib won't let us read the GPIO status so pick the higher
484 * of the two existing voltages so we take it as platform data.
485 */
486 dcdc->dvs_gpio_state = pdata->dvs_init_state;
487
488 ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state);
489 if (ret < 0) {
490 dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n",
491 dcdc->name, ret);
492 gpio_free(pdata->dvs_gpio);
493 return;
494 }
495
496 dcdc->dvs_gpio = pdata->dvs_gpio;
497}
498
349static __devinit int wm831x_buckv_probe(struct platform_device *pdev) 499static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
350{ 500{
351 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 501 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -384,6 +534,23 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
384 dcdc->desc.ops = &wm831x_buckv_ops; 534 dcdc->desc.ops = &wm831x_buckv_ops;
385 dcdc->desc.owner = THIS_MODULE; 535 dcdc->desc.owner = THIS_MODULE;
386 536
537 ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
538 if (ret < 0) {
539 dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret);
540 goto err;
541 }
542 dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
543
544 ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
545 if (ret < 0) {
546 dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
547 goto err;
548 }
549 dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
550
551 if (pdata->dcdc[id])
552 wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
553
387 dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, 554 dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
388 pdata->dcdc[id], dcdc); 555 pdata->dcdc[id], dcdc);
389 if (IS_ERR(dcdc->regulator)) { 556 if (IS_ERR(dcdc->regulator)) {
@@ -422,6 +589,8 @@ err_uv:
422err_regulator: 589err_regulator:
423 regulator_unregister(dcdc->regulator); 590 regulator_unregister(dcdc->regulator);
424err: 591err:
592 if (dcdc->dvs_gpio)
593 gpio_free(dcdc->dvs_gpio);
425 kfree(dcdc); 594 kfree(dcdc);
426 return ret; 595 return ret;
427} 596}
@@ -434,6 +603,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
434 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); 603 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
435 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); 604 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
436 regulator_unregister(dcdc->regulator); 605 regulator_unregister(dcdc->regulator);
606 if (dcdc->dvs_gpio)
607 gpio_free(dcdc->dvs_gpio);
437 kfree(dcdc); 608 kfree(dcdc);
438 609
439 return 0; 610 return 0;
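
The wm831x-dcdc rework keeps two programmed targets per buck, the ON and DVS selectors, and flips between them with a GPIO when it can, which is cheaper than a control-bus register write on every CPUfreq transition. A reduced sketch of that decision order (the accessors are placeholders, not the wm831x API):

struct buck {
        int dvs_gpio;           /* 0 means no DVS GPIO is wired up */
        int dvs_gpio_state;
        int on_vsel;            /* selector currently in the ON register */
        int dvs_vsel;           /* selector currently in the DVS register */
};

/* placeholder register/GPIO accessors; a real driver talks to the PMIC here */
static int write_on_vsel(struct buck *b, int vsel)  { b->on_vsel = vsel;  return 0; }
static void set_dvs_gpio(struct buck *b, int state) { b->dvs_gpio_state = state; }

static int buck_set_voltage(struct buck *b, int vsel)
{
        /* if either register already holds the target, just steer the GPIO */
        if (b->dvs_gpio && b->on_vsel == vsel) {
                set_dvs_gpio(b, 0);
                return 0;
        }
        if (b->dvs_gpio && b->dvs_vsel == vsel) {
                set_dvs_gpio(b, 1);
                return 0;
        }

        /* otherwise reprogram ON and make sure it is the active selection */
        if (write_on_vsel(b, vsel))
                return -1;
        if (b->dvs_gpio)
                set_dvs_gpio(b, 0);
        return 0;
}

int main(void)
{
        struct buck b = { .dvs_gpio = 1, .on_vsel = 40, .dvs_vsel = 96 };

        return buck_set_voltage(&b, 96);        /* served by toggling the DVS GPIO */
}
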
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 902db56ce099..61e02ac2fda3 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -470,7 +470,7 @@ static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
         struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
         struct wm831x *wm831x = ldo->wm831x;
         int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
-        unsigned int ret;
+        int ret;
 
         ret = wm831x_reg_read(wm831x, on_reg);
         if (ret < 0)
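
The wm831x-ldo one-liner is a signedness fix: wm831x_reg_read() reports failures as negative errno values, and with "unsigned int ret" the following "ret < 0" check can never fire, so an error would have been treated as register contents. Illustration:

#include <stdio.h>

static int fake_reg_read(void)
{
        return -5;      /* an -EIO-style error code */
}

int main(void)
{
        unsigned int u = fake_reg_read();
        int s = fake_reg_read();

        printf("unsigned check fires: %d\n", u < 0);    /* 0 - error silently missed */
        printf("signed check fires:   %d\n", s < 0);    /* 1 - error detected */
        return 0;
}
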
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index eb154dc57164..c8c12325e69b 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -686,7 +686,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
          */
 #if defined(CONFIG_ATARI)
         address_space = 64;
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__sparc__)
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
+                        || defined(__sparc__) || defined(__mips__)
         address_space = 128;
 #else
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 259db7f3535b..9630e7d3314e 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -778,6 +778,8 @@ static int __devinit ds1305_probe(struct spi_device *spi)
                                 spi->irq, status);
                         goto fail1;
                 }
+
+                device_set_wakeup_capable(&spi->dev, 1);
         }
 
         /* export NVRAM */
782 784
783 /* export NVRAM */ 785 /* export NVRAM */
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8a99da6f2f24..c4ec5c158aa1 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -881,6 +881,8 @@ read_rtc:
                                         "unable to request IRQ!\n");
                         goto exit_irq;
                 }
+
+                device_set_wakeup_capable(&client->dev, 1);
                 set_bit(HAS_ALARM, &ds1307->flags);
                 dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
         }
886 } 888 }
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 713f7bf5afb3..5317bbcbc7a0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -383,6 +383,8 @@ static int ds1374_probe(struct i2c_client *client,
                         dev_err(&client->dev, "unable to request IRQ\n");
                         goto out_free;
                 }
+
+                device_set_wakeup_capable(&client->dev, 1);
         }
 
         ds1374->rtc = rtc_device_register(client->name, &client->dev,
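
The three RTC hunks above add the same call: once the alarm IRQ has been requested successfully, device_set_wakeup_capable() marks the device so the power/wakeup sysfs machinery knows it can wake the system and the alarm can be armed as a wakeup source. A sketch of where the call sits in a probe-style path (driver and handler names are made up; exact header placement varies by kernel version):

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t example_rtc_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_rtc_setup_alarm(struct device *dev, int irq)
{
        int err = request_irq(irq, example_rtc_irq, 0, "example-rtc", dev);

        if (err)
                return err;     /* no working IRQ, so no wakeup capability either */

        /* advertise wakeup capability only once the alarm IRQ is usable */
        device_set_wakeup_capable(dev, 1);
        return 0;
}
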
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index fd1231738ef4..148b1dd24070 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -218,7 +218,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
218 spin_unlock_irqrestore(&aliastree.lock, flags); 218 spin_unlock_irqrestore(&aliastree.lock, flags);
219 newlcu = _allocate_lcu(uid); 219 newlcu = _allocate_lcu(uid);
220 if (IS_ERR(newlcu)) 220 if (IS_ERR(newlcu))
221 return PTR_ERR(lcu); 221 return PTR_ERR(newlcu);
222 spin_lock_irqsave(&aliastree.lock, flags); 222 spin_lock_irqsave(&aliastree.lock, flags);
223 lcu = _find_lcu(server, uid); 223 lcu = _find_lcu(server, uid);
224 if (!lcu) { 224 if (!lcu) {
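The dasd_alias fix above (and the similar fs3270_open() fix a few hunks below) corrects a copy-and-paste slip: PTR_ERR() was applied to the wrong variable, so a failed allocation would propagate a stale value instead of the real error code. A self-contained userspace mock of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom (the macro names mirror the kernel helpers, the implementations are simplified):

    #include <errno.h>
    #include <stdio.h>

    /* simplified userspace stand-ins for the kernel's err-pointer helpers */
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define PTR_ERR(ptr)    ((long)(ptr))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-4095)

    static void *allocate_lcu(void)
    {
            return ERR_PTR(-ENOMEM);        /* pretend the allocation failed */
    }

    int main(void)
    {
            void *old_lcu = NULL;           /* some earlier, unrelated pointer */
            void *newlcu = allocate_lcu();

            if (IS_ERR(newlcu)) {
                    /* the error must be taken from the pointer that was just
                     * checked; PTR_ERR(old_lcu) would be 0 and hide the failure */
                    printf("right: %ld, wrong: %ld\n",
                           PTR_ERR(newlcu), PTR_ERR(old_lcu));
                    return 1;
            }
            return 0;
    }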
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f64d0db881b4..6e14863f5c70 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#define KMSG_COMPONENT "dasd-diag" 11#define KMSG_COMPONENT "dasd"
12 12
13#include <linux/stddef.h> 13#include <linux/stddef.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
@@ -146,16 +146,16 @@ dasd_diag_erp(struct dasd_device *device)
146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
147 if (rc == 4) { 147 if (rc == 4) {
148 if (!(device->features & DASD_FEATURE_READONLY)) { 148 if (!(device->features & DASD_FEATURE_READONLY)) {
149 dev_warn(&device->cdev->dev, 149 pr_warning("%s: The access mode of a DIAG device "
150 "The access mode of a DIAG device changed" 150 "changed to read-only\n",
151 " to read-only"); 151 dev_name(&device->cdev->dev));
152 device->features |= DASD_FEATURE_READONLY; 152 device->features |= DASD_FEATURE_READONLY;
153 } 153 }
154 rc = 0; 154 rc = 0;
155 } 155 }
156 if (rc) 156 if (rc)
157 dev_warn(&device->cdev->dev, "DIAG ERP failed with " 157 pr_warning("%s: DIAG ERP failed with "
158 "rc=%d\n", rc); 158 "rc=%d\n", dev_name(&device->cdev->dev), rc);
159} 159}
160 160
161/* Start a given request at the device. Return zero on success, non-zero 161/* Start a given request at the device. Return zero on success, non-zero
@@ -371,8 +371,9 @@ dasd_diag_check_device(struct dasd_device *device)
371 private->pt_block = 2; 371 private->pt_block = 2;
372 break; 372 break;
373 default: 373 default:
374 dev_warn(&device->cdev->dev, "Device type %d is not supported " 374 pr_warning("%s: Device type %d is not supported "
375 "in DIAG mode\n", private->rdc_data.vdev_class); 375 "in DIAG mode\n", dev_name(&device->cdev->dev),
376 private->rdc_data.vdev_class);
376 rc = -EOPNOTSUPP; 377 rc = -EOPNOTSUPP;
377 goto out; 378 goto out;
378 } 379 }
@@ -413,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
413 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 414 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
414 rc = dia250(&private->iob, RW_BIO); 415 rc = dia250(&private->iob, RW_BIO);
415 if (rc == 3) { 416 if (rc == 3) {
416 dev_warn(&device->cdev->dev, 417 pr_warning("%s: A 64-bit DIAG call failed\n",
417 "A 64-bit DIAG call failed\n"); 418 dev_name(&device->cdev->dev));
418 rc = -EOPNOTSUPP; 419 rc = -EOPNOTSUPP;
419 goto out_label; 420 goto out_label;
420 } 421 }
@@ -423,8 +424,9 @@ dasd_diag_check_device(struct dasd_device *device)
423 break; 424 break;
424 } 425 }
425 if (bsize > PAGE_SIZE) { 426 if (bsize > PAGE_SIZE) {
426 dev_warn(&device->cdev->dev, "Accessing the DASD failed because" 427 pr_warning("%s: Accessing the DASD failed because of an "
427 " of an incorrect format (rc=%d)\n", rc); 428 "incorrect format (rc=%d)\n",
429 dev_name(&device->cdev->dev), rc);
428 rc = -EIO; 430 rc = -EIO;
429 goto out_label; 431 goto out_label;
430 } 432 }
@@ -442,18 +444,18 @@ dasd_diag_check_device(struct dasd_device *device)
442 block->s2b_shift++; 444 block->s2b_shift++;
443 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 445 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
444 if (rc && (rc != 4)) { 446 if (rc && (rc != 4)) {
445 dev_warn(&device->cdev->dev, "DIAG initialization " 447 pr_warning("%s: DIAG initialization failed with rc=%d\n",
446 "failed with rc=%d\n", rc); 448 dev_name(&device->cdev->dev), rc);
447 rc = -EIO; 449 rc = -EIO;
448 } else { 450 } else {
449 if (rc == 4) 451 if (rc == 4)
450 device->features |= DASD_FEATURE_READONLY; 452 device->features |= DASD_FEATURE_READONLY;
451 dev_info(&device->cdev->dev, 453 pr_info("%s: New DASD with %ld byte/block, total size %ld "
452 "New DASD with %ld byte/block, total size %ld KB%s\n", 454 "KB%s\n", dev_name(&device->cdev->dev),
453 (unsigned long) block->bp_block, 455 (unsigned long) block->bp_block,
454 (unsigned long) (block->blocks << 456 (unsigned long) (block->blocks <<
455 block->s2b_shift) >> 1, 457 block->s2b_shift) >> 1,
456 (rc == 4) ? ", read-only device" : ""); 458 (rc == 4) ? ", read-only device" : "");
457 rc = 0; 459 rc = 0;
458 } 460 }
459out_label: 461out_label:
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 28e4649fa9e4..247b2b934728 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -467,7 +467,7 @@ fs3270_open(struct inode *inode, struct file *filp)
467 if (IS_ERR(ib)) { 467 if (IS_ERR(ib)) {
468 raw3270_put_view(&fp->view); 468 raw3270_put_view(&fp->view);
469 raw3270_del_view(&fp->view); 469 raw3270_del_view(&fp->view);
470 rc = PTR_ERR(fp); 470 rc = PTR_ERR(ib);
471 goto out; 471 goto out;
472 } 472 }
473 fp->rdbuf = ib; 473 fp->rdbuf = ib;
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 3657fe103c27..cb70fa1cf539 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape_34xx" 11#define KMSG_COMPONENT "tape_34xx"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 0c72aadb8391..9821c5886613 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape_3590" 11#define KMSG_COMPONENT "tape_3590"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
@@ -136,7 +137,7 @@ static void int_to_ext_kekl(struct tape3592_kekl *in,
136 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; 137 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
137 memcpy(out->label, in->label, sizeof(in->label)); 138 memcpy(out->label, in->label, sizeof(in->label));
138 EBCASC(out->label, sizeof(in->label)); 139 EBCASC(out->label, sizeof(in->label));
139 strstrip(out->label); 140 strim(out->label);
140} 141}
141 142
142static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, 143static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 4799cc2f73c3..96816149368a 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#define KMSG_COMPONENT "tape" 13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 15
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/module.h> 17#include <linux/module.h>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 23d773a0d113..2125ec7d95f0 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -10,6 +10,9 @@
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */ 11 */
12 12
13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
13#include <linux/module.h> 16#include <linux/module.h>
14#include <linux/types.h> 17#include <linux/types.h>
15#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index ddc914ccea8f..b2864e3edb6d 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -7,6 +7,10 @@
7 * Author: Stefan Bader <shbader@de.ibm.com> 7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H 8 * Based on simple class device code by Greg K-H
9 */ 9 */
10
11#define KMSG_COMPONENT "tape"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10#include "tape_class.h" 14#include "tape_class.h"
11 15
12MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); 16MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f5d6802dc5da..81b094e480e6 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -12,6 +12,8 @@
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/init.h> // for kernel parameters 18#include <linux/init.h> // for kernel parameters
17#include <linux/kmod.h> // for requesting modules 19#include <linux/kmod.h> // for requesting modules
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index ebd820ccfb24..0ceb37984f77 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -11,6 +11,9 @@
11 * PROCFS Functions 11 * PROCFS Functions
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14#include <linux/module.h> 17#include <linux/module.h>
15#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
16#include <linux/seq_file.h> 19#include <linux/seq_file.h>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 750354ad16e5..03f07e5dd6e9 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -11,6 +11,9 @@
11 * Stefan Bader <shbader@de.ibm.com> 11 * Stefan Bader <shbader@de.ibm.com>
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14#include <linux/stddef.h> 17#include <linux/stddef.h>
15#include <linux/kernel.h> 18#include <linux/kernel.h>
16#include <linux/bio.h> 19#include <linux/bio.h>
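Several of the s390 tape hunks above add the same two-line preamble: a KMSG_COMPONENT name plus "#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt". In the kernel, printk.h expands pr_warning(), pr_info() and friends through pr_fmt(), so every message emitted from the file picks up the component prefix automatically. A small userspace imitation of that mechanism (the pr_warning macro here mimics, but is not, the kernel one):

    #include <stdio.h>

    #define KMSG_COMPONENT "tape"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

    /* userspace stand-in; the real kernel macro routes through
     * printk(KERN_WARNING pr_fmt(fmt), ...) */
    #define pr_warning(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_warning("device %s went offline\n", "0.0.1234");
            /* prints: tape: device 0.0.1234 went offline */
            return 0;
    }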
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 9509e3860934..7a28a3029a3f 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -49,7 +49,6 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
49 */ 49 */
50static void ccwreq_stop(struct ccw_device *cdev, int rc) 50static void ccwreq_stop(struct ccw_device *cdev, int rc)
51{ 51{
52 struct subchannel *sch = to_subchannel(cdev->dev.parent);
53 struct ccw_request *req = &cdev->private->req; 52 struct ccw_request *req = &cdev->private->req;
54 53
55 if (req->done) 54 if (req->done)
@@ -57,7 +56,6 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
57 req->done = 1; 56 req->done = 1;
58 ccw_device_set_timeout(cdev, 0); 57 ccw_device_set_timeout(cdev, 0);
59 memset(&cdev->private->irb, 0, sizeof(struct irb)); 58 memset(&cdev->private->irb, 0, sizeof(struct irb));
60 sch->lpm = sch->schib.pmcw.pam;
61 if (rc && rc != -ENODEV && req->drc) 59 if (rc && rc != -ENODEV && req->drc)
62 rc = req->drc; 60 rc = req->drc;
63 req->callback(cdev, req->data, rc); 61 req->callback(cdev, req->data, rc);
@@ -80,7 +78,6 @@ static void ccwreq_do(struct ccw_device *cdev)
80 continue; 78 continue;
81 } 79 }
82 /* Perform start function. */ 80 /* Perform start function. */
83 sch->lpm = 0xff;
84 memset(&cdev->private->irb, 0, sizeof(struct irb)); 81 memset(&cdev->private->irb, 0, sizeof(struct irb));
85 rc = cio_start(sch, cp, (u8) req->mask); 82 rc = cio_start(sch, cp, (u8) req->mask);
86 if (rc == 0) { 83 if (rc == 0) {
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 73901c9e260f..a6c7d5426fb2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1519,6 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1519 sch->driver = &io_subchannel_driver; 1519 sch->driver = &io_subchannel_driver;
1520 /* Initialize the ccw_device structure. */ 1520 /* Initialize the ccw_device structure. */
1521 cdev->dev.parent= &sch->dev; 1521 cdev->dev.parent= &sch->dev;
1522 sch_set_cdev(sch, cdev);
1522 io_subchannel_recog(cdev, sch); 1523 io_subchannel_recog(cdev, sch);
1523 /* Now wait for the async. recognition to come to an end. */ 1524 /* Now wait for the async. recognition to come to an end. */
1524 spin_lock_irq(cdev->ccwlock); 1525 spin_lock_irq(cdev->ccwlock);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index aad188e43b4f..6facb5499a65 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -142,7 +142,7 @@ static void spid_do(struct ccw_device *cdev)
142 u8 fn; 142 u8 fn;
143 143
144 /* Use next available path that is not already in correct state. */ 144 /* Use next available path that is not already in correct state. */
145 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm); 145 req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
146 if (!req->lpm) 146 if (!req->lpm)
147 goto out_nopath; 147 goto out_nopath;
148 /* Channel program setup. */ 148 /* Channel program setup. */
@@ -254,15 +254,15 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
254 *p = first; 254 *p = first;
255} 255}
256 256
257static u8 pgid_to_vpm(struct ccw_device *cdev) 257static u8 pgid_to_donepm(struct ccw_device *cdev)
258{ 258{
259 struct subchannel *sch = to_subchannel(cdev->dev.parent); 259 struct subchannel *sch = to_subchannel(cdev->dev.parent);
260 struct pgid *pgid; 260 struct pgid *pgid;
261 int i; 261 int i;
262 int lpm; 262 int lpm;
263 u8 vpm = 0; 263 u8 donepm = 0;
264 264
265 /* Set VPM bits for paths which are already in the target state. */ 265 /* Set bits for paths which are already in the target state. */
266 for (i = 0; i < 8; i++) { 266 for (i = 0; i < 8; i++) {
267 lpm = 0x80 >> i; 267 lpm = 0x80 >> i;
268 if ((cdev->private->pgid_valid_mask & lpm) == 0) 268 if ((cdev->private->pgid_valid_mask & lpm) == 0)
@@ -282,10 +282,10 @@ static u8 pgid_to_vpm(struct ccw_device *cdev)
282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
283 continue; 283 continue;
284 } 284 }
285 vpm |= lpm; 285 donepm |= lpm;
286 } 286 }
287 287
288 return vpm; 288 return donepm;
289} 289}
290 290
291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid) 291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
@@ -307,6 +307,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
307 int mismatch = 0; 307 int mismatch = 0;
308 int reserved = 0; 308 int reserved = 0;
309 int reset = 0; 309 int reset = 0;
310 u8 donepm;
310 311
311 if (rc) 312 if (rc)
312 goto out; 313 goto out;
@@ -316,18 +317,20 @@ static void snid_done(struct ccw_device *cdev, int rc)
316 else if (mismatch) 317 else if (mismatch)
317 rc = -EOPNOTSUPP; 318 rc = -EOPNOTSUPP;
318 else { 319 else {
319 sch->vpm = pgid_to_vpm(cdev); 320 donepm = pgid_to_donepm(cdev);
321 sch->vpm = donepm & sch->opm;
322 cdev->private->pgid_todo_mask &= ~donepm;
320 pgid_fill(cdev, pgid); 323 pgid_fill(cdev, pgid);
321 } 324 }
322out: 325out:
323 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 326 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
324 "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc, 327 "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
325 cdev->private->pgid_valid_mask, sch->vpm, mismatch, 328 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
326 reserved, reset); 329 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
327 switch (rc) { 330 switch (rc) {
328 case 0: 331 case 0:
329 /* Anything left to do? */ 332 /* Anything left to do? */
330 if (sch->vpm == sch->schib.pmcw.pam) { 333 if (cdev->private->pgid_todo_mask == 0) {
331 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); 334 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
332 return; 335 return;
333 } 336 }
@@ -411,6 +414,7 @@ static void verify_start(struct ccw_device *cdev)
411 struct ccw_dev_id *devid = &cdev->private->dev_id; 414 struct ccw_dev_id *devid = &cdev->private->dev_id;
412 415
413 sch->vpm = 0; 416 sch->vpm = 0;
417 sch->lpm = sch->schib.pmcw.pam;
414 /* Initialize request data. */ 418 /* Initialize request data. */
415 memset(req, 0, sizeof(*req)); 419 memset(req, 0, sizeof(*req));
416 req->timeout = PGID_TIMEOUT; 420 req->timeout = PGID_TIMEOUT;
@@ -442,11 +446,14 @@ static void verify_start(struct ccw_device *cdev)
442 */ 446 */
443void ccw_device_verify_start(struct ccw_device *cdev) 447void ccw_device_verify_start(struct ccw_device *cdev)
444{ 448{
449 struct subchannel *sch = to_subchannel(cdev->dev.parent);
450
445 CIO_TRACE_EVENT(4, "vrfy"); 451 CIO_TRACE_EVENT(4, "vrfy");
446 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 452 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
447 /* Initialize PGID data. */ 453 /* Initialize PGID data. */
448 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 454 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
449 cdev->private->pgid_valid_mask = 0; 455 cdev->private->pgid_valid_mask = 0;
456 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
450 /* 457 /*
451 * Initialize pathgroup and multipath state with target values. 458 * Initialize pathgroup and multipath state with target values.
452 * They may change in the course of path verification. 459 * They may change in the course of path verification.
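The device_pgid changes above split the path bookkeeping into two masks: pgid_todo_mask starts out as the physically available path mask (pam) and has bits cleared as SNID results come back, while vpm only accumulates paths that are both done and currently operational (donepm & opm); spid_do() now walks the todo mask and verification finishes once it reaches zero. A tiny standalone model of that bookkeeping (mask names follow the hunk, the sample values and the two-round loop are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned char pam  = 0xc3;  /* physically available paths        */
            unsigned char opm  = 0x83;  /* currently operational paths       */
            unsigned char todo = pam;   /* pgid_todo_mask at verify_start()  */
            unsigned char vpm  = 0;     /* paths verified so far             */

            /* each SNID evaluation reports the cumulative set of paths that
             * are already in the target state */
            unsigned char donepm_per_round[] = { 0x41, 0xc3 };

            for (int i = 0; i < 2; i++) {
                    unsigned char donepm = donepm_per_round[i];

                    vpm = donepm & opm;   /* only operational paths count    */
                    todo &= ~donepm;      /* done paths need no further SPID */
                    printf("round %d: todo=%02x vpm=%02x\n", i, todo, vpm);
            }
            if (todo == 0)
                    printf("verification complete, vpm=%02x\n", vpm);
            return 0;
    }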
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9b..ca5e9bb9d458 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
163 /* Add tcat to tccb. */ 163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw); 164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; 165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
166 memset(tcat, 0, sizeof(tcat)); 166 memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */ 167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb); 168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) 169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
269 */ 269 */
270void tsb_init(struct tsb *tsb) 270void tsb_init(struct tsb *tsb)
271{ 271{
272 memset(tsb, 0, sizeof(tsb)); 272 memset(tsb, 0, sizeof(*tsb));
273} 273}
274EXPORT_SYMBOL(tsb_init); 274EXPORT_SYMBOL(tsb_init);
275 275
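The two fcx.c fixes above are the classic memset-of-a-pointer bug: sizeof(tcat) is the size of the pointer (4 or 8 bytes), not of the structure it points to, so most of the block was left uninitialized. A minimal standalone reproduction (the structure is a made-up stand-in):

    #include <stdio.h>
    #include <string.h>

    struct tsb_like {
            unsigned char data[64];
    };

    int main(void)
    {
            struct tsb_like buf;
            struct tsb_like *tsb = &buf;

            memset(buf.data, 0xaa, sizeof(buf.data));  /* pre-fill with garbage */

            memset(tsb, 0, sizeof(tsb));   /* BUG: clears only sizeof(pointer) bytes */
            printf("after sizeof(tsb):  byte[32] = 0x%02x\n", tsb->data[32]);

            memset(tsb, 0, sizeof(*tsb));  /* correct: clears the whole structure */
            printf("after sizeof(*tsb): byte[32] = 0x%02x\n", tsb->data[32]);
            return 0;
    }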
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d72ae4c93af9..b9ce712a7f25 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -150,6 +150,7 @@ struct ccw_device_private {
150 struct ccw_request req; /* internal I/O request */ 150 struct ccw_request req; /* internal I/O request */
151 int iretry; 151 int iretry;
152 u8 pgid_valid_mask; /* mask of valid PGIDs */ 152 u8 pgid_valid_mask; /* mask of valid PGIDs */
153 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
153 struct { 154 struct {
154 unsigned int fast:1; /* post with "channel end" */ 155 unsigned int fast:1; /* post with "channel end" */
155 unsigned int repall:1; /* report every interrupt status */ 156 unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84b9599..b2275c5000e7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -486,7 +486,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
486 case SLSB_P_INPUT_PRIMED: 486 case SLSB_P_INPUT_PRIMED:
487 inbound_primed(q, count); 487 inbound_primed(q, count);
488 q->first_to_check = add_buf(q->first_to_check, count); 488 q->first_to_check = add_buf(q->first_to_check, count);
489 atomic_sub(count, &q->nr_buf_used); 489 if (atomic_sub(count, &q->nr_buf_used) == 0)
490 qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
490 break; 491 break;
491 case SLSB_P_INPUT_ERROR: 492 case SLSB_P_INPUT_ERROR:
492 announce_buffer_error(q, count); 493 announce_buffer_error(q, count);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index 968e3c7c2632..54f7c325a3e6 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -64,6 +64,8 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v)
64 (long)atomic_long_read(&perf_stats.fast_requeue)); 64 (long)atomic_long_read(&perf_stats.fast_requeue));
65 seq_printf(m, "Number of outbound target full condition\t: %li\n", 65 seq_printf(m, "Number of outbound target full condition\t: %li\n",
66 (long)atomic_long_read(&perf_stats.outbound_target_full)); 66 (long)atomic_long_read(&perf_stats.outbound_target_full));
67 seq_printf(m, "Number of inbound queue full condition\t\t: %li\n",
68 (long)atomic_long_read(&perf_stats.inbound_queue_full));
67 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", 69 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
68 (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); 70 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
69 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", 71 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index ff4504ce1e3c..12454231dc8b 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -36,6 +36,7 @@ struct qdio_perf_stats {
36 atomic_long_t outbound_handler; 36 atomic_long_t outbound_handler;
37 atomic_long_t fast_requeue; 37 atomic_long_t fast_requeue;
38 atomic_long_t outbound_target_full; 38 atomic_long_t outbound_target_full;
39 atomic_long_t inbound_queue_full;
39 40
40 /* for debugging */ 41 /* for debugging */
41 atomic_long_t debug_tl_out_timer; 42 atomic_long_t debug_tl_out_timer;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce9..8c2dea5fa2b4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -48,7 +48,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
48 if (!irq_ptr) 48 if (!irq_ptr)
49 return; 49 return;
50 50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format; 51 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field) 52 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field, 53 memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +81,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 81 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q) 82 if (!q)
84 return -ENOMEM; 83 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86 84
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); 85 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) { 86 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q); 87 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM; 88 return -ENOMEM;
91 } 89 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q; 90 irq_ptr_qs[i] = q;
94 } 91 }
95 return 0; 92 return 0;
@@ -131,7 +128,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
131 /* fill in sbal */ 128 /* fill in sbal */
132 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 129 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
133 q->sbal[j] = *sbals_array++; 130 q->sbal[j] = *sbals_array++;
134 WARN_ON((unsigned long)q->sbal[j] & 0xff); 131 BUG_ON((unsigned long)q->sbal[j] & 0xff);
135 } 132 }
136 133
137 /* fill in slib */ 134 /* fill in slib */
@@ -147,11 +144,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
147 /* fill in sl */ 144 /* fill in sl */
148 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 145 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
149 q->sl->element[j].sbal = (unsigned long)q->sbal[j]; 146 q->sl->element[j].sbal = (unsigned long)q->sbal[j];
150
151 DBF_EVENT("sl-slsb-sbal");
152 DBF_HEX(q->sl, sizeof(void *));
153 DBF_HEX(&q->slsb, sizeof(void *));
154 DBF_HEX(q->sbal, sizeof(void *));
155} 147}
156 148
157static void setup_queues(struct qdio_irq *irq_ptr, 149static void setup_queues(struct qdio_irq *irq_ptr,
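The qdio_setup hunks above drop several alignment sanity checks and turn the remaining sbal check into a BUG_ON. The mask tests read as alignment checks: addr & 0xff is non-zero unless addr is 256-byte aligned, addr & 0x7ff unless it is 2 KiB aligned. A standalone illustration of that arithmetic (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int is_aligned(const void *p, uintptr_t align)
    {
            /* align must be a power of two; the low bits must all be zero */
            return ((uintptr_t)p & (align - 1)) == 0;
    }

    int main(void)
    {
            void *p = aligned_alloc(256, 1024);

            printf("%p: 256-byte aligned=%d, 2KiB aligned=%d\n",
                   p, is_aligned(p, 256), is_aligned(p, 2048));
            printf("%p: 256-byte aligned=%d\n",
                   (void *)((char *)p + 16), is_aligned((char *)p + 16, 256));
            free(p);
            return 0;
    }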
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3bf75924741f..84d3bbaa95e7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
76 Fix bug in twa_get_param() on 4GB+. 76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap(). 77 Use pci_resource_len() for ioremap().
78 2.26.02.012 - Add power management support. 78 2.26.02.012 - Add power management support.
79 2.26.02.013 - Fix bug in twa_load_sgl().
79*/ 80*/
80 81
81#include <linux/module.h> 82#include <linux/module.h>
@@ -100,7 +101,7 @@
100#include "3w-9xxx.h" 101#include "3w-9xxx.h"
101 102
102/* Globals */ 103/* Globals */
103#define TW_DRIVER_VERSION "2.26.02.012" 104#define TW_DRIVER_VERSION "2.26.02.013"
104static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
105static unsigned int twa_device_extension_count; 106static unsigned int twa_device_extension_count;
106static int twa_major = -1; 107static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
1382 newcommand = &full_command_packet->command.newcommand; 1383 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl = 1384 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); 1385 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); 1386 if (length) {
1386 newcommand->sg_list[0].length = cpu_to_le32(length); 1387 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1388 newcommand->sg_list[0].length = cpu_to_le32(length);
1389 }
1387 newcommand->sgl_entries__lunh = 1390 newcommand->sgl_entries__lunh =
1388 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1)); 1391 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1389 } else { 1392 } else {
1390 oldcommand = &full_command_packet->command.oldcommand; 1393 oldcommand = &full_command_packet->command.oldcommand;
1391 oldcommand->request_id = request_id; 1394 oldcommand->request_id = request_id;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 36900c71a592..9191d1ea6451 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
388 Please read the comments at the top of 388 Please read the comments at the top of
389 <file:drivers/scsi/3w-xxxx.c>. 389 <file:drivers/scsi/3w-xxxx.c>.
390 390
391config SCSI_HPSA
392 tristate "HP Smart Array SCSI driver"
393 depends on PCI && SCSI
394 help
395 This driver supports HP Smart Array Controllers (circa 2009).
396 It is a SCSI alternative to the cciss driver, which is a block
397 driver. Anyone wishing to use HP Smart Array controllers who
398 would prefer the devices be presented to linux as SCSI devices,
399 rather than as generic block devices should say Y here.
400
391config SCSI_3W_9XXX 401config SCSI_3W_9XXX
392 tristate "3ware 9xxx SATA-RAID support" 402 tristate "3ware 9xxx SATA-RAID support"
393 depends on PCI && SCSI 403 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 280d3c657d60..92a8c500b23d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
91obj-$(CONFIG_SCSI_PAS16) += pas16.o 91obj-$(CONFIG_SCSI_PAS16) += pas16.o
92obj-$(CONFIG_SCSI_T128) += t128.o 92obj-$(CONFIG_SCSI_T128) += t128.o
93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
94obj-$(CONFIG_SCSI_HPSA) += hpsa.o
94obj-$(CONFIG_SCSI_DTC3280) += dtc.o 95obj-$(CONFIG_SCSI_DTC3280) += dtc.o
95obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ 96obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
96obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 97obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 698a527d6cca..f008708f1b08 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
135 while ((compl = be_mcc_compl_get(phba))) { 135 while ((compl = be_mcc_compl_get(phba))) {
136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
137 /* Interpret flags as an async trailer */ 137 /* Interpret flags as an async trailer */
138 BUG_ON(!is_link_state_evt(compl->flags)); 138 if (is_link_state_evt(compl->flags))
139 /* Interpret compl as a async link evt */
140 beiscsi_async_link_state_process(phba,
141 (struct be_async_event_link_state *) compl);
142 else
143 SE_DEBUG(DBG_LVL_1,
144 " Unsupported Async Event, flags"
145 " = 0x%08x \n", compl->flags);
139 146
140 /* Interpret compl as a async link evt */
141 beiscsi_async_link_state_process(phba,
142 (struct be_async_event_link_state *) compl);
143 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 147 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
144 status = be_mcc_compl_process(ctrl, compl); 148 status = be_mcc_compl_process(ctrl, compl);
145 atomic_dec(&phba->ctrl.mcc_obj.q.used); 149 atomic_dec(&phba->ctrl.mcc_obj.q.used);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 2b973f3c2eb2..6cf9dc37d78b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
684extern u64 iscsi_error_mask; 684extern u64 iscsi_error_mask;
685extern unsigned int en_tcp_dack; 685extern unsigned int en_tcp_dack;
686extern unsigned int event_coal_div; 686extern unsigned int event_coal_div;
687extern unsigned int event_coal_min;
687 688
688extern struct scsi_transport_template *bnx2i_scsi_xport_template; 689extern struct scsi_transport_template *bnx2i_scsi_xport_template;
689extern struct iscsi_transport bnx2i_iscsi_transport; 690extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d7630c13e..1af578dec276 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{ 133{
134 struct bnx2i_5771x_cq_db *cq_db; 134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index; 135 u16 cq_index;
136 u16 next_index;
137 u32 num_active_cmds;
136 138
139
140 /* Coalesce CQ entries only on 10G devices */
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 141 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return; 142 return;
139 143
144 /* Do not update CQ DB multiple times before firmware writes
145 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
146 * interrupts and other unwanted results
147 */
148 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
149 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
150 return;
151
140 if (action == CNIC_ARM_CQE) { 152 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn + 153 num_active_cmds = ep->num_active_cmds;
142 ep->num_active_cmds / event_coal_div; 154 if (num_active_cmds <= event_coal_min)
143 cq_index %= (ep->qp.cqe_size * 2 + 1); 155 next_index = 1;
144 if (!cq_index) { 156 else
157 next_index = event_coal_min +
158 (num_active_cmds - event_coal_min) / event_coal_div;
159 if (!next_index)
160 next_index = 1;
161 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
162 if (cq_index > ep->qp.cqe_size * 2)
163 cq_index -= ep->qp.cqe_size * 2;
164 if (!cq_index)
145 cq_index = 1; 165 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *) 166
147 ep->qp.cq_pgtbl_virt; 167 cq_db->sqn[0] = cq_index;
148 cq_db->sqn[0] = cq_index;
149 }
150 } 168 }
151} 169}
152 170
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
366 struct bnx2i_cmd *bnx2i_cmd; 384 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe; 385 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword; 386 u32 dword;
387 u32 scsi_lun[2];
369 388
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
376 tmfabort_wqe->op_attr = 0; 395 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr = 396 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; 397 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381 398
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 399 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0; 400 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 401 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385 402
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 403 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387 if (!ctask || ctask->sc) 404 if (!ctask || !ctask->sc)
388 /* 405 /*
389 * the iscsi layer must have completed the cmd while this 406 * the iscsi layer must have completed the cmd while this
390 * was starting up. 407 * was starting up.
408 *
409 * Note: In the case of a SCSI cmd timeout, the task's sc
410 * is still active; hence ctask->sc != 0
411 * In this case, the task must be aborted
391 */ 412 */
392 return 0; 413 return 0;
414
393 ref_sc = ctask->sc; 415 ref_sc = ctask->sc;
394 416
417 /* Retrieve LUN directly from the ref_sc */
418 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
419 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
420 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
421
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 422 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 423 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else 424 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 425 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); 426 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 427 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401 428
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 429 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
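The bnx2i_arm_cq_event_coalescing() rewrite above changes the CQ doorbell index from a simple division of outstanding commands to a two-parameter scheme: at or below event_coal_min outstanding commands the doorbell asks for an interrupt after the very next completion, above it the extra commands are divided by event_coal_div, and the resulting index is wrapped over the CQ sequence range (cqe_size * 2, with index 0 never written). A standalone transliteration of that arithmetic so the wrap-around can be poked at in userspace (the function name and test values are made up; 24 and 1 are the module defaults added in the bnx2i_init.c hunk below):

    #include <stdio.h>

    static unsigned int next_arm_index(unsigned int cqe_exp_seq_sn,
                                       unsigned int num_active_cmds,
                                       unsigned int coal_min,
                                       unsigned int coal_div,
                                       unsigned int cqe_size)
    {
            unsigned int next_index, cq_index;

            if (num_active_cmds <= coal_min)
                    next_index = 1;
            else
                    next_index = coal_min +
                                 (num_active_cmds - coal_min) / coal_div;
            if (!next_index)
                    next_index = 1;

            cq_index = cqe_exp_seq_sn + next_index - 1;
            if (cq_index > cqe_size * 2)    /* wrap over the CQ sequence range */
                    cq_index -= cqe_size * 2;
            if (!cq_index)                  /* index 0 is never written */
                    cq_index = 1;
            return cq_index;
    }

    int main(void)
    {
            printf("%u\n", next_arm_index(10, 8, 24, 1, 128));   /* prints 10 */
            printf("%u\n", next_arm_index(250, 80, 24, 1, 128)); /* prints 73 */
            return 0;
    }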
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d48ee8..6d8172e781cf 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.0.1e" 20#define DRV_MODULE_VERSION "2.1.0"
21#define DRV_MODULE_RELDATE "June 22, 2009" 21#define DRV_MODULE_RELDATE "Dec 06, 2009"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
32 32
33static DEFINE_MUTEX(bnx2i_dev_lock); 33static DEFINE_MUTEX(bnx2i_dev_lock);
34 34
35unsigned int event_coal_min = 24;
36module_param(event_coal_min, int, 0664);
37MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
38
35unsigned int event_coal_div = 1; 39unsigned int event_coal_div = 1;
36module_param(event_coal_div, int, 0664); 40module_param(event_coal_div, int, 0664);
37MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 41MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
83 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); 87 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
84 hba->mail_queue_access = BNX2I_MQ_BIN_MODE; 88 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
85 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || 89 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
86 hba->pci_did == PCI_DEVICE_ID_NX2_57711) 90 hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
91 hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
87 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); 92 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
93 else
94 printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
95 hba->pci_did);
88} 96}
89 97
90 98
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
363 371
364 printk(KERN_INFO "%s", version); 372 printk(KERN_INFO "%s", version);
365 373
366 if (!is_power_of_2(sq_size)) 374 if (sq_size && !is_power_of_2(sq_size))
367 sq_size = roundup_pow_of_two(sq_size); 375 sq_size = roundup_pow_of_two(sq_size);
368 376
369 mutex_init(&bnx2i_dev_lock); 377 mutex_init(&bnx2i_dev_lock);
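The bnx2i_mod_init() tweak above only rounds a caller-supplied sq_size up to a power of two when it is non-zero, apparently so that a zero "use the driver default" value is left untouched. The helpers come from <linux/log2.h>; a userspace equivalent of the check-and-round pattern (the helper implementations below are simplified stand-ins, not the kernel ones):

    #include <stdio.h>

    static int is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long r = 1;

            while (r < n)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned long sq_size = 100;    /* sample module parameter value */

            if (sq_size && !is_power_of_2(sq_size))
                    sq_size = roundup_pow_of_two(sq_size);
            printf("sq_size = %lu\n", sq_size);     /* prints 128 */
            return 0;
    }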
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 132898c88d5e..33b2294625bb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
485 struct iscsi_task *task = session->cmds[i]; 485 struct iscsi_task *task = session->cmds[i];
486 struct bnx2i_cmd *cmd = task->dd_data; 486 struct bnx2i_cmd *cmd = task->dd_data;
487 487
488 /* Anil */
489 task->hdr = &cmd->hdr; 488 task->hdr = &cmd->hdr;
490 task->hdr_max = sizeof(struct iscsi_hdr); 489 task->hdr_max = sizeof(struct iscsi_hdr);
491 490
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
765 hba->pci_svid = hba->pcidev->subsystem_vendor; 764 hba->pci_svid = hba->pcidev->subsystem_vendor;
766 hba->pci_func = PCI_FUNC(hba->pcidev->devfn); 765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
767 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); 766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
768 bnx2i_identify_device(hba);
769 767
770 bnx2i_identify_device(hba); 768 bnx2i_identify_device(hba);
771 bnx2i_setup_host_queue_size(hba, shost); 769 bnx2i_setup_host_queue_size(hba, shost);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4adf9c..26ffdcd5a437 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
291 c3cn_hold(c3cn); 291 c3cn_hold(c3cn);
292 spin_lock_bh(&c3cn->lock); 292 spin_lock_bh(&c3cn->lock);
293 if (c3cn->state == C3CN_STATE_CONNECTING) 293 if (c3cn->state == C3CN_STATE_CONNECTING)
294 fail_act_open(c3cn, EHOSTUNREACH); 294 fail_act_open(c3cn, -EHOSTUNREACH);
295 spin_unlock_bh(&c3cn->lock); 295 spin_unlock_bh(&c3cn->lock);
296 c3cn_put(c3cn); 296 c3cn_put(c3cn);
297 __kfree_skb(skb); 297 __kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
792{ 792{
793 switch (status) { 793 switch (status) {
794 case CPL_ERR_CONN_RESET: 794 case CPL_ERR_CONN_RESET:
795 return ECONNREFUSED; 795 return -ECONNREFUSED;
796 case CPL_ERR_ARP_MISS: 796 case CPL_ERR_ARP_MISS:
797 return EHOSTUNREACH; 797 return -EHOSTUNREACH;
798 case CPL_ERR_CONN_TIMEDOUT: 798 case CPL_ERR_CONN_TIMEDOUT:
799 return ETIMEDOUT; 799 return -ETIMEDOUT;
800 case CPL_ERR_TCAM_FULL: 800 case CPL_ERR_TCAM_FULL:
801 return ENOMEM; 801 return -ENOMEM;
802 case CPL_ERR_CONN_EXIST: 802 case CPL_ERR_CONN_EXIST:
803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n"); 803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
804 return EADDRINUSE; 804 return -EADDRINUSE;
805 default: 805 default:
806 return EIO; 806 return -EIO;
807 } 807 }
808} 808}
809 809
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
817 spin_lock_bh(&c3cn->lock); 817 spin_lock_bh(&c3cn->lock);
818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC); 818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
819 if (!skb) 819 if (!skb)
820 fail_act_open(c3cn, ENOMEM); 820 fail_act_open(c3cn, -ENOMEM);
821 else { 821 else {
822 skb->sk = (struct sock *)c3cn; 822 skb->sk = (struct sock *)c3cn;
823 set_arp_failure_handler(skb, act_open_req_arp_failure); 823 set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
966 case CPL_ERR_BAD_SYN: /* fall through */ 966 case CPL_ERR_BAD_SYN: /* fall through */
967 case CPL_ERR_CONN_RESET: 967 case CPL_ERR_CONN_RESET:
968 return c3cn->state > C3CN_STATE_ESTABLISHED ? 968 return c3cn->state > C3CN_STATE_ESTABLISHED ?
969 EPIPE : ECONNRESET; 969 -EPIPE : -ECONNRESET;
970 case CPL_ERR_XMIT_TIMEDOUT: 970 case CPL_ERR_XMIT_TIMEDOUT:
971 case CPL_ERR_PERSIST_TIMEDOUT: 971 case CPL_ERR_PERSIST_TIMEDOUT:
972 case CPL_ERR_FINWAIT2_TIMEDOUT: 972 case CPL_ERR_FINWAIT2_TIMEDOUT:
973 case CPL_ERR_KEEPALIVE_TIMEDOUT: 973 case CPL_ERR_KEEPALIVE_TIMEDOUT:
974 return ETIMEDOUT; 974 return -ETIMEDOUT;
975 default: 975 default:
976 return EIO; 976 return -EIO;
977 } 977 }
978} 978}
979 979
@@ -1563,7 +1563,7 @@ free_tid:
1563 s3_free_atid(cdev, c3cn->tid); 1563 s3_free_atid(cdev, c3cn->tid);
1564 c3cn->tid = 0; 1564 c3cn->tid = 0;
1565out_err: 1565out_err:
1566 return -1; 1566 return -EINVAL;
1567} 1567}
1568 1568
1569 1569
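The cxgb3i_offload changes above are all one convention fix: in-kernel code reports failures as negative errno values (-ECONNREFUSED, -ETIMEDOUT, ...), so returning the positive constants lets callers that test "ret < 0" mistake the failure for success. A small standalone sketch of the convention (function name and scenario are made up):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* report failure the kernel way: 0 on success, negative errno otherwise */
    static int open_connection(int reachable)
    {
            if (!reachable)
                    return -EHOSTUNREACH;
            return 0;
    }

    int main(void)
    {
            int ret = open_connection(0);

            if (ret < 0)    /* a positive EHOSTUNREACH would slip past this test */
                    printf("connect failed: %s\n", strerror(-ret));
            return 0;
    }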
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 709105071177..1fe3b0f1f3c9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
388 if (err > 0) { 388 if (err > 0) {
389 int pdulen = err; 389 int pdulen = err;
390 390
391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", 391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
392 task, skb, skb->len, skb->data_len, err); 392 task, skb, skb->len, skb->data_len, err);
393 393
394 if (task->conn->hdrdgst_en) 394 if (task->conn->hdrdgst_en)
395 pdulen += ISCSI_DIGEST_SIZE; 395 pdulen += ISCSI_DIGEST_SIZE;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 47cfe1c49c3e..1a660191a905 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
748 {"IBM", "1724"}, 748 {"IBM", "1724"},
749 {"IBM", "1726"}, 749 {"IBM", "1726"},
750 {"IBM", "1742"}, 750 {"IBM", "1742"},
751 {"IBM", "1745"},
752 {"IBM", "1746"},
751 {"IBM", "1814"}, 753 {"IBM", "1814"},
752 {"IBM", "1815"}, 754 {"IBM", "1815"},
753 {"IBM", "1818"}, 755 {"IBM", "1818"},
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a30ffaa1222c..10be9f36a4cc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
101 101
102static int fcoe_create(const char *, struct kernel_param *); 102static int fcoe_create(const char *, struct kernel_param *);
103static int fcoe_destroy(const char *, struct kernel_param *); 103static int fcoe_destroy(const char *, struct kernel_param *);
104static int fcoe_enable(const char *, struct kernel_param *);
105static int fcoe_disable(const char *, struct kernel_param *);
104 106
105static struct fc_seq *fcoe_elsct_send(struct fc_lport *, 107static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
106 u32 did, struct fc_frame *, 108 u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
115 117
116module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 118module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
117__MODULE_PARM_TYPE(create, "string"); 119__MODULE_PARM_TYPE(create, "string");
118MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); 120MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
119module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 121module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
120__MODULE_PARM_TYPE(destroy, "string"); 122__MODULE_PARM_TYPE(destroy, "string");
121MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); 123MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
124module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
125__MODULE_PARM_TYPE(enable, "string");
126MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
127module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
128__MODULE_PARM_TYPE(disable, "string");
129MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
122 130
123/* notification function for packets from net device */ 131/* notification function for packets from net device */
124static struct notifier_block fcoe_notifier = { 132static struct notifier_block fcoe_notifier = {
@@ -545,6 +553,23 @@ static void fcoe_queue_timer(ulong lport)
545} 553}
546 554
547/** 555/**
556 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
557 * @netdev: the associated net device
558 * @wwn: the output WWN
559 * @type: the type of WWN (WWPN or WWNN)
560 *
561 * Returns: 0 for success
562 */
563static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
564{
565 const struct net_device_ops *ops = netdev->netdev_ops;
566
567 if (ops->ndo_fcoe_get_wwn)
568 return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
569 return -EINVAL;
570}
571
572/**
548 * fcoe_netdev_config() - Set up net devive for SW FCoE 573 * fcoe_netdev_config() - Set up net devive for SW FCoE
549 * @lport: The local port that is associated with the net device 574 * @lport: The local port that is associated with the net device
550 * @netdev: The associated net device 575 * @netdev: The associated net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
611 */ 636 */
612 if (netdev->priv_flags & IFF_802_1Q_VLAN) 637 if (netdev->priv_flags & IFF_802_1Q_VLAN)
613 vid = vlan_dev_vlan_id(netdev); 638 vid = vlan_dev_vlan_id(netdev);
614 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 639
640 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
641 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
615 fc_set_wwnn(lport, wwnn); 642 fc_set_wwnn(lport, wwnn);
616 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid); 643 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
644 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
645 2, vid);
617 fc_set_wwpn(lport, wwpn); 646 fc_set_wwpn(lport, wwpn);
618 } 647 }
619 648
@@ -1231,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1231 "CPU.\n"); 1260 "CPU.\n");
1232 1261
1233 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1262 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1234 cpu = first_cpu(cpu_online_map); 1263 cpu = cpumask_first(cpu_online_mask);
1235 fps = &per_cpu(fcoe_percpu, cpu); 1264 fps = &per_cpu(fcoe_percpu, cpu);
1236 spin_lock_bh(&fps->fcoe_rx_list.lock); 1265 spin_lock_bh(&fps->fcoe_rx_list.lock);
1237 if (!fps->thread) { 1266 if (!fps->thread) {
@@ -1838,6 +1867,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
1838} 1867}
1839 1868
1840/** 1869/**
1870 * fcoe_disable() - Disables a FCoE interface
1871 * @buffer: The name of the Ethernet interface to be disabled
1872 * @kp: The associated kernel parameter
1873 *
1874 * Called from sysfs.
1875 *
1876 * Returns: 0 for success
1877 */
1878static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1879{
1880 struct fcoe_interface *fcoe;
1881 struct net_device *netdev;
1882 int rc = 0;
1883
1884 mutex_lock(&fcoe_config_mutex);
1885#ifdef CONFIG_FCOE_MODULE
1886 /*
1887 * Make sure the module has been initialized, and is not about to be
1888 * removed. Module paramter sysfs files are writable before the
1889 * module_init function is called and after module_exit.
1890 */
1891 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1892 rc = -ENODEV;
1893 goto out_nodev;
1894 }
1895#endif
1896
1897 netdev = fcoe_if_to_netdev(buffer);
1898 if (!netdev) {
1899 rc = -ENODEV;
1900 goto out_nodev;
1901 }
1902
1903 rtnl_lock();
1904 fcoe = fcoe_hostlist_lookup_port(netdev);
1905 rtnl_unlock();
1906
1907 if (fcoe)
1908 fc_fabric_logoff(fcoe->ctlr.lp);
1909 else
1910 rc = -ENODEV;
1911
1912 dev_put(netdev);
1913out_nodev:
1914 mutex_unlock(&fcoe_config_mutex);
1915 return rc;
1916}
1917
1918/**
1919 * fcoe_enable() - Enables a FCoE interface
1920 * @buffer: The name of the Ethernet interface to be enabled
1921 * @kp: The associated kernel parameter
1922 *
1923 * Called from sysfs.
1924 *
1925 * Returns: 0 for success
1926 */
1927static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1928{
1929 struct fcoe_interface *fcoe;
1930 struct net_device *netdev;
1931 int rc = 0;
1932
1933 mutex_lock(&fcoe_config_mutex);
1934#ifdef CONFIG_FCOE_MODULE
1935 /*
1936 * Make sure the module has been initialized, and is not about to be
1937 * removed. Module paramter sysfs files are writable before the
1938 * module_init function is called and after module_exit.
1939 */
1940 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1941 rc = -ENODEV;
1942 goto out_nodev;
1943 }
1944#endif
1945
1946 netdev = fcoe_if_to_netdev(buffer);
1947 if (!netdev) {
1948 rc = -ENODEV;
1949 goto out_nodev;
1950 }
1951
1952 rtnl_lock();
1953 fcoe = fcoe_hostlist_lookup_port(netdev);
1954 rtnl_unlock();
1955
1956 if (fcoe)
1957 rc = fc_fabric_login(fcoe->ctlr.lp);
1958 else
1959 rc = -ENODEV;
1960
1961 dev_put(netdev);
1962out_nodev:
1963 mutex_unlock(&fcoe_config_mutex);
1964 return rc;
1965}
1966
1967/**
1841 * fcoe_destroy() - Destroy a FCoE interface 1968 * fcoe_destroy() - Destroy a FCoE interface
1842 * @buffer: The name of the Ethernet interface to be destroyed 1969 * @buffer: The name of the Ethernet interface to be destroyed
1843 * @kp: The associated kernel parameter 1970 * @kp: The associated kernel parameter
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 000000000000..bb96fdd58e23
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3531 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/delay.h>
29#include <linux/fs.h>
30#include <linux/timer.h>
31#include <linux/seq_file.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/compat.h>
36#include <linux/blktrace_api.h>
37#include <linux/uaccess.h>
38#include <linux/io.h>
39#include <linux/dma-mapping.h>
40#include <linux/completion.h>
41#include <linux/moduleparam.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <linux/cciss_ioctl.h>
47#include <linux/string.h>
48#include <linux/bitmap.h>
49#include <asm/atomic.h>
50#include <linux/kthread.h>
51#include "hpsa_cmd.h"
52#include "hpsa.h"
53
54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55#define HPSA_DRIVER_VERSION "1.0.0"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57
58/* How long to wait (in milliseconds) for board to go into simple mode */
59#define MAX_CONFIG_WAIT 30000
60#define MAX_IOCTL_CONFIG_WAIT 1000
61
62/*define how many times we will try a command because of bus resets */
63#define MAX_CMD_RETRIES 3
64
65/* Embedded module documentation macros - see modules.h */
66MODULE_AUTHOR("Hewlett-Packard Company");
67MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION);
69MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70MODULE_VERSION(HPSA_DRIVER_VERSION);
71MODULE_LICENSE("GPL");
72
73static int hpsa_allow_any;
74module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware");
77
78/* define the PCI info for the cards we can control */
79static const struct pci_device_id hpsa_pci_device_id[] = {
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
90 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
91 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
92 {0,}
93};
94
95MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
96
97/* board_id = Subsystem Device ID & Vendor ID
98 * product = Marketing Name for the board
99 * access = Address of the struct of function pointers
100 */
101static struct board_type products[] = {
102 {0x3223103C, "Smart Array P800", &SA5_access},
103 {0x3234103C, "Smart Array P400", &SA5_access},
104 {0x323d103c, "Smart Array P700M", &SA5_access},
105 {0x3241103C, "Smart Array P212", &SA5_access},
106 {0x3243103C, "Smart Array P410", &SA5_access},
107 {0x3245103C, "Smart Array P410i", &SA5_access},
108 {0x3247103C, "Smart Array P411", &SA5_access},
109 {0x3249103C, "Smart Array P812", &SA5_access},
110 {0x324a103C, "Smart Array P712m", &SA5_access},
111 {0x324b103C, "Smart Array P711m", &SA5_access},
112 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
113};
114
115static int number_of_controllers;
116
117static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
118static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
119static void start_io(struct ctlr_info *h);
120
121#ifdef CONFIG_COMPAT
122static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
123#endif
124
125static void cmd_free(struct ctlr_info *h, struct CommandList *c);
126static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
127static struct CommandList *cmd_alloc(struct ctlr_info *h);
128static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
129static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
130 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
131 int cmd_type);
132
133static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134 void (*done)(struct scsi_cmnd *));
135
136static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
137static int hpsa_slave_alloc(struct scsi_device *sdev);
138static void hpsa_slave_destroy(struct scsi_device *sdev);
139
140static ssize_t raid_level_show(struct device *dev,
141 struct device_attribute *attr, char *buf);
142static ssize_t lunid_show(struct device *dev,
143 struct device_attribute *attr, char *buf);
144static ssize_t unique_id_show(struct device *dev,
145 struct device_attribute *attr, char *buf);
146static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
147static ssize_t host_store_rescan(struct device *dev,
148 struct device_attribute *attr, const char *buf, size_t count);
149static int check_for_unit_attention(struct ctlr_info *h,
150 struct CommandList *c);
151static void check_ioctl_unit_attention(struct ctlr_info *h,
152 struct CommandList *c);
153
154static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
155static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
156static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
157static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
158
159static struct device_attribute *hpsa_sdev_attrs[] = {
160 &dev_attr_raid_level,
161 &dev_attr_lunid,
162 &dev_attr_unique_id,
163 NULL,
164};
165
166static struct device_attribute *hpsa_shost_attrs[] = {
167 &dev_attr_rescan,
168 NULL,
169};
170
171static struct scsi_host_template hpsa_driver_template = {
172 .module = THIS_MODULE,
173 .name = "hpsa",
174 .proc_name = "hpsa",
175 .queuecommand = hpsa_scsi_queue_command,
176 .can_queue = 512,
177 .this_id = -1,
178 .sg_tablesize = MAXSGENTRIES,
179 .cmd_per_lun = 512,
180 .use_clustering = ENABLE_CLUSTERING,
181 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
182 .ioctl = hpsa_ioctl,
183 .slave_alloc = hpsa_slave_alloc,
184 .slave_destroy = hpsa_slave_destroy,
185#ifdef CONFIG_COMPAT
186 .compat_ioctl = hpsa_compat_ioctl,
187#endif
188 .sdev_attrs = hpsa_sdev_attrs,
189 .shost_attrs = hpsa_shost_attrs,
190};
191
192static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
193{
194 unsigned long *priv = shost_priv(sdev->host);
195 return (struct ctlr_info *) *priv;
196}
197
198static struct task_struct *hpsa_scan_thread;
199static DEFINE_MUTEX(hpsa_scan_mutex);
200static LIST_HEAD(hpsa_scan_q);
201static int hpsa_scan_func(void *data);
202
203/**
204 * add_to_scan_list() - add controller to rescan queue
205 * @h: Pointer to the controller.
206 *
207 * Adds the controller to the rescan queue if not already on the queue.
208 *
209 * returns 1 if added to the queue, 0 if skipped (could be on the
210 * queue already, or the controller could be initializing or shutting
211 * down).
212 **/
213static int add_to_scan_list(struct ctlr_info *h)
214{
215 struct ctlr_info *test_h;
216 int found = 0;
217 int ret = 0;
218
219 if (h->busy_initializing)
220 return 0;
221
222 /*
223 * If we don't get the lock, it means the driver is unloading
224 * and there's no point in scheduling a new scan.
225 */
226 if (!mutex_trylock(&h->busy_shutting_down))
227 return 0;
228
229 mutex_lock(&hpsa_scan_mutex);
230 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
231 if (test_h == h) {
232 found = 1;
233 break;
234 }
235 }
236 if (!found && !h->busy_scanning) {
237 INIT_COMPLETION(h->scan_wait);
238 list_add_tail(&h->scan_list, &hpsa_scan_q);
239 ret = 1;
240 }
241 mutex_unlock(&hpsa_scan_mutex);
242 mutex_unlock(&h->busy_shutting_down);
243
244 return ret;
245}
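
/*
 * Note for callers: queueing a controller here does not by itself start a
 * scan; the caller must also wake the scan thread and may wait for the
 * result, roughly:
 *
 *	if (add_to_scan_list(h)) {
 *		wake_up_process(hpsa_scan_thread);
 *		wait_for_completion_interruptible(&h->scan_wait);
 *	}
 *
 * which is exactly what host_store_rescan() below does.
 */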
246
247/**
248 * remove_from_scan_list() - remove controller from rescan queue
249 * @h: Pointer to the controller.
250 *
251 * Removes the controller from the rescan queue if present. Blocks if
252 * the controller is currently conducting a rescan. The controller
253 * can be in one of three states:
254 * 1. Doesn't need a scan
255 * 2. On the scan list, but not scanning yet (we remove it)
256 * 3. Busy scanning (and not on the list). In this case we want to wait for
257 * the scan to complete to make sure the scanning thread for this
258 * controller is completely idle.
259 **/
260static void remove_from_scan_list(struct ctlr_info *h)
261{
262 struct ctlr_info *test_h, *tmp_h;
263
264 mutex_lock(&hpsa_scan_mutex);
265 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
266 if (test_h == h) { /* state 2. */
267 list_del(&h->scan_list);
268 complete_all(&h->scan_wait);
269 mutex_unlock(&hpsa_scan_mutex);
270 return;
271 }
272 }
273 if (h->busy_scanning) { /* state 3. */
274 mutex_unlock(&hpsa_scan_mutex);
275 wait_for_completion(&h->scan_wait);
276 } else { /* state 1, nothing to do. */
277 mutex_unlock(&hpsa_scan_mutex);
278 }
279}
280
281/* hpsa_scan_func() - kernel thread used to rescan controllers
282 * @data: Ignored.
283 *
284 * A kernel thread used to scan for drive topology changes on
285 * controllers. The thread processes only one controller at a time
286 * using a queue. Controllers are added to the queue using
287 * add_to_scan_list() and removed from the queue either when done
288 * processing or via remove_from_scan_list().
289 *
290 * returns 0.
291 **/
292static int hpsa_scan_func(__attribute__((unused)) void *data)
293{
294 struct ctlr_info *h;
295 int host_no;
296
297 while (1) {
298 set_current_state(TASK_INTERRUPTIBLE);
299 schedule();
300 if (kthread_should_stop())
301 break;
302
303 while (1) {
304 mutex_lock(&hpsa_scan_mutex);
305 if (list_empty(&hpsa_scan_q)) {
306 mutex_unlock(&hpsa_scan_mutex);
307 break;
308 }
309 h = list_entry(hpsa_scan_q.next, struct ctlr_info,
310 scan_list);
311 list_del(&h->scan_list);
312 h->busy_scanning = 1;
313 mutex_unlock(&hpsa_scan_mutex);
314 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
315 hpsa_update_scsi_devices(h, host_no);
316 complete_all(&h->scan_wait);
317 mutex_lock(&hpsa_scan_mutex);
318 h->busy_scanning = 0;
319 mutex_unlock(&hpsa_scan_mutex);
320 }
321 }
322 return 0;
323}
324
325static int check_for_unit_attention(struct ctlr_info *h,
326 struct CommandList *c)
327{
328 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
329 return 0;
330
331 switch (c->err_info->SenseInfo[12]) {
332 case STATE_CHANGED:
333 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
334 "detected, command retried\n", h->ctlr);
335 break;
336 case LUN_FAILED:
337 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
338 "detected, action required\n", h->ctlr);
339 break;
340 case REPORT_LUNS_CHANGED:
341 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
342 "changed\n", h->ctlr);
343 /*
344 * Here, we could call add_to_scan_list and wake up the scan thread,
345 * except that it's quite likely that we will get more than one
346 * REPORT_LUNS_CHANGED condition in quick succession, which means
347 * that those which occur after the first one will likely happen
348 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
349 * robust enough to restart in the middle, undoing what it has already
350 * done, and it's not clear that it's even possible to do this, since
351 * part of what it does is notify the SCSI mid layer, which starts
352 * doing its own I/O to read partition tables and so on, and the
353 * driver doesn't have the visibility to know what might need undoing.
354 * In any event, even if it were possible, it would be horribly
355 * complicated to get right, so we just don't do it for now.
356 *
357 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
358 */
359 break;
360 case POWER_OR_RESET:
361 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
362 "or device reset detected\n", h->ctlr);
363 break;
364 case UNIT_ATTENTION_CLEARED:
365 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
366 "cleared by another initiator\n", h->ctlr);
367 break;
368 default:
369 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
370 "unit attention detected\n", h->ctlr);
371 break;
372 }
373 return 1;
374}
375
376static ssize_t host_store_rescan(struct device *dev,
377 struct device_attribute *attr,
378 const char *buf, size_t count)
379{
380 struct ctlr_info *h;
381 struct Scsi_Host *shost = class_to_shost(dev);
382 unsigned long *priv = shost_priv(shost);
383 h = (struct ctlr_info *) *priv;
384 if (add_to_scan_list(h)) {
385 wake_up_process(hpsa_scan_thread);
386 wait_for_completion_interruptible(&h->scan_wait);
387 }
388 return count;
389}
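
/*
 * Since "rescan" is registered via hpsa_shost_attrs, it shows up as a
 * write-only (root) sysfs attribute of the Scsi_Host, typically something
 * like /sys/class/scsi_host/host<N>/rescan depending on how the SCSI host
 * class is laid out on a given kernel. Writing anything to it queues this
 * controller for a rescan and blocks until that rescan completes.
 */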
390
391/* Enqueuing and dequeuing functions for cmdlists. */
392static inline void addQ(struct hlist_head *list, struct CommandList *c)
393{
394 hlist_add_head(&c->list, list);
395}
396
397static void enqueue_cmd_and_start_io(struct ctlr_info *h,
398 struct CommandList *c)
399{
400 unsigned long flags;
401 spin_lock_irqsave(&h->lock, flags);
402 addQ(&h->reqQ, c);
403 h->Qdepth++;
404 start_io(h);
405 spin_unlock_irqrestore(&h->lock, flags);
406}
407
408static inline void removeQ(struct CommandList *c)
409{
410 if (WARN_ON(hlist_unhashed(&c->list)))
411 return;
412 hlist_del_init(&c->list);
413}
414
415static inline int is_hba_lunid(unsigned char scsi3addr[])
416{
417 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
418}
419
420static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
421{
422 return (scsi3addr[3] & 0xC0) == 0x40;
423}
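
/*
 * The top two bits of byte 3 of the 8-byte CISS LUN address select the
 * addressing mode; 0x40 (binary 01) is what the controller reports for
 * logical volumes. This is how logical drives are told apart from bare
 * physical devices in figure_bus_target_lun() and in the "masked physical
 * devices" check in hpsa_update_scsi_devices(). (The bit layout is per
 * the CISS addressing scheme; only the logical case matters here.)
 */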
424
425static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
426 "UNKNOWN"
427};
428#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
429
430static ssize_t raid_level_show(struct device *dev,
431 struct device_attribute *attr, char *buf)
432{
433 ssize_t l = 0;
434 int rlevel;
435 struct ctlr_info *h;
436 struct scsi_device *sdev;
437 struct hpsa_scsi_dev_t *hdev;
438 unsigned long flags;
439
440 sdev = to_scsi_device(dev);
441 h = sdev_to_hba(sdev);
442 spin_lock_irqsave(&h->lock, flags);
443 hdev = sdev->hostdata;
444 if (!hdev) {
445 spin_unlock_irqrestore(&h->lock, flags);
446 return -ENODEV;
447 }
448
449 /* Is this even a logical drive? */
450 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
451 spin_unlock_irqrestore(&h->lock, flags);
452 l = snprintf(buf, PAGE_SIZE, "N/A\n");
453 return l;
454 }
455
456 rlevel = hdev->raid_level;
457 spin_unlock_irqrestore(&h->lock, flags);
458 if (rlevel < 0 || rlevel > RAID_UNKNOWN)
459 rlevel = RAID_UNKNOWN;
460 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
461 return l;
462}
463
464static ssize_t lunid_show(struct device *dev,
465 struct device_attribute *attr, char *buf)
466{
467 struct ctlr_info *h;
468 struct scsi_device *sdev;
469 struct hpsa_scsi_dev_t *hdev;
470 unsigned long flags;
471 unsigned char lunid[8];
472
473 sdev = to_scsi_device(dev);
474 h = sdev_to_hba(sdev);
475 spin_lock_irqsave(&h->lock, flags);
476 hdev = sdev->hostdata;
477 if (!hdev) {
478 spin_unlock_irqrestore(&h->lock, flags);
479 return -ENODEV;
480 }
481 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
482 spin_unlock_irqrestore(&h->lock, flags);
483 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
484 lunid[0], lunid[1], lunid[2], lunid[3],
485 lunid[4], lunid[5], lunid[6], lunid[7]);
486}
487
488static ssize_t unique_id_show(struct device *dev,
489 struct device_attribute *attr, char *buf)
490{
491 struct ctlr_info *h;
492 struct scsi_device *sdev;
493 struct hpsa_scsi_dev_t *hdev;
494 unsigned long flags;
495 unsigned char sn[16];
496
497 sdev = to_scsi_device(dev);
498 h = sdev_to_hba(sdev);
499 spin_lock_irqsave(&h->lock, flags);
500 hdev = sdev->hostdata;
501 if (!hdev) {
502 spin_unlock_irqrestore(&h->lock, flags);
503 return -ENODEV;
504 }
505 memcpy(sn, hdev->device_id, sizeof(sn));
506 spin_unlock_irqrestore(&h->lock, flags);
507 return snprintf(buf, 16 * 2 + 2,
508 "%02X%02X%02X%02X%02X%02X%02X%02X"
509 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
510 sn[0], sn[1], sn[2], sn[3],
511 sn[4], sn[5], sn[6], sn[7],
512 sn[8], sn[9], sn[10], sn[11],
513 sn[12], sn[13], sn[14], sn[15]);
514}
515
516static int hpsa_find_target_lun(struct ctlr_info *h,
517 unsigned char scsi3addr[], int bus, int *target, int *lun)
518{
519 /* Finds an unused bus, target, lun for a new physical device.
520 * Assumes h->devlock is held.
521 */
522 int i, found = 0;
523 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
524
525 memset(lun_taken, 0, sizeof(lun_taken)); /* zero the whole bitmap */
526
527 for (i = 0; i < h->ndevices; i++) {
528 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
529 set_bit(h->dev[i]->target, lun_taken);
530 }
531
532 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
533 if (!test_bit(i, lun_taken)) {
534 /* *bus = 1; */
535 *target = i;
536 *lun = 0;
537 found = 1;
538 break;
539 }
540 }
541 return !found;
542}
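
/*
 * Illustrative example (values made up): if targets 0 and 1 on the
 * requested bus are already in use, bits 0 and 1 of lun_taken are set,
 * bit 2 is the first clear bit, and the new device gets *target = 2,
 * *lun = 0. The function returns 0 on success, nonzero if every target
 * slot is already taken.
 */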
543
544/* Add an entry into h->dev[] array. */
545static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
546 struct hpsa_scsi_dev_t *device,
547 struct hpsa_scsi_dev_t *added[], int *nadded)
548{
549 /* assumes h->devlock is held */
550 int n = h->ndevices;
551 int i;
552 unsigned char addr1[8], addr2[8];
553 struct hpsa_scsi_dev_t *sd;
554
555 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
556 dev_err(&h->pdev->dev, "too many devices, some will be "
557 "inaccessible.\n");
558 return -1;
559 }
560
561 /* physical devices do not have lun or target assigned until now. */
562 if (device->lun != -1)
563 /* Logical device, lun is already assigned. */
564 goto lun_assigned;
565
566 /* If this device is a non-zero lun of a multi-lun device,
567 * byte 4 of the 8-byte LUN addr will contain the logical
568 * unit no., zero otherwise.
569 */
570 if (device->scsi3addr[4] == 0) {
571 /* This is not a non-zero lun of a multi-lun device */
572 if (hpsa_find_target_lun(h, device->scsi3addr,
573 device->bus, &device->target, &device->lun) != 0)
574 return -1;
575 goto lun_assigned;
576 }
577
578 /* This is a non-zero lun of a multi-lun device.
579 * Search through our list and find the device which
580 * has the same 8 byte LUN address, except for byte 4.
581 * Assign the same bus and target for this new LUN.
582 * Use the logical unit number from the firmware.
583 */
584 memcpy(addr1, device->scsi3addr, 8);
585 addr1[4] = 0;
586 for (i = 0; i < n; i++) {
587 sd = h->dev[i];
588 memcpy(addr2, sd->scsi3addr, 8);
589 addr2[4] = 0;
590 /* differ only in byte 4? */
591 if (memcmp(addr1, addr2, 8) == 0) {
592 device->bus = sd->bus;
593 device->target = sd->target;
594 device->lun = device->scsi3addr[4];
595 break;
596 }
597 }
598 if (device->lun == -1) {
599 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
600 " suspect firmware bug or unsupported hardware "
601 "configuration.\n");
602 return -1;
603 }
604
605lun_assigned:
606
607 h->dev[n] = device;
608 h->ndevices++;
609 added[*nadded] = device;
610 (*nadded)++;
611
612 /* Initially (before registering with the scsi layer) we don't
613 * know our hostno and we don't want to print anything the first
614 * time anyway (the scsi layer's inquiries will show that info).
615 */
616 /* if (hostno != -1) */
617 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
618 scsi_device_type(device->devtype), hostno,
619 device->bus, device->target, device->lun);
620 return 0;
621}
622
623/* Remove an entry from h->dev[] array. */
624static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
625 struct hpsa_scsi_dev_t *removed[], int *nremoved)
626{
627 /* assumes h->devlock is held */
628 int i;
629 struct hpsa_scsi_dev_t *sd;
630
631 if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
632 BUG();
633
634 sd = h->dev[entry];
635 removed[*nremoved] = h->dev[entry];
636 (*nremoved)++;
637
638 for (i = entry; i < h->ndevices-1; i++)
639 h->dev[i] = h->dev[i+1];
640 h->ndevices--;
641 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
642 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
643 sd->lun);
644}
645
646#define SCSI3ADDR_EQ(a, b) ( \
647 (a)[7] == (b)[7] && \
648 (a)[6] == (b)[6] && \
649 (a)[5] == (b)[5] && \
650 (a)[4] == (b)[4] && \
651 (a)[3] == (b)[3] && \
652 (a)[2] == (b)[2] && \
653 (a)[1] == (b)[1] && \
654 (a)[0] == (b)[0])
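
/*
 * SCSI3ADDR_EQ() is just an unrolled 8-byte comparison, equivalent to
 * memcmp(a, b, 8) == 0 (compare is_hba_lunid() above).
 */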
655
656static void fixup_botched_add(struct ctlr_info *h,
657 struct hpsa_scsi_dev_t *added)
658{
659 /* called when scsi_add_device fails in order to re-adjust
660 * h->dev[] to match the mid layer's view.
661 */
662 unsigned long flags;
663 int i, j;
664
665 spin_lock_irqsave(&h->lock, flags);
666 for (i = 0; i < h->ndevices; i++) {
667 if (h->dev[i] == added) {
668 for (j = i; j < h->ndevices-1; j++)
669 h->dev[j] = h->dev[j+1];
670 h->ndevices--;
671 break;
672 }
673 }
674 spin_unlock_irqrestore(&h->lock, flags);
675 kfree(added);
676}
677
678static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
679 struct hpsa_scsi_dev_t *dev2)
680{
681 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
682 (dev1->lun != -1 && dev2->lun != -1)) &&
683 dev1->devtype != 0x0C)
684 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
685
686 /* we compare everything except lun and target as these
687 * are not yet assigned. Compare parts likely
688 * to differ first
689 */
690 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
691 sizeof(dev1->scsi3addr)) != 0)
692 return 0;
693 if (memcmp(dev1->device_id, dev2->device_id,
694 sizeof(dev1->device_id)) != 0)
695 return 0;
696 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
697 return 0;
698 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
699 return 0;
700 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
701 return 0;
702 if (dev1->devtype != dev2->devtype)
703 return 0;
704 if (dev1->raid_level != dev2->raid_level)
705 return 0;
706 if (dev1->bus != dev2->bus)
707 return 0;
708 return 1;
709}
710
711/* Find needle in haystack. If exact match found, return DEVICE_SAME,
712 * and return needle location in *index. If scsi3addr matches, but not
713 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
714 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
715 */
716static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
717 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
718 int *index)
719{
720 int i;
721#define DEVICE_NOT_FOUND 0
722#define DEVICE_CHANGED 1
723#define DEVICE_SAME 2
724 for (i = 0; i < haystack_size; i++) {
725 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
726 *index = i;
727 if (device_is_the_same(needle, haystack[i]))
728 return DEVICE_SAME;
729 else
730 return DEVICE_CHANGED;
731 }
732 }
733 *index = -1;
734 return DEVICE_NOT_FOUND;
735}
736
737static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
738 struct hpsa_scsi_dev_t *sd[], int nsds)
739{
740 /* sd contains scsi3 addresses and devtypes, and inquiry
741 * data. This function takes what's in sd to be the current
742 * reality and updates h->dev[] to reflect that reality.
743 */
744 int i, entry, device_change, changes = 0;
745 struct hpsa_scsi_dev_t *csd;
746 unsigned long flags;
747 struct hpsa_scsi_dev_t **added, **removed;
748 int nadded, nremoved;
749 struct Scsi_Host *sh = NULL;
750
751 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
752 GFP_KERNEL);
753 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
754 GFP_KERNEL);
755
756 if (!added || !removed) {
757 dev_warn(&h->pdev->dev, "out of memory in "
758 "adjust_hpsa_scsi_table\n");
759 goto free_and_out;
760 }
761
762 spin_lock_irqsave(&h->devlock, flags);
763
764 /* find any devices in h->dev[] that are not in
765 * sd[] and remove them from h->dev[], and for any
766 * devices which have changed, remove the old device
767 * info and add the new device info.
768 */
769 i = 0;
770 nremoved = 0;
771 nadded = 0;
772 while (i < h->ndevices) {
773 csd = h->dev[i];
774 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
775 if (device_change == DEVICE_NOT_FOUND) {
776 changes++;
777 hpsa_scsi_remove_entry(h, hostno, i,
778 removed, &nremoved);
779 continue; /* remove ^^^, hence i not incremented */
780 } else if (device_change == DEVICE_CHANGED) {
781 changes++;
782 hpsa_scsi_remove_entry(h, hostno, i,
783 removed, &nremoved);
784 (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
785 added, &nadded);
786 /* add can't fail, we just removed one. */
787 sd[entry] = NULL; /* prevent it from being freed */
788 }
789 i++;
790 }
791
792 /* Now, make sure every device listed in sd[] is also
793 * listed in h->dev[], adding them if they aren't found
794 */
795
796 for (i = 0; i < nsds; i++) {
797 if (!sd[i]) /* if already added above. */
798 continue;
799 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
800 h->ndevices, &entry);
801 if (device_change == DEVICE_NOT_FOUND) {
802 changes++;
803 if (hpsa_scsi_add_entry(h, hostno, sd[i],
804 added, &nadded) != 0)
805 break;
806 sd[i] = NULL; /* prevent from being freed later. */
807 } else if (device_change == DEVICE_CHANGED) {
808 /* should never happen... */
809 changes++;
810 dev_warn(&h->pdev->dev,
811 "device unexpectedly changed.\n");
812 /* but if it does happen, we just ignore that device */
813 }
814 }
815 spin_unlock_irqrestore(&h->devlock, flags);
816
817 /* Don't notify the scsi mid layer of any changes the first time
818 * through (or if there are no changes); on that first pass,
819 * scsi_scan_host() will do it for us later.
820 */
821 if (hostno == -1 || !changes)
822 goto free_and_out;
823
824 sh = h->scsi_host;
825 /* Notify scsi mid layer of any removed devices */
826 for (i = 0; i < nremoved; i++) {
827 struct scsi_device *sdev =
828 scsi_device_lookup(sh, removed[i]->bus,
829 removed[i]->target, removed[i]->lun);
830 if (sdev != NULL) {
831 scsi_remove_device(sdev);
832 scsi_device_put(sdev);
833 } else {
834 /* We don't expect to get here.
835 * Future cmds to this device will get a selection
836 * timeout as if the device were gone.
837 */
838 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
839 "for removal.\n", hostno, removed[i]->bus,
840 removed[i]->target, removed[i]->lun);
841 }
842 kfree(removed[i]);
843 removed[i] = NULL;
844 }
845
846 /* Notify scsi mid layer of any added devices */
847 for (i = 0; i < nadded; i++) {
848 if (scsi_add_device(sh, added[i]->bus,
849 added[i]->target, added[i]->lun) == 0)
850 continue;
851 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
852 "device not added.\n", hostno, added[i]->bus,
853 added[i]->target, added[i]->lun);
854 /* now we have to remove it from h->dev,
855 * since it didn't get added to scsi mid layer
856 */
857 fixup_botched_add(h, added[i]);
858 }
859
860free_and_out:
861 kfree(added);
862 kfree(removed);
863 return 0;
864}
865
866/*
867 * Lookup bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *
868 * Assumes h->devlock is held.
869 */
870static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
871 int bus, int target, int lun)
872{
873 int i;
874 struct hpsa_scsi_dev_t *sd;
875
876 for (i = 0; i < h->ndevices; i++) {
877 sd = h->dev[i];
878 if (sd->bus == bus && sd->target == target && sd->lun == lun)
879 return sd;
880 }
881 return NULL;
882}
883
884/* link sdev->hostdata to our per-device structure. */
885static int hpsa_slave_alloc(struct scsi_device *sdev)
886{
887 struct hpsa_scsi_dev_t *sd;
888 unsigned long flags;
889 struct ctlr_info *h;
890
891 h = sdev_to_hba(sdev);
892 spin_lock_irqsave(&h->devlock, flags);
893 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
894 sdev_id(sdev), sdev->lun);
895 if (sd != NULL)
896 sdev->hostdata = sd;
897 spin_unlock_irqrestore(&h->devlock, flags);
898 return 0;
899}
900
901static void hpsa_slave_destroy(struct scsi_device *sdev)
902{
903 return; /* nothing to do. */
904}
905
906static void hpsa_scsi_setup(struct ctlr_info *h)
907{
908 h->ndevices = 0;
909 h->scsi_host = NULL;
910 spin_lock_init(&h->devlock);
911 return;
912}
913
914static void complete_scsi_command(struct CommandList *cp,
915 int timeout, __u32 tag)
916{
917 struct scsi_cmnd *cmd;
918 struct ctlr_info *h;
919 struct ErrorInfo *ei;
920
921 unsigned char sense_key;
922 unsigned char asc; /* additional sense code */
923 unsigned char ascq; /* additional sense code qualifier */
924
925 ei = cp->err_info;
926 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
927 h = cp->h;
928
929 scsi_dma_unmap(cmd); /* undo the DMA mappings */
930
931 cmd->result = (DID_OK << 16); /* host byte */
932 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
933 cmd->result |= (ei->ScsiStatus << 1);
934
935 /* copy the sense data whether we need to or not. */
936 memcpy(cmd->sense_buffer, ei->SenseInfo,
937 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
938 SCSI_SENSE_BUFFERSIZE :
939 ei->SenseLen);
940 scsi_set_resid(cmd, ei->ResidualCnt);
941
942 if (ei->CommandStatus == 0) {
943 cmd->scsi_done(cmd);
944 cmd_free(h, cp);
945 return;
946 }
947
948 /* an error has occurred */
949 switch (ei->CommandStatus) {
950
951 case CMD_TARGET_STATUS:
952 if (ei->ScsiStatus) {
953 /* Get sense key */
954 sense_key = 0xf & ei->SenseInfo[2];
955 /* Get additional sense code */
956 asc = ei->SenseInfo[12];
957 /* Get additional sense code qualifier */
958 ascq = ei->SenseInfo[13];
959 }
960
961 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
962 if (check_for_unit_attention(h, cp)) {
963 cmd->result = DID_SOFT_ERROR << 16;
964 break;
965 }
966 if (sense_key == ILLEGAL_REQUEST) {
967 /*
968 * SCSI REPORT_LUNS is commonly unsupported on
969 * Smart Array. Suppress noisy complaint.
970 */
971 if (cp->Request.CDB[0] == REPORT_LUNS)
972 break;
973
974 /* ASC 0x25/ASCQ 0x00 indicates a Logical
975 * Unit Not Supported condition.
976 */
977 if ((asc == 0x25) && (ascq == 0x0)) {
978 dev_warn(&h->pdev->dev, "cp %p "
979 "has check condition\n", cp);
980 break;
981 }
982 }
983
984 if (sense_key == NOT_READY) {
985 /* If Sense is Not Ready, Logical Unit
986 * Not ready, Manual Intervention
987 * required
988 */
989 if ((asc == 0x04) && (ascq == 0x03)) {
990 cmd->result = DID_NO_CONNECT << 16;
991 dev_warn(&h->pdev->dev, "cp %p "
992 "has check condition: unit "
993 "not ready, manual "
994 "intervention required\n", cp);
995 break;
996 }
997 }
998
999
1000 /* Must be some other type of check condition */
1001 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1002 "unknown type: "
1003 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1004 "Returning result: 0x%x, "
1005 "cmd=[%02x %02x %02x %02x %02x "
1006 "%02x %02x %02x %02x %02x]\n",
1007 cp, sense_key, asc, ascq,
1008 cmd->result,
1009 cmd->cmnd[0], cmd->cmnd[1],
1010 cmd->cmnd[2], cmd->cmnd[3],
1011 cmd->cmnd[4], cmd->cmnd[5],
1012 cmd->cmnd[6], cmd->cmnd[7],
1013 cmd->cmnd[8], cmd->cmnd[9]);
1014 break;
1015 }
1016
1017
1018 /* Problem was not a check condition
1019 * Pass it up to the upper layers...
1020 */
1021 if (ei->ScsiStatus) {
1022 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1023 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1024 "Returning result: 0x%x\n",
1025 cp, ei->ScsiStatus,
1026 sense_key, asc, ascq,
1027 cmd->result);
1028 } else { /* scsi status is zero??? How??? */
1029 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1030 "Returning no connection.\n", cp);
1031
1032 /* Ordinarily, this case should never happen,
1033 * but there is a bug in some released firmware
1034 * revisions that allows it to happen if, for
1035 * example, a 4100 backplane loses power and
1036 * the tape drive is in it. We assume that
1037 * it's a fatal error of some kind because we
1038 * can't show that it wasn't. We will make it
1039 * look like selection timeout since that is
1040 * the most common reason for this to occur,
1041 * and it's severe enough.
1042 */
1043
1044 cmd->result = DID_NO_CONNECT << 16;
1045 }
1046 break;
1047
1048 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1049 break;
1050 case CMD_DATA_OVERRUN:
1051 dev_warn(&h->pdev->dev, "cp %p has"
1052 " completed with data overrun "
1053 "reported\n", cp);
1054 break;
1055 case CMD_INVALID: {
1056 /* print_bytes(cp, sizeof(*cp), 1, 0);
1057 print_cmd(cp); */
1058 /* We get CMD_INVALID if you address a non-existent device
1059 * instead of a selection timeout (no response). You will
1060 * see this if you yank out a drive, then try to access it.
1061 * This is kind of a shame because it means that any other
1062 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1063 * missing target. */
1064 cmd->result = DID_NO_CONNECT << 16;
1065 }
1066 break;
1067 case CMD_PROTOCOL_ERR:
1068 dev_warn(&h->pdev->dev, "cp %p has "
1069 "protocol error\n", cp);
1070 break;
1071 case CMD_HARDWARE_ERR:
1072 cmd->result = DID_ERROR << 16;
1073 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1074 break;
1075 case CMD_CONNECTION_LOST:
1076 cmd->result = DID_ERROR << 16;
1077 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1078 break;
1079 case CMD_ABORTED:
1080 cmd->result = DID_ABORT << 16;
1081 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1082 cp, ei->ScsiStatus);
1083 break;
1084 case CMD_ABORT_FAILED:
1085 cmd->result = DID_ERROR << 16;
1086 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1087 break;
1088 case CMD_UNSOLICITED_ABORT:
1089 cmd->result = DID_ABORT << 16;
1090 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1091 "abort\n", cp);
1092 break;
1093 case CMD_TIMEOUT:
1094 cmd->result = DID_TIME_OUT << 16;
1095 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1096 break;
1097 default:
1098 cmd->result = DID_ERROR << 16;
1099 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1100 cp, ei->CommandStatus);
1101 }
1102 cmd->scsi_done(cmd);
1103 cmd_free(h, cp);
1104}
1105
1106static int hpsa_scsi_detect(struct ctlr_info *h)
1107{
1108 struct Scsi_Host *sh;
1109 int error;
1110
1111 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1112 if (sh == NULL)
1113 goto fail;
1114
1115 sh->io_port = 0;
1116 sh->n_io_port = 0;
1117 sh->this_id = -1;
1118 sh->max_channel = 3;
1119 sh->max_cmd_len = MAX_COMMAND_SIZE;
1120 sh->max_lun = HPSA_MAX_LUN;
1121 sh->max_id = HPSA_MAX_LUN;
1122 h->scsi_host = sh;
1123 sh->hostdata[0] = (unsigned long) h;
1124 sh->irq = h->intr[SIMPLE_MODE_INT];
1125 sh->unique_id = sh->irq;
1126 error = scsi_add_host(sh, &h->pdev->dev);
1127 if (error)
1128 goto fail_host_put;
1129 scsi_scan_host(sh);
1130 return 0;
1131
1132 fail_host_put:
1133 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1134 " failed for controller %d\n", h->ctlr);
1135 scsi_host_put(sh);
1136 return -1;
1137 fail:
1138 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1139 " failed for controller %d\n", h->ctlr);
1140 return -1;
1141}
1142
1143static void hpsa_pci_unmap(struct pci_dev *pdev,
1144 struct CommandList *c, int sg_used, int data_direction)
1145{
1146 int i;
1147 union u64bit addr64;
1148
1149 for (i = 0; i < sg_used; i++) {
1150 addr64.val32.lower = c->SG[i].Addr.lower;
1151 addr64.val32.upper = c->SG[i].Addr.upper;
1152 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1153 data_direction);
1154 }
1155}
1156
1157static void hpsa_map_one(struct pci_dev *pdev,
1158 struct CommandList *cp,
1159 unsigned char *buf,
1160 size_t buflen,
1161 int data_direction)
1162{
1163 __u64 addr64;
1164
1165 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1166 cp->Header.SGList = 0;
1167 cp->Header.SGTotal = 0;
1168 return;
1169 }
1170
1171 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
1172 cp->SG[0].Addr.lower =
1173 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1174 cp->SG[0].Addr.upper =
1175 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1176 cp->SG[0].Len = buflen;
1177 cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
1178 cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
1179}
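
/*
 * Example of the 32-bit split above (address made up): a dma_addr_t of
 * 0x0000000123456789 is stored as Addr.lower = 0x23456789 and
 * Addr.upper = 0x00000001; hpsa_pci_unmap() reassembles the same value
 * through union u64bit before handing it back to pci_unmap_single().
 */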
1180
1181static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1182 struct CommandList *c)
1183{
1184 DECLARE_COMPLETION_ONSTACK(wait);
1185
1186 c->waiting = &wait;
1187 enqueue_cmd_and_start_io(h, c);
1188 wait_for_completion(&wait);
1189}
1190
1191static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1192 struct CommandList *c, int data_direction)
1193{
1194 int retry_count = 0;
1195
1196 do {
1197 memset(c->err_info, 0, sizeof(*c->err_info));
1198 hpsa_scsi_do_simple_cmd_core(h, c);
1199 retry_count++;
1200 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1201 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1202}
1203
1204static void hpsa_scsi_interpret_error(struct CommandList *cp)
1205{
1206 struct ErrorInfo *ei;
1207 struct device *d = &cp->h->pdev->dev;
1208
1209 ei = cp->err_info;
1210 switch (ei->CommandStatus) {
1211 case CMD_TARGET_STATUS:
1212 dev_warn(d, "cmd %p has completed with errors\n", cp);
1213 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1214 ei->ScsiStatus);
1215 if (ei->ScsiStatus == 0)
1216 dev_warn(d, "SCSI status is abnormally zero. "
1217 "(probably indicates selection timeout "
1218 "reported incorrectly due to a known "
1219 "firmware bug, circa July, 2001.)\n");
1220 break;
1221 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1222 dev_info(d, "UNDERRUN\n");
1223 break;
1224 case CMD_DATA_OVERRUN:
1225 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1226 break;
1227 case CMD_INVALID: {
1228 /* controller unfortunately reports SCSI passthru's
1229 * to non-existent targets as invalid commands.
1230 */
1231 dev_warn(d, "cp %p is reported invalid (probably means "
1232 "target device no longer present)\n", cp);
1233 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1234 print_cmd(cp); */
1235 }
1236 break;
1237 case CMD_PROTOCOL_ERR:
1238 dev_warn(d, "cp %p has protocol error\n", cp);
1239 break;
1240 case CMD_HARDWARE_ERR:
1241 /* cmd->result = DID_ERROR << 16; */
1242 dev_warn(d, "cp %p had hardware error\n", cp);
1243 break;
1244 case CMD_CONNECTION_LOST:
1245 dev_warn(d, "cp %p had connection lost\n", cp);
1246 break;
1247 case CMD_ABORTED:
1248 dev_warn(d, "cp %p was aborted\n", cp);
1249 break;
1250 case CMD_ABORT_FAILED:
1251 dev_warn(d, "cp %p reports abort failed\n", cp);
1252 break;
1253 case CMD_UNSOLICITED_ABORT:
1254 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1255 break;
1256 case CMD_TIMEOUT:
1257 dev_warn(d, "cp %p timed out\n", cp);
1258 break;
1259 default:
1260 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1261 ei->CommandStatus);
1262 }
1263}
1264
1265static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1266 unsigned char page, unsigned char *buf,
1267 unsigned char bufsize)
1268{
1269 int rc = IO_OK;
1270 struct CommandList *c;
1271 struct ErrorInfo *ei;
1272
1273 c = cmd_special_alloc(h);
1274
1275 if (c == NULL) { /* trouble... */
1276 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1277 return -1;
1278 }
1279
1280 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1281 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1282 ei = c->err_info;
1283 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1284 hpsa_scsi_interpret_error(c);
1285 rc = -1;
1286 }
1287 cmd_special_free(h, c);
1288 return rc;
1289}
1290
1291static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1292{
1293 int rc = IO_OK;
1294 struct CommandList *c;
1295 struct ErrorInfo *ei;
1296
1297 c = cmd_special_alloc(h);
1298
1299 if (c == NULL) { /* trouble... */
1300 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1301 return -1;
1302 }
1303
1304 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1305 hpsa_scsi_do_simple_cmd_core(h, c);
1306 /* no unmap needed here because no data xfer. */
1307
1308 ei = c->err_info;
1309 if (ei->CommandStatus != 0) {
1310 hpsa_scsi_interpret_error(c);
1311 rc = -1;
1312 }
1313 cmd_special_free(h, c);
1314 return rc;
1315}
1316
1317static void hpsa_get_raid_level(struct ctlr_info *h,
1318 unsigned char *scsi3addr, unsigned char *raid_level)
1319{
1320 int rc;
1321 unsigned char *buf;
1322
1323 *raid_level = RAID_UNKNOWN;
1324 buf = kzalloc(64, GFP_KERNEL);
1325 if (!buf)
1326 return;
1327 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1328 if (rc == 0)
1329 *raid_level = buf[8];
1330 if (*raid_level > RAID_UNKNOWN)
1331 *raid_level = RAID_UNKNOWN;
1332 kfree(buf);
1333 return;
1334}
1335
1336/* Get the device id from inquiry page 0x83 */
1337static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1338 unsigned char *device_id, int buflen)
1339{
1340 int rc;
1341 unsigned char *buf;
1342
1343 if (buflen > 16)
1344 buflen = 16;
1345 buf = kzalloc(64, GFP_KERNEL);
1346 if (!buf)
1347 return -1;
1348 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1349 if (rc == 0)
1350 memcpy(device_id, &buf[8], buflen);
1351 kfree(buf);
1352 return rc != 0;
1353}
1354
1355static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1356 struct ReportLUNdata *buf, int bufsize,
1357 int extended_response)
1358{
1359 int rc = IO_OK;
1360 struct CommandList *c;
1361 unsigned char scsi3addr[8];
1362 struct ErrorInfo *ei;
1363
1364 c = cmd_special_alloc(h);
1365 if (c == NULL) { /* trouble... */
1366 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1367 return -1;
1368 }
1369
1370 memset(&scsi3addr[0], 0, 8); /* address the controller */
1371
1372 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1373 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1374 if (extended_response)
1375 c->Request.CDB[1] = extended_response;
1376 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1377 ei = c->err_info;
1378 if (ei->CommandStatus != 0 &&
1379 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1380 hpsa_scsi_interpret_error(c);
1381 rc = -1;
1382 }
1383 cmd_special_free(h, c);
1384 return rc;
1385}
1386
1387static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1388 struct ReportLUNdata *buf,
1389 int bufsize, int extended_response)
1390{
1391 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1392}
1393
1394static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1395 struct ReportLUNdata *buf, int bufsize)
1396{
1397 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1398}
1399
1400static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1401 int bus, int target, int lun)
1402{
1403 device->bus = bus;
1404 device->target = target;
1405 device->lun = lun;
1406}
1407
1408static int hpsa_update_device_info(struct ctlr_info *h,
1409 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1410{
1411#define OBDR_TAPE_INQ_SIZE 49
1412 unsigned char *inq_buff = NULL;
1413
1414 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1415 if (!inq_buff)
1416 goto bail_out;
1417
1418 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1419 /* Do an inquiry to the device to see what it is. */
1420 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1421 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1422 /* Inquiry failed (msg printed already) */
1423 dev_err(&h->pdev->dev,
1424 "hpsa_update_device_info: inquiry failed\n");
1425 goto bail_out;
1426 }
1427
1428 /* As a side effect, record the firmware version number
1429 * if we happen to be talking to the RAID controller.
1430 */
1431 if (is_hba_lunid(scsi3addr))
1432 memcpy(h->firm_ver, &inq_buff[32], 4);
1433
1434 this_device->devtype = (inq_buff[0] & 0x1f);
1435 memcpy(this_device->scsi3addr, scsi3addr, 8);
1436 memcpy(this_device->vendor, &inq_buff[8],
1437 sizeof(this_device->vendor));
1438 memcpy(this_device->model, &inq_buff[16],
1439 sizeof(this_device->model));
1440 memcpy(this_device->revision, &inq_buff[32],
1441 sizeof(this_device->revision));
1442 memset(this_device->device_id, 0,
1443 sizeof(this_device->device_id));
1444 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1445 sizeof(this_device->device_id));
1446
1447 if (this_device->devtype == TYPE_DISK &&
1448 is_logical_dev_addr_mode(scsi3addr))
1449 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1450 else
1451 this_device->raid_level = RAID_UNKNOWN;
1452
1453 kfree(inq_buff);
1454 return 0;
1455
1456bail_out:
1457 kfree(inq_buff);
1458 return 1;
1459}
1460
1461static unsigned char *msa2xxx_model[] = {
1462 "MSA2012",
1463 "MSA2024",
1464 "MSA2312",
1465 "MSA2324",
1466 NULL,
1467};
1468
1469static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1470{
1471 int i;
1472
1473 for (i = 0; msa2xxx_model[i]; i++)
1474 if (strncmp(device->model, msa2xxx_model[i],
1475 strlen(msa2xxx_model[i])) == 0)
1476 return 1;
1477 return 0;
1478}
1479
1480/* Helper function to assign bus, target, lun mapping of devices.
1481 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1482 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1483 * Logical drive target and lun are assigned at this time, but
1484 * physical device lun and target assignment are deferred (assigned
1485 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1486 */
1487static void figure_bus_target_lun(struct ctlr_info *h,
1488 __u8 *lunaddrbytes, int *bus, int *target, int *lun,
1489 struct hpsa_scsi_dev_t *device)
1490{
1491
1492 __u32 lunid;
1493
1494 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1495 /* logical device */
1496 memcpy(&lunid, lunaddrbytes, sizeof(lunid));
1497 lunid = le32_to_cpu(lunid);
1498
1499 if (is_msa2xxx(h, device)) {
1500 *bus = 1;
1501 *target = (lunid >> 16) & 0x3fff;
1502 *lun = lunid & 0x00ff;
1503 } else {
1504 *bus = 0;
1505 *lun = 0;
1506 *target = lunid & 0x3fff;
1507 }
1508 } else {
1509 /* physical device */
1510 if (is_hba_lunid(lunaddrbytes))
1511 *bus = 3;
1512 else
1513 *bus = 2;
1514 *target = -1;
1515 *lun = -1; /* we will fill these in later. */
1516 }
1517}
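
/*
 * Worked examples (lunids made up): an MSA2xxx logical volume whose
 * little-endian lunid reads 0x00010002 is placed on bus 1 with
 * target = (0x00010002 >> 16) & 0x3fff = 1 and lun = 0x00010002 & 0xff = 2.
 * A plain (non-MSA2xxx) logical volume with lunid 0x00000005 lands on
 * bus 0, target 5, lun 0. Physical devices and the HBA leave target/lun
 * at -1 here; they are assigned later via hpsa_scsi_add_entry() and
 * hpsa_find_target_lun().
 */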
1518
1519/*
1520 * If there is no lun 0 on a target, linux won't find any devices.
1521 * For the MSA2xxx boxes, we have to manually detect the enclosure
1522 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1523 * it for some reason. *tmpdevice is the target we're adding,
1524 * this_device is a pointer into the current element of currentsd[]
1525 * that we're building up in update_scsi_devices(), below.
1526 * lunzerobits is a bitmap that tracks which targets already have a
1527 * lun 0 assigned.
1528 * Returns 1 if an enclosure was added, 0 if not.
1529 */
1530static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1531 struct hpsa_scsi_dev_t *tmpdevice,
1532 struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
1533 int bus, int target, int lun, unsigned long lunzerobits[],
1534 int *nmsa2xxx_enclosures)
1535{
1536 unsigned char scsi3addr[8];
1537
1538 if (test_bit(target, lunzerobits))
1539 return 0; /* There is already a lun 0 on this target. */
1540
1541 if (!is_logical_dev_addr_mode(lunaddrbytes))
1542 return 0; /* It's the logical targets that may lack lun 0. */
1543
1544 if (!is_msa2xxx(h, tmpdevice))
1545 return 0; /* It's only the MSA2xxx that have this problem. */
1546
1547 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1548 return 0;
1549
1550 if (is_hba_lunid(lunaddrbytes))
1551 return 0; /* Don't add the RAID controller here. */
1552
1553#define MAX_MSA2XXX_ENCLOSURES 32
1554 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1555 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1556 "enclosures exceeded. Check your hardware "
1557 "configuration.");
1558 return 0;
1559 }
1560
1561 memset(scsi3addr, 0, 8);
1562 scsi3addr[3] = target;
1563 if (hpsa_update_device_info(h, scsi3addr, this_device))
1564 return 0;
1565 (*nmsa2xxx_enclosures)++;
1566 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1567 set_bit(target, lunzerobits);
1568 return 1;
1569}
1570
1571/*
1572 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1573 * logdev. The number of luns in physdev and logdev are returned in
1574 * *nphysicals and *nlogicals, respectively.
1575 * Returns 0 on success, -1 otherwise.
1576 */
1577static int hpsa_gather_lun_info(struct ctlr_info *h,
1578 int reportlunsize,
1579 struct ReportLUNdata *physdev, __u32 *nphysicals,
1580 struct ReportLUNdata *logdev, __u32 *nlogicals)
1581{
1582 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1583 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1584 return -1;
1585 }
1586 memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
1587 *nphysicals = be32_to_cpu(*nphysicals) / 8;
1588#ifdef DEBUG
1589 dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
1590#endif
1591 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1592 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1593 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1594 *nphysicals - HPSA_MAX_PHYS_LUN);
1595 *nphysicals = HPSA_MAX_PHYS_LUN;
1596 }
1597 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1598 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1599 return -1;
1600 }
1601 memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
1602 *nlogicals = be32_to_cpu(*nlogicals) / 8;
1603#ifdef DEBUG
1604 dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
1605#endif
1606 /* Reject Logicals in excess of our max capability. */
1607 if (*nlogicals > HPSA_MAX_LUN) {
1608 dev_warn(&h->pdev->dev,
1609 "maximum logical LUNs (%d) exceeded. "
1610 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1611 *nlogicals - HPSA_MAX_LUN);
1612 *nlogicals = HPSA_MAX_LUN;
1613 }
1614 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1615 dev_warn(&h->pdev->dev,
1616 "maximum logical + physical LUNs (%d) exceeded. "
1617 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1618 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1619 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1620 }
1621 return 0;
1622}
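
/*
 * LUNListLength is a big-endian byte count of the 8-byte LUN entries that
 * follow, so (made-up example) a controller reporting three logical
 * drives returns LUNListLength = 24 and be32_to_cpu(...) / 8 above yields
 * *nlogicals = 3. Anything beyond HPSA_MAX_PHYS_LUN / HPSA_MAX_LUN is
 * clipped with a warning rather than treated as an error.
 */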
1623
1624static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1625{
1626 /* the idea here is we could get notified
1627 * that some devices have changed, so we do a report
1628 * physical luns and report logical luns cmd, and adjust
1629 * our list of devices accordingly.
1630 *
1631 * The scsi3addr's of devices won't change so long as the
1632 * adapter is not reset. That means we can rescan and
1633 * tell which devices we already know about, vs. new
1634 * devices, vs. disappearing devices.
1635 */
1636 struct ReportLUNdata *physdev_list = NULL;
1637 struct ReportLUNdata *logdev_list = NULL;
1638 unsigned char *inq_buff = NULL;
1639 __u32 nphysicals = 0;
1640 __u32 nlogicals = 0;
1641 __u32 ndev_allocated = 0;
1642 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1643 int ncurrent = 0;
1644 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1645 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1646 int bus, target, lun;
1647 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1648
1649 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1650 GFP_KERNEL);
1651 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1652 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1653 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1654 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1655
1656 if (!currentsd || !physdev_list || !logdev_list ||
1657 !inq_buff || !tmpdevice) {
1658 dev_err(&h->pdev->dev, "out of memory\n");
1659 goto out;
1660 }
1661 memset(lunzerobits, 0, sizeof(lunzerobits));
1662
1663 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1664 logdev_list, &nlogicals))
1665 goto out;
1666
1667 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
1668 * but each of them 4 times through different paths. The plus 1
1669 * is for the RAID controller.
1670 */
1671 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1672
1673 /* Allocate the per device structures */
1674 for (i = 0; i < ndevs_to_allocate; i++) {
1675 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1676 if (!currentsd[i]) {
1677 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1678 __FILE__, __LINE__);
1679 goto out;
1680 }
1681 ndev_allocated++;
1682 }
1683
1684 /* adjust our table of devices */
1685 nmsa2xxx_enclosures = 0;
1686 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1687 __u8 *lunaddrbytes;
1688
1689 /* Figure out where the LUN ID info is coming from */
1690 if (i < nphysicals)
1691 lunaddrbytes = &physdev_list->LUN[i][0];
1692 else
1693 if (i < nphysicals + nlogicals)
1694 lunaddrbytes =
1695 &logdev_list->LUN[i-nphysicals][0];
1696 else /* jam in the RAID controller at the end */
1697 lunaddrbytes = RAID_CTLR_LUNID;
1698
1699 /* skip masked physical devices. */
1700 if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
1701 continue;
1702
1703 /* Get device type, vendor, model, device id */
1704 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1705 continue; /* skip it if we can't talk to it. */
1706 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1707 tmpdevice);
1708 this_device = currentsd[ncurrent];
1709
1710 /*
1711 * For the msa2xxx boxes, we have to insert a LUN 0 which
1712 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1713 * is nonetheless an enclosure device there. We have to
1714 * present it, otherwise Linux won't find anything if
1715 * there is no lun 0.
1716 */
1717 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1718 lunaddrbytes, bus, target, lun, lunzerobits,
1719 &nmsa2xxx_enclosures)) {
1720 ncurrent++;
1721 this_device = currentsd[ncurrent];
1722 }
1723
1724 *this_device = *tmpdevice;
1725 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1726
1727 switch (this_device->devtype) {
1728 case TYPE_ROM: {
1729 /* We don't *really* support actual CD-ROM devices,
1730 * just "One Button Disaster Recovery" tape drive
1731 * which temporarily pretends to be a CD-ROM drive.
1732 * So we check that the device is really an OBDR tape
1733 * device by checking for "$DR-10" in bytes 43-48 of
1734 * the inquiry data.
1735 */
1736 char obdr_sig[7];
1737#define OBDR_TAPE_SIG "$DR-10"
1738 strncpy(obdr_sig, &inq_buff[43], 6);
1739 obdr_sig[6] = '\0';
1740 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1741 /* Not OBDR device, ignore it. */
1742 break;
1743 }
1744 ncurrent++;
1745 break;
1746 case TYPE_DISK:
1747 if (i < nphysicals)
1748 break;
1749 ncurrent++;
1750 break;
1751 case TYPE_TAPE:
1752 case TYPE_MEDIUM_CHANGER:
1753 ncurrent++;
1754 break;
1755 case TYPE_RAID:
1756 /* Only present the Smart Array HBA as a RAID controller.
1757 * If it's a RAID controller other than the HBA itself
1758 * (an external RAID controller, MSA500 or similar)
1759 * don't present it.
1760 */
1761 if (!is_hba_lunid(lunaddrbytes))
1762 break;
1763 ncurrent++;
1764 break;
1765 default:
1766 break;
1767 }
1768 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1769 break;
1770 }
1771 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1772out:
1773 kfree(tmpdevice);
1774 for (i = 0; i < ndev_allocated; i++)
1775 kfree(currentsd[i]);
1776 kfree(currentsd);
1777 kfree(inq_buff);
1778 kfree(physdev_list);
1779 kfree(logdev_list);
1780 return;
1781}
1782
1783/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1784 * dma mapping and fills in the scatter gather entries of the
1785 * hpsa command, cp.
1786 */
1787static int hpsa_scatter_gather(struct pci_dev *pdev,
1788 struct CommandList *cp,
1789 struct scsi_cmnd *cmd)
1790{
1791 unsigned int len;
1792 struct scatterlist *sg;
1793 __u64 addr64;
1794 int use_sg, i;
1795
1796 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1797
1798 use_sg = scsi_dma_map(cmd);
1799 if (use_sg < 0)
1800 return use_sg;
1801
1802 if (!use_sg)
1803 goto sglist_finished;
1804
1805 scsi_for_each_sg(cmd, sg, use_sg, i) {
1806 addr64 = (__u64) sg_dma_address(sg);
1807 len = sg_dma_len(sg);
1808 cp->SG[i].Addr.lower =
1809 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1810 cp->SG[i].Addr.upper =
1811 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1812 cp->SG[i].Len = len;
1813 cp->SG[i].Ext = 0; /* we are not chaining */
1814 }
1815
1816sglist_finished:
1817
1818 cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
1819 cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
1820 return 0;
1821}
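
/*
 * scsi_dma_map() never hands us more segments than .sg_tablesize, and the
 * host template above sets that to MAXSGENTRIES, so the BUG_ON() at the
 * top of hpsa_scatter_gather() is a sanity check rather than a limit we
 * expect to hit. With use_sg == 0 (no data transfer) the header simply
 * advertises zero SG descriptors.
 */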
1822
1823
1824static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1825 void (*done)(struct scsi_cmnd *))
1826{
1827 struct ctlr_info *h;
1828 struct hpsa_scsi_dev_t *dev;
1829 unsigned char scsi3addr[8];
1830 struct CommandList *c;
1831 unsigned long flags;
1832
1833 /* Get the ptr to our adapter structure out of cmd->host. */
1834 h = sdev_to_hba(cmd->device);
1835 dev = cmd->device->hostdata;
1836 if (!dev) {
1837 cmd->result = DID_NO_CONNECT << 16;
1838 done(cmd);
1839 return 0;
1840 }
1841 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1842
1843 /* Need a lock as this is being allocated from the pool */
1844 spin_lock_irqsave(&h->lock, flags);
1845 c = cmd_alloc(h);
1846 spin_unlock_irqrestore(&h->lock, flags);
1847 if (c == NULL) { /* trouble... */
1848 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1849 return SCSI_MLQUEUE_HOST_BUSY;
1850 }
1851
1852 /* Fill in the command list header */
1853
1854 cmd->scsi_done = done; /* save this for use by completion code */
1855
1856 /* save c in case we have to abort it */
1857 cmd->host_scribble = (unsigned char *) c;
1858
1859 c->cmd_type = CMD_SCSI;
1860 c->scsi_cmd = cmd;
1861 c->Header.ReplyQueue = 0; /* unused in simple mode */
1862 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1863 c->Header.Tag.lower = c->busaddr; /* Use DMA (bus) address of cmd as tag */
1864
1865 /* Fill in the request block... */
1866
1867 c->Request.Timeout = 0;
1868 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1869 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1870 c->Request.CDBLen = cmd->cmd_len;
1871 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1872 c->Request.Type.Type = TYPE_CMD;
1873 c->Request.Type.Attribute = ATTR_SIMPLE;
1874 switch (cmd->sc_data_direction) {
1875 case DMA_TO_DEVICE:
1876 c->Request.Type.Direction = XFER_WRITE;
1877 break;
1878 case DMA_FROM_DEVICE:
1879 c->Request.Type.Direction = XFER_READ;
1880 break;
1881 case DMA_NONE:
1882 c->Request.Type.Direction = XFER_NONE;
1883 break;
1884 case DMA_BIDIRECTIONAL:
1885 /* This can happen if a buggy application does a scsi passthru
1886 * and sets both inlen and outlen to non-zero (see
1887 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
1888 */
1889
1890 c->Request.Type.Direction = XFER_RSVD;
1891 /* This is technically wrong, and hpsa controllers should
1892 * reject it with CMD_INVALID, which is the most correct
1893 * response, but non-fibre backends appear to let it
1894 * slide by, and give the same results as if this field
1895 * were set correctly. Either way is acceptable for
1896 * our purposes here.
1897 */
1898
1899 break;
1900
1901 default:
1902 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
1903 cmd->sc_data_direction);
1904 BUG();
1905 break;
1906 }
1907
1908 if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
1909 cmd_free(h, c);
1910 return SCSI_MLQUEUE_HOST_BUSY;
1911 }
1912 enqueue_cmd_and_start_io(h, c);
1913 /* the cmd'll come back via intr handler in complete_scsi_command() */
1914 return 0;
1915}
1916
1917static void hpsa_unregister_scsi(struct ctlr_info *h)
1918{
1919 /* we are being forcibly unloaded, and may not refuse. */
1920 scsi_remove_host(h->scsi_host);
1921 scsi_host_put(h->scsi_host);
1922 h->scsi_host = NULL;
1923}
1924
1925static int hpsa_register_scsi(struct ctlr_info *h)
1926{
1927 int rc;
1928
1929 hpsa_update_scsi_devices(h, -1);
1930 rc = hpsa_scsi_detect(h);
1931 if (rc != 0)
1932 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
1933 " hpsa_scsi_detect(), rc is %d\n", rc);
1934 return rc;
1935}
1936
1937static int wait_for_device_to_become_ready(struct ctlr_info *h,
1938 unsigned char lunaddr[])
1939{
1940 int rc = 0;
1941 int count = 0;
1942 int waittime = 1; /* seconds */
1943 struct CommandList *c;
1944
1945 c = cmd_special_alloc(h);
1946 if (!c) {
1947 dev_warn(&h->pdev->dev, "out of memory in "
1948 "wait_for_device_to_become_ready.\n");
1949 return IO_ERROR;
1950 }
1951
1952 /* Send test unit ready until device ready, or give up. */
1953 while (count < HPSA_TUR_RETRY_LIMIT) {
1954
1955 /* Wait for a bit. do this first, because if we send
1956 * the TUR right away, the reset will just abort it.
1957 */
1958 msleep(1000 * waittime);
1959 count++;
1960
1961 /* Increase wait time with each try, up to a point. */
1962 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
1963 waittime = waittime * 2;
1964
1965 /* Send the Test Unit Ready */
1966 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
1967 hpsa_scsi_do_simple_cmd_core(h, c);
1968 /* no unmap needed here because no data xfer. */
1969
1970 if (c->err_info->CommandStatus == CMD_SUCCESS)
1971 break;
1972
1973 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
1974 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
1975 (c->err_info->SenseInfo[2] == NO_SENSE ||
1976 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
1977 break;
1978
1979 dev_warn(&h->pdev->dev, "waiting %d secs "
1980 "for device to become ready.\n", waittime);
1981 rc = 1; /* device not ready. */
1982 }
1983
1984 if (rc)
1985 dev_warn(&h->pdev->dev, "giving up on device.\n");
1986 else
1987 dev_warn(&h->pdev->dev, "device is ready.\n");
1988
1989 cmd_special_free(h, c);
1990 return rc;
1991}
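
/*
 * The retry loop above backs off exponentially: the first Test Unit Ready
 * goes out after 1 second, then 2, 4, ... seconds, capped at
 * HPSA_MAX_WAIT_INTERVAL_SECS, for at most HPSA_TUR_RETRY_LIMIT attempts
 * (both constants are defined elsewhere in the driver).
 */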
1992
1993/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1994 * complaining. Doing a host- or bus-reset can't do anything good here.
1995 */
1996static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1997{
1998 int rc;
1999 struct ctlr_info *h;
2000 struct hpsa_scsi_dev_t *dev;
2001
2002 /* find the controller to which the command to be aborted was sent */
2003 h = sdev_to_hba(scsicmd->device);
2004 if (h == NULL) /* paranoia */
2005 return FAILED;
2006 dev_warn(&h->pdev->dev, "resetting drive\n");
2007
2008 dev = scsicmd->device->hostdata;
2009 if (!dev) {
2010 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2011 "device lookup failed.\n");
2012 return FAILED;
2013 }
2014 /* send a reset to the SCSI LUN which the command was sent to */
2015 rc = hpsa_send_reset(h, dev->scsi3addr);
2016 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2017 return SUCCESS;
2018
2019 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2020 return FAILED;
2021}
2022
2023/*
2024 * For operations that cannot sleep, a command block is allocated at init,
2025 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2026 * which ones are free or in use. Lock must be held when calling this.
2027 * cmd_free() is the complement.
2028 */
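/* To illustrate the index arithmetic below (assuming BITS_PER_LONG == 64):
 * command index i = 70 lives in word cmd_pool_bits[70 / 64] = cmd_pool_bits[1]
 * at bit position 70 & (64 - 1) = 6, and the same index selects entry 70 of
 * both cmd_pool and errinfo_pool.
 */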
2029static struct CommandList *cmd_alloc(struct ctlr_info *h)
2030{
2031 struct CommandList *c;
2032 int i;
2033 union u64bit temp64;
2034 dma_addr_t cmd_dma_handle, err_dma_handle;
2035
2036 do {
2037 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2038 if (i == h->nr_cmds)
2039 return NULL;
2040 } while (test_and_set_bit
2041 (i & (BITS_PER_LONG - 1),
2042 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2043 c = h->cmd_pool + i;
2044 memset(c, 0, sizeof(*c));
2045 cmd_dma_handle = h->cmd_pool_dhandle
2046 + i * sizeof(*c);
2047 c->err_info = h->errinfo_pool + i;
2048 memset(c->err_info, 0, sizeof(*c->err_info));
2049 err_dma_handle = h->errinfo_pool_dhandle
2050 + i * sizeof(*c->err_info);
2051 h->nr_allocs++;
2052
2053 c->cmdindex = i;
2054
2055 INIT_HLIST_NODE(&c->list);
2056 c->busaddr = (__u32) cmd_dma_handle;
2057 temp64.val = (__u64) err_dma_handle;
2058 c->ErrDesc.Addr.lower = temp64.val32.lower;
2059 c->ErrDesc.Addr.upper = temp64.val32.upper;
2060 c->ErrDesc.Len = sizeof(*c->err_info);
2061
2062 c->h = h;
2063 return c;
2064}
2065
2066/* For operations that can afford to sleep while memory is allocated,
2067 * this routine can be called. The lock need not be held to call
2068 * cmd_special_alloc(); cmd_special_free() is the complement.
2069 */
2070static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2071{
2072 struct CommandList *c;
2073 union u64bit temp64;
2074 dma_addr_t cmd_dma_handle, err_dma_handle;
2075
2076 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2077 if (c == NULL)
2078 return NULL;
2079 memset(c, 0, sizeof(*c));
2080
2081 c->cmdindex = -1;
2082
2083 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2084 &err_dma_handle);
2085
2086 if (c->err_info == NULL) {
2087 pci_free_consistent(h->pdev,
2088 sizeof(*c), c, cmd_dma_handle);
2089 return NULL;
2090 }
2091 memset(c->err_info, 0, sizeof(*c->err_info));
2092
2093 INIT_HLIST_NODE(&c->list);
2094 c->busaddr = (__u32) cmd_dma_handle;
2095 temp64.val = (__u64) err_dma_handle;
2096 c->ErrDesc.Addr.lower = temp64.val32.lower;
2097 c->ErrDesc.Addr.upper = temp64.val32.upper;
2098 c->ErrDesc.Len = sizeof(*c->err_info);
2099
2100 c->h = h;
2101 return c;
2102}
2103
2104static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2105{
2106 int i;
2107
2108 i = c - h->cmd_pool;
2109 clear_bit(i & (BITS_PER_LONG - 1),
2110 h->cmd_pool_bits + (i / BITS_PER_LONG));
2111 h->nr_frees++;
2112}
2113
2114static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2115{
2116 union u64bit temp64;
2117
2118 temp64.val32.lower = c->ErrDesc.Addr.lower;
2119 temp64.val32.upper = c->ErrDesc.Addr.upper;
2120 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2121 c->err_info, (dma_addr_t) temp64.val);
2122 pci_free_consistent(h->pdev, sizeof(*c),
2123 c, (dma_addr_t) c->busaddr);
2124}
2125
2126#ifdef CONFIG_COMPAT
2127
2128static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
2129{
2130 int ret;
2131
2132 lock_kernel();
2133 ret = hpsa_ioctl(dev, cmd, arg);
2134 unlock_kernel();
2135 return ret;
2136}
2137
2138static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
2139static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2140 int cmd, void *arg);
2141
2142static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2143{
2144 switch (cmd) {
2145 case CCISS_GETPCIINFO:
2146 case CCISS_GETINTINFO:
2147 case CCISS_SETINTINFO:
2148 case CCISS_GETNODENAME:
2149 case CCISS_SETNODENAME:
2150 case CCISS_GETHEARTBEAT:
2151 case CCISS_GETBUSTYPES:
2152 case CCISS_GETFIRMVER:
2153 case CCISS_GETDRIVVER:
2154 case CCISS_REVALIDVOLS:
2155 case CCISS_DEREGDISK:
2156 case CCISS_REGNEWDISK:
2157 case CCISS_REGNEWD:
2158 case CCISS_RESCANDISK:
2159 case CCISS_GETLUNINFO:
2160 return do_ioctl(dev, cmd, arg);
2161
2162 case CCISS_PASSTHRU32:
2163 return hpsa_ioctl32_passthru(dev, cmd, arg);
2164 case CCISS_BIG_PASSTHRU32:
2165 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2166
2167 default:
2168 return -ENOIOCTLCMD;
2169 }
2170}
2171
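/* The two passthru translators below follow the usual compat pattern: build
 * a native IOCTL_Command_struct from the 32-bit user structure (converting
 * the buffer pointer with compat_ptr()), copy it into a scratch area obtained
 * from compat_alloc_user_space(), hand that to do_ioctl(), and finally copy
 * the error information back into the 32-bit structure.
 */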
2172static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2173{
2174 IOCTL32_Command_struct __user *arg32 =
2175 (IOCTL32_Command_struct __user *) arg;
2176 IOCTL_Command_struct arg64;
2177 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2178 int err;
2179 u32 cp;
2180
2181 err = 0;
2182 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2183 sizeof(arg64.LUN_info));
2184 err |= copy_from_user(&arg64.Request, &arg32->Request,
2185 sizeof(arg64.Request));
2186 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2187 sizeof(arg64.error_info));
2188 err |= get_user(arg64.buf_size, &arg32->buf_size);
2189 err |= get_user(cp, &arg32->buf);
2190 arg64.buf = compat_ptr(cp);
2191 err |= copy_to_user(p, &arg64, sizeof(arg64));
2192
2193 if (err)
2194 return -EFAULT;
2195
2196 err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2197 if (err)
2198 return err;
2199 err |= copy_in_user(&arg32->error_info, &p->error_info,
2200 sizeof(arg32->error_info));
2201 if (err)
2202 return -EFAULT;
2203 return err;
2204}
2205
2206static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2207 int cmd, void *arg)
2208{
2209 BIG_IOCTL32_Command_struct __user *arg32 =
2210 (BIG_IOCTL32_Command_struct __user *) arg;
2211 BIG_IOCTL_Command_struct arg64;
2212 BIG_IOCTL_Command_struct __user *p =
2213 compat_alloc_user_space(sizeof(arg64));
2214 int err;
2215 u32 cp;
2216
2217 err = 0;
2218 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2219 sizeof(arg64.LUN_info));
2220 err |= copy_from_user(&arg64.Request, &arg32->Request,
2221 sizeof(arg64.Request));
2222 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2223 sizeof(arg64.error_info));
2224 err |= get_user(arg64.buf_size, &arg32->buf_size);
2225 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2226 err |= get_user(cp, &arg32->buf);
2227 arg64.buf = compat_ptr(cp);
2228 err |= copy_to_user(p, &arg64, sizeof(arg64));
2229
2230 if (err)
2231 return -EFAULT;
2232
2233 err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2234 if (err)
2235 return err;
2236 err |= copy_in_user(&arg32->error_info, &p->error_info,
2237 sizeof(arg32->error_info));
2238 if (err)
2239 return -EFAULT;
2240 return err;
2241}
2242#endif
2243
2244static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2245{
2246 struct hpsa_pci_info pciinfo;
2247
2248 if (!argp)
2249 return -EINVAL;
2250 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2251 pciinfo.bus = h->pdev->bus->number;
2252 pciinfo.dev_fn = h->pdev->devfn;
2253 pciinfo.board_id = h->board_id;
2254 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2255 return -EFAULT;
2256 return 0;
2257}
2258
2259static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2260{
2261 DriverVer_type DriverVer;
2262 unsigned char vmaj, vmin, vsubmin;
2263 int rc;
2264
2265 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2266 &vmaj, &vmin, &vsubmin);
2267 if (rc != 3) {
2268 dev_info(&h->pdev->dev, "driver version string '%s' "
2269 "unrecognized.", HPSA_DRIVER_VERSION);
2270 vmaj = 0;
2271 vmin = 0;
2272 vsubmin = 0;
2273 }
2274 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2275 if (!argp)
2276 return -EINVAL;
2277 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2278 return -EFAULT;
2279 return 0;
2280}
2281
2282static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2283{
2284 IOCTL_Command_struct iocommand;
2285 struct CommandList *c;
2286 char *buff = NULL;
2287 union u64bit temp64;
2288
2289 if (!argp)
2290 return -EINVAL;
2291 if (!capable(CAP_SYS_RAWIO))
2292 return -EPERM;
2293 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2294 return -EFAULT;
2295 if ((iocommand.buf_size < 1) &&
2296 (iocommand.Request.Type.Direction != XFER_NONE)) {
2297 return -EINVAL;
2298 }
2299 if (iocommand.buf_size > 0) {
2300 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2301 if (buff == NULL)
2302 return -EFAULT;
2303 }
2304 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2305 /* Copy the data into the buffer we created */
2306 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2307 kfree(buff);
2308 return -EFAULT;
2309 }
2310 } else
2311 memset(buff, 0, iocommand.buf_size);
2312 c = cmd_special_alloc(h);
2313 if (c == NULL) {
2314 kfree(buff);
2315 return -ENOMEM;
2316 }
2317 /* Fill in the command type */
2318 c->cmd_type = CMD_IOCTL_PEND;
2319 /* Fill in Command Header */
2320 c->Header.ReplyQueue = 0; /* unused in simple mode */
2321 if (iocommand.buf_size > 0) { /* buffer to fill */
2322 c->Header.SGList = 1;
2323 c->Header.SGTotal = 1;
2324 } else { /* no buffers to fill */
2325 c->Header.SGList = 0;
2326 c->Header.SGTotal = 0;
2327 }
2328 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2329 	/* use the kernel address of the cmd block for the tag */
2330 c->Header.Tag.lower = c->busaddr;
2331
2332 /* Fill in Request block */
2333 memcpy(&c->Request, &iocommand.Request,
2334 sizeof(c->Request));
2335
2336 /* Fill in the scatter gather information */
2337 if (iocommand.buf_size > 0) {
2338 temp64.val = pci_map_single(h->pdev, buff,
2339 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2340 c->SG[0].Addr.lower = temp64.val32.lower;
2341 c->SG[0].Addr.upper = temp64.val32.upper;
2342 c->SG[0].Len = iocommand.buf_size;
2343 c->SG[0].Ext = 0; /* we are not chaining*/
2344 }
2345 hpsa_scsi_do_simple_cmd_core(h, c);
2346 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2347 check_ioctl_unit_attention(h, c);
2348
2349 /* Copy the error information out */
2350 memcpy(&iocommand.error_info, c->err_info,
2351 sizeof(iocommand.error_info));
2352 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2353 kfree(buff);
2354 cmd_special_free(h, c);
2355 return -EFAULT;
2356 }
2357
2358 if (iocommand.Request.Type.Direction == XFER_READ) {
2359 /* Copy the data out of the buffer we created */
2360 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2361 kfree(buff);
2362 cmd_special_free(h, c);
2363 return -EFAULT;
2364 }
2365 }
2366 kfree(buff);
2367 cmd_special_free(h, c);
2368 return 0;
2369}
2370
2371static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2372{
2373 BIG_IOCTL_Command_struct *ioc;
2374 struct CommandList *c;
2375 unsigned char **buff = NULL;
2376 int *buff_size = NULL;
2377 union u64bit temp64;
2378 BYTE sg_used = 0;
2379 int status = 0;
2380 int i;
2381 __u32 left;
2382 __u32 sz;
2383 BYTE __user *data_ptr;
2384
2385 if (!argp)
2386 return -EINVAL;
2387 if (!capable(CAP_SYS_RAWIO))
2388 return -EPERM;
2389 ioc = (BIG_IOCTL_Command_struct *)
2390 kmalloc(sizeof(*ioc), GFP_KERNEL);
2391 if (!ioc) {
2392 status = -ENOMEM;
2393 goto cleanup1;
2394 }
2395 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2396 status = -EFAULT;
2397 goto cleanup1;
2398 }
2399 if ((ioc->buf_size < 1) &&
2400 (ioc->Request.Type.Direction != XFER_NONE)) {
2401 status = -EINVAL;
2402 goto cleanup1;
2403 }
2404 /* Check kmalloc limits using all SGs */
2405 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2406 status = -EINVAL;
2407 goto cleanup1;
2408 }
2409 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2410 status = -EINVAL;
2411 goto cleanup1;
2412 }
2413 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2414 if (!buff) {
2415 status = -ENOMEM;
2416 goto cleanup1;
2417 }
2418 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2419 if (!buff_size) {
2420 status = -ENOMEM;
2421 goto cleanup1;
2422 }
2423 left = ioc->buf_size;
2424 data_ptr = ioc->buf;
2425 while (left) {
2426 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2427 buff_size[sg_used] = sz;
2428 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2429 if (buff[sg_used] == NULL) {
2430 status = -ENOMEM;
2431 goto cleanup1;
2432 }
2433 if (ioc->Request.Type.Direction == XFER_WRITE) {
2434 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2435 status = -ENOMEM;
2436 goto cleanup1;
2437 }
2438 } else
2439 memset(buff[sg_used], 0, sz);
2440 left -= sz;
2441 data_ptr += sz;
2442 sg_used++;
2443 }
2444 c = cmd_special_alloc(h);
2445 if (c == NULL) {
2446 status = -ENOMEM;
2447 goto cleanup1;
2448 }
2449 c->cmd_type = CMD_IOCTL_PEND;
2450 c->Header.ReplyQueue = 0;
2451
2452 if (ioc->buf_size > 0) {
2453 c->Header.SGList = sg_used;
2454 c->Header.SGTotal = sg_used;
2455 } else {
2456 c->Header.SGList = 0;
2457 c->Header.SGTotal = 0;
2458 }
2459 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2460 c->Header.Tag.lower = c->busaddr;
2461 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2462 if (ioc->buf_size > 0) {
2463 int i;
2464 for (i = 0; i < sg_used; i++) {
2465 temp64.val = pci_map_single(h->pdev, buff[i],
2466 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2467 c->SG[i].Addr.lower = temp64.val32.lower;
2468 c->SG[i].Addr.upper = temp64.val32.upper;
2469 c->SG[i].Len = buff_size[i];
2470 /* we are not chaining */
2471 c->SG[i].Ext = 0;
2472 }
2473 }
2474 hpsa_scsi_do_simple_cmd_core(h, c);
2475 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2476 check_ioctl_unit_attention(h, c);
2477 /* Copy the error information out */
2478 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2479 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2480 cmd_special_free(h, c);
2481 status = -EFAULT;
2482 goto cleanup1;
2483 }
2484 if (ioc->Request.Type.Direction == XFER_READ) {
2485 /* Copy the data out of the buffer we created */
2486 BYTE __user *ptr = ioc->buf;
2487 for (i = 0; i < sg_used; i++) {
2488 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2489 cmd_special_free(h, c);
2490 status = -EFAULT;
2491 goto cleanup1;
2492 }
2493 ptr += buff_size[i];
2494 }
2495 }
2496 cmd_special_free(h, c);
2497 status = 0;
2498cleanup1:
2499 if (buff) {
2500 for (i = 0; i < sg_used; i++)
2501 kfree(buff[i]);
2502 kfree(buff);
2503 }
2504 kfree(buff_size);
2505 kfree(ioc);
2506 return status;
2507}
2508
2509static void check_ioctl_unit_attention(struct ctlr_info *h,
2510 struct CommandList *c)
2511{
2512 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2513 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2514 (void) check_for_unit_attention(h, c);
2515}
2516/*
2517 * ioctl
2518 */
2519static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2520{
2521 struct ctlr_info *h;
2522 void __user *argp = (void __user *)arg;
2523
2524 h = sdev_to_hba(dev);
2525
2526 switch (cmd) {
2527 case CCISS_DEREGDISK:
2528 case CCISS_REGNEWDISK:
2529 case CCISS_REGNEWD:
2530 hpsa_update_scsi_devices(h, dev->host->host_no);
2531 return 0;
2532 case CCISS_GETPCIINFO:
2533 return hpsa_getpciinfo_ioctl(h, argp);
2534 case CCISS_GETDRIVVER:
2535 return hpsa_getdrivver_ioctl(h, argp);
2536 case CCISS_PASSTHRU:
2537 return hpsa_passthru_ioctl(h, argp);
2538 case CCISS_BIG_PASSTHRU:
2539 return hpsa_big_passthru_ioctl(h, argp);
2540 default:
2541 return -ENOTTY;
2542 }
2543}
2544
2545static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
2546 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
2547 int cmd_type)
2548{
2549 int pci_dir = XFER_NONE;
2550
2551 c->cmd_type = CMD_IOCTL_PEND;
2552 c->Header.ReplyQueue = 0;
2553 if (buff != NULL && size > 0) {
2554 c->Header.SGList = 1;
2555 c->Header.SGTotal = 1;
2556 } else {
2557 c->Header.SGList = 0;
2558 c->Header.SGTotal = 0;
2559 }
2560 c->Header.Tag.lower = c->busaddr;
2561 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2562
2563 c->Request.Type.Type = cmd_type;
2564 if (cmd_type == TYPE_CMD) {
2565 switch (cmd) {
2566 case HPSA_INQUIRY:
2567 			/* are we trying to read a vital product data page? */
2568 if (page_code != 0) {
2569 c->Request.CDB[1] = 0x01;
2570 c->Request.CDB[2] = page_code;
2571 }
2572 c->Request.CDBLen = 6;
2573 c->Request.Type.Attribute = ATTR_SIMPLE;
2574 c->Request.Type.Direction = XFER_READ;
2575 c->Request.Timeout = 0;
2576 c->Request.CDB[0] = HPSA_INQUIRY;
2577 c->Request.CDB[4] = size & 0xFF;
2578 break;
2579 case HPSA_REPORT_LOG:
2580 case HPSA_REPORT_PHYS:
2581 		/* Talking to the controller, so it's a physical command:
2582 		   mode = 00, target = 0. Nothing to write.
2583 		*/
2584 c->Request.CDBLen = 12;
2585 c->Request.Type.Attribute = ATTR_SIMPLE;
2586 c->Request.Type.Direction = XFER_READ;
2587 c->Request.Timeout = 0;
2588 c->Request.CDB[0] = cmd;
2589 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2590 c->Request.CDB[7] = (size >> 16) & 0xFF;
2591 c->Request.CDB[8] = (size >> 8) & 0xFF;
2592 c->Request.CDB[9] = size & 0xFF;
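		/* The allocation length is packed big-endian into CDB
		 * bytes 6-9; for example, size = 0x400 (1024 bytes)
		 * becomes CDB[6..9] = 00 00 04 00.
		 */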
2593 break;
2594
2595 case HPSA_READ_CAPACITY:
2596 c->Request.CDBLen = 10;
2597 c->Request.Type.Attribute = ATTR_SIMPLE;
2598 c->Request.Type.Direction = XFER_READ;
2599 c->Request.Timeout = 0;
2600 c->Request.CDB[0] = cmd;
2601 break;
2602 case HPSA_CACHE_FLUSH:
2603 c->Request.CDBLen = 12;
2604 c->Request.Type.Attribute = ATTR_SIMPLE;
2605 c->Request.Type.Direction = XFER_WRITE;
2606 c->Request.Timeout = 0;
2607 c->Request.CDB[0] = BMIC_WRITE;
2608 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2609 break;
2610 case TEST_UNIT_READY:
2611 c->Request.CDBLen = 6;
2612 c->Request.Type.Attribute = ATTR_SIMPLE;
2613 c->Request.Type.Direction = XFER_NONE;
2614 c->Request.Timeout = 0;
2615 break;
2616 default:
2617 			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2618 BUG();
2619 return;
2620 }
2621 } else if (cmd_type == TYPE_MSG) {
2622 switch (cmd) {
2623
2624 case HPSA_DEVICE_RESET_MSG:
2625 c->Request.CDBLen = 16;
2626 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2627 c->Request.Type.Attribute = ATTR_SIMPLE;
2628 c->Request.Type.Direction = XFER_NONE;
2629 c->Request.Timeout = 0; /* Don't time out */
2630 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2631 c->Request.CDB[1] = 0x03; /* Reset target above */
2632 			/* If bytes 4-7 are zero, it means reset the
2633 			 * LunID device. */
2634 c->Request.CDB[4] = 0x00;
2635 c->Request.CDB[5] = 0x00;
2636 c->Request.CDB[6] = 0x00;
2637 c->Request.CDB[7] = 0x00;
2638 break;
2639
2640 default:
2641 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2642 cmd);
2643 BUG();
2644 }
2645 } else {
2646 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2647 BUG();
2648 }
2649
2650 switch (c->Request.Type.Direction) {
2651 case XFER_READ:
2652 pci_dir = PCI_DMA_FROMDEVICE;
2653 break;
2654 case XFER_WRITE:
2655 pci_dir = PCI_DMA_TODEVICE;
2656 break;
2657 case XFER_NONE:
2658 pci_dir = PCI_DMA_NONE;
2659 break;
2660 default:
2661 pci_dir = PCI_DMA_BIDIRECTIONAL;
2662 }
2663
2664 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2665
2666 return;
2667}
2668
2669/*
2670 * Map (physical) PCI mem into (virtual) kernel space
2671 */
2672static void __iomem *remap_pci_mem(ulong base, ulong size)
2673{
2674 ulong page_base = ((ulong) base) & PAGE_MASK;
2675 ulong page_offs = ((ulong) base) - page_base;
2676 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
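	/* For example, with 4 KiB pages a hypothetical base of 0xfdcf0250 maps
	 * the region starting at the page boundary 0xfdcf0000, and the pointer
	 * returned below is advanced by the 0x250-byte offset so callers see
	 * the exact address they asked for.
	 */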
2677
2678 return page_remapped ? (page_remapped + page_offs) : NULL;
2679}
2680
2681/* Takes cmds off the submission queue and sends them to the hardware,
2682 * then puts them on the queue of cmds waiting for completion.
2683 */
2684static void start_io(struct ctlr_info *h)
2685{
2686 struct CommandList *c;
2687
2688 while (!hlist_empty(&h->reqQ)) {
2689 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2690 /* can't do anything if fifo is full */
2691 if ((h->access.fifo_full(h))) {
2692 dev_warn(&h->pdev->dev, "fifo full\n");
2693 break;
2694 }
2695
2696 /* Get the first entry from the Request Q */
2697 removeQ(c);
2698 h->Qdepth--;
2699
2700 		/* Tell the controller to execute the command */
2701 h->access.submit_command(h, c);
2702
2703 /* Put job onto the completed Q */
2704 addQ(&h->cmpQ, c);
2705 }
2706}
2707
2708static inline unsigned long get_next_completion(struct ctlr_info *h)
2709{
2710 return h->access.command_completed(h);
2711}
2712
2713static inline int interrupt_pending(struct ctlr_info *h)
2714{
2715 return h->access.intr_pending(h);
2716}
2717
2718static inline long interrupt_not_for_us(struct ctlr_info *h)
2719{
2720 return ((h->access.intr_pending(h) == 0) ||
2721 (h->interrupts_enabled == 0));
2722}
2723
2724static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
2725 __u32 raw_tag)
2726{
2727 if (unlikely(tag_index >= h->nr_cmds)) {
2728 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2729 return 1;
2730 }
2731 return 0;
2732}
2733
2734static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
2735{
2736 removeQ(c);
2737 if (likely(c->cmd_type == CMD_SCSI))
2738 complete_scsi_command(c, 0, raw_tag);
2739 else if (c->cmd_type == CMD_IOCTL_PEND)
2740 complete(c->waiting);
2741}
2742
2743static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2744{
2745 struct ctlr_info *h = dev_id;
2746 struct CommandList *c;
2747 unsigned long flags;
2748 __u32 raw_tag, tag, tag_index;
2749 struct hlist_node *tmp;
2750
2751 if (interrupt_not_for_us(h))
2752 return IRQ_NONE;
2753 spin_lock_irqsave(&h->lock, flags);
2754 while (interrupt_pending(h)) {
2755 while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
2756 if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
2757 tag_index = HPSA_TAG_TO_INDEX(raw_tag);
2758 if (bad_tag(h, tag_index, raw_tag))
2759 return IRQ_HANDLED;
2760 c = h->cmd_pool + tag_index;
2761 finish_cmd(c, raw_tag);
2762 continue;
2763 }
2764 tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
2765 c = NULL;
2766 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2767 if (c->busaddr == tag) {
2768 finish_cmd(c, raw_tag);
2769 break;
2770 }
2771 }
2772 }
2773 }
2774 spin_unlock_irqrestore(&h->lock, flags);
2775 return IRQ_HANDLED;
2776}
2777
2778/* Send a message CDB to the firmware. */
2779static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2780 unsigned char type)
2781{
2782 struct Command {
2783 struct CommandListHeader CommandHeader;
2784 struct RequestBlock Request;
2785 struct ErrDescriptor ErrorDescriptor;
2786 };
2787 struct Command *cmd;
2788 static const size_t cmd_sz = sizeof(*cmd) +
2789 sizeof(cmd->ErrorDescriptor);
2790 dma_addr_t paddr64;
2791 uint32_t paddr32, tag;
2792 void __iomem *vaddr;
2793 int i, err;
2794
2795 vaddr = pci_ioremap_bar(pdev, 0);
2796 if (vaddr == NULL)
2797 return -ENOMEM;
2798
2799 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
2800 * CCISS commands, so they must be allocated from the lower 4GiB of
2801 * memory.
2802 */
2803 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2804 if (err) {
2805 iounmap(vaddr);
2806 return -ENOMEM;
2807 }
2808
2809 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2810 if (cmd == NULL) {
2811 iounmap(vaddr);
2812 return -ENOMEM;
2813 }
2814
2815 /* This must fit, because of the 32-bit consistent DMA mask. Also,
2816 * although there's no guarantee, we assume that the address is at
2817 * least 4-byte aligned (most likely, it's page-aligned).
2818 */
2819 paddr32 = paddr64;
2820
2821 cmd->CommandHeader.ReplyQueue = 0;
2822 cmd->CommandHeader.SGList = 0;
2823 cmd->CommandHeader.SGTotal = 0;
2824 cmd->CommandHeader.Tag.lower = paddr32;
2825 cmd->CommandHeader.Tag.upper = 0;
2826 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
2827
2828 cmd->Request.CDBLen = 16;
2829 cmd->Request.Type.Type = TYPE_MSG;
2830 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
2831 cmd->Request.Type.Direction = XFER_NONE;
2832 cmd->Request.Timeout = 0; /* Don't time out */
2833 cmd->Request.CDB[0] = opcode;
2834 cmd->Request.CDB[1] = type;
2835 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
2836 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
2837 cmd->ErrorDescriptor.Addr.upper = 0;
2838 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
2839
2840 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
2841
2842 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
2843 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2844 if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
2845 break;
2846 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
2847 }
2848
2849 iounmap(vaddr);
2850
2851 /* we leak the DMA buffer here ... no choice since the controller could
2852 * still complete the command.
2853 */
2854 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
2855 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
2856 opcode, type);
2857 return -ETIMEDOUT;
2858 }
2859
2860 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
2861
2862 if (tag & HPSA_ERROR_BIT) {
2863 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
2864 opcode, type);
2865 return -EIO;
2866 }
2867
2868 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
2869 opcode, type);
2870 return 0;
2871}
2872
2873#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
2874#define hpsa_noop(p) hpsa_message(p, 3, 0)
2875
2876static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
2877{
2878/* the #defines are stolen from drivers/pci/msi.h. */
2879#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
2880#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
2881
2882 int pos;
2883 u16 control = 0;
2884
2885 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
2886 if (pos) {
2887 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2888 if (control & PCI_MSI_FLAGS_ENABLE) {
2889 dev_info(&pdev->dev, "resetting MSI\n");
2890 pci_write_config_word(pdev, msi_control_reg(pos),
2891 control & ~PCI_MSI_FLAGS_ENABLE);
2892 }
2893 }
2894
2895 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
2896 if (pos) {
2897 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2898 if (control & PCI_MSIX_FLAGS_ENABLE) {
2899 dev_info(&pdev->dev, "resetting MSI-X\n");
2900 pci_write_config_word(pdev, msi_control_reg(pos),
2901 control & ~PCI_MSIX_FLAGS_ENABLE);
2902 }
2903 }
2904
2905 return 0;
2906}
2907
2908/* This does a hard reset of the controller using PCI power management
2909 * states.
2910 */
2911static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
2912{
2913 u16 pmcsr, saved_config_space[32];
2914 int i, pos;
2915
2916 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2917
2918 /* This is very nearly the same thing as
2919 *
2920 * pci_save_state(pci_dev);
2921 * pci_set_power_state(pci_dev, PCI_D3hot);
2922 * pci_set_power_state(pci_dev, PCI_D0);
2923 * pci_restore_state(pci_dev);
2924 *
2925 * but we can't use these nice canned kernel routines on
2926 * kexec, because they also check the MSI/MSI-X state in PCI
2927 * configuration space and do the wrong thing when it is
2928 * set/cleared. Also, the pci_save/restore_state functions
2929 * violate the ordering requirements for restoring the
2930 * configuration space from the CCISS document (see the
2931 * comment below). So we roll our own ....
2932 */
2933
2934 for (i = 0; i < 32; i++)
2935 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
2936
2937 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
2938 if (pos == 0) {
2939 dev_err(&pdev->dev,
2940 "hpsa_reset_controller: PCI PM not supported\n");
2941 return -ENODEV;
2942 }
2943
2944 /* Quoting from the Open CISS Specification: "The Power
2945 * Management Control/Status Register (CSR) controls the power
2946 * state of the device. The normal operating state is D0,
2947 * CSR=00h. The software off state is D3, CSR=03h. To reset
2948 * the controller, place the interface device in D3 then to
2949 * D0, this causes a secondary PCI reset which will reset the
2950 * controller."
2951 */
2952
2953 /* enter the D3hot power management state */
2954 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
2955 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2956 pmcsr |= PCI_D3hot;
2957 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
2958
2959 msleep(500);
2960
2961 /* enter the D0 power management state */
2962 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2963 pmcsr |= PCI_D0;
2964 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
2965
2966 msleep(500);
2967
2968 /* Restore the PCI configuration space. The Open CISS
2969 * Specification says, "Restore the PCI Configuration
2970 * Registers, offsets 00h through 60h. It is important to
2971 * restore the command register, 16-bits at offset 04h,
2972 * last. Do not restore the configuration status register,
2973 * 16-bits at offset 06h." Note that the offset is 2*i.
2974 */
2975 for (i = 0; i < 32; i++) {
2976 if (i == 2 || i == 3)
2977 continue;
2978 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
2979 }
2980 wmb();
2981 pci_write_config_word(pdev, 4, saved_config_space[2]);
2982
2983 return 0;
2984}
2985
2986/*
2987 * We cannot read the structure directly, for portability we must use
2988 * the io functions.
2989 * This is for debug only.
2990 */
2991#ifdef HPSA_DEBUG
2992static void print_cfg_table(struct device *dev, struct CfgTable *tb)
2993{
2994 int i;
2995 char temp_name[17];
2996
2997 dev_info(dev, "Controller Configuration information\n");
2998 dev_info(dev, "------------------------------------\n");
2999 for (i = 0; i < 4; i++)
3000 temp_name[i] = readb(&(tb->Signature[i]));
3001 temp_name[4] = '\0';
3002 dev_info(dev, " Signature = %s\n", temp_name);
3003 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3004 dev_info(dev, " Transport methods supported = 0x%x\n",
3005 readl(&(tb->TransportSupport)));
3006 dev_info(dev, " Transport methods active = 0x%x\n",
3007 readl(&(tb->TransportActive)));
3008 dev_info(dev, " Requested transport Method = 0x%x\n",
3009 readl(&(tb->HostWrite.TransportRequest)));
3010 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3011 readl(&(tb->HostWrite.CoalIntDelay)));
3012 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3013 readl(&(tb->HostWrite.CoalIntCount)));
3014 	dev_info(dev, "   Max outstanding commands = %d\n",
3015 readl(&(tb->CmdsOutMax)));
3016 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3017 for (i = 0; i < 16; i++)
3018 temp_name[i] = readb(&(tb->ServerName[i]));
3019 temp_name[16] = '\0';
3020 dev_info(dev, " Server Name = %s\n", temp_name);
3021 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3022 readl(&(tb->HeartBeat)));
3023}
3024#endif /* HPSA_DEBUG */
3025
3026static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3027{
3028 int i, offset, mem_type, bar_type;
3029
3030 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3031 return 0;
3032 offset = 0;
3033 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3034 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3035 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3036 offset += 4;
3037 else {
3038 mem_type = pci_resource_flags(pdev, i) &
3039 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3040 switch (mem_type) {
3041 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3042 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3043 offset += 4; /* 32 bit */
3044 break;
3045 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3046 offset += 8;
3047 break;
3048 default: /* reserved in PCI 2.2 */
3049 dev_warn(&pdev->dev,
3050 "base address is invalid\n");
3051 return -1;
3052 break;
3053 }
3054 }
3055 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3056 return i + 1;
3057 }
3058 return -1;
3059}
3060
3061/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3062 * controllers that are capable. If not, we use IO-APIC mode.
3063 */
3064
3065static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3066 struct pci_dev *pdev, __u32 board_id)
3067{
3068#ifdef CONFIG_PCI_MSI
3069 int err;
3070 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3071 {0, 2}, {0, 3}
3072 };
3073
3074 /* Some boards advertise MSI but don't really support it */
3075 if ((board_id == 0x40700E11) ||
3076 (board_id == 0x40800E11) ||
3077 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3078 goto default_int_mode;
3079 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3080 dev_info(&pdev->dev, "MSIX\n");
3081 err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
3082 if (!err) {
3083 h->intr[0] = hpsa_msix_entries[0].vector;
3084 h->intr[1] = hpsa_msix_entries[1].vector;
3085 h->intr[2] = hpsa_msix_entries[2].vector;
3086 h->intr[3] = hpsa_msix_entries[3].vector;
3087 h->msix_vector = 1;
3088 return;
3089 }
3090 if (err > 0) {
3091 dev_warn(&pdev->dev, "only %d MSI-X vectors "
3092 "available\n", err);
3093 goto default_int_mode;
3094 } else {
3095 dev_warn(&pdev->dev, "MSI-X init failed %d\n",
3096 err);
3097 goto default_int_mode;
3098 }
3099 }
3100 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3101 dev_info(&pdev->dev, "MSI\n");
3102 if (!pci_enable_msi(pdev))
3103 h->msi_vector = 1;
3104 else
3105 dev_warn(&pdev->dev, "MSI init failed\n");
3106 }
3107default_int_mode:
3108#endif /* CONFIG_PCI_MSI */
3109 /* if we get here we're going to use the default interrupt mode */
3110 h->intr[SIMPLE_MODE_INT] = pdev->irq;
3111 return;
3112}
3113
3114static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3115{
3116 ushort subsystem_vendor_id, subsystem_device_id, command;
3117 __u32 board_id, scratchpad = 0;
3118 __u64 cfg_offset;
3119 __u32 cfg_base_addr;
3120 __u64 cfg_base_addr_index;
3121 int i, prod_index, err;
3122
3123 subsystem_vendor_id = pdev->subsystem_vendor;
3124 subsystem_device_id = pdev->subsystem_device;
3125 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3126 subsystem_vendor_id);
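	/* For example, a subsystem device ID of 0x3225 combined with the HP
	 * subsystem vendor ID 0x103C yields board_id 0x3225103C, the value
	 * tested for the P600 prefetch workaround further down.
	 */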
3127
3128 for (i = 0; i < ARRAY_SIZE(products); i++)
3129 if (board_id == products[i].board_id)
3130 break;
3131
3132 prod_index = i;
3133
3134 if (prod_index == ARRAY_SIZE(products)) {
3135 prod_index--;
3136 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3137 !hpsa_allow_any) {
3138 dev_warn(&pdev->dev, "unrecognized board ID:"
3139 " 0x%08lx, ignoring.\n",
3140 (unsigned long) board_id);
3141 return -ENODEV;
3142 }
3143 }
3144 /* check to see if controller has been disabled
3145 * BEFORE trying to enable it
3146 */
3147 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3148 if (!(command & 0x02)) {
3149 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3150 return -ENODEV;
3151 }
3152
3153 err = pci_enable_device(pdev);
3154 if (err) {
3155 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3156 return err;
3157 }
3158
3159 err = pci_request_regions(pdev, "hpsa");
3160 if (err) {
3161 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3162 return err;
3163 }
3164
3165 /* If the kernel supports MSI/MSI-X we will try to enable that,
3166 * else we use the IO-APIC interrupt assigned to us by system ROM.
3167 */
3168 hpsa_interrupt_mode(h, pdev, board_id);
3169
3170 /* find the memory BAR */
3171 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3172 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3173 break;
3174 }
3175 if (i == DEVICE_COUNT_RESOURCE) {
3176 dev_warn(&pdev->dev, "no memory BAR found\n");
3177 err = -ENODEV;
3178 goto err_out_free_res;
3179 }
3180
3181 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3182 * already removed
3183 */
3184
3185 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3186
3187 /* Wait for the board to become ready. */
3188 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3189 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3190 if (scratchpad == HPSA_FIRMWARE_READY)
3191 break;
3192 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3193 }
3194 if (scratchpad != HPSA_FIRMWARE_READY) {
3195 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3196 err = -ENODEV;
3197 goto err_out_free_res;
3198 }
3199
3200 /* get the address index number */
3201 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3202 cfg_base_addr &= (__u32) 0x0000ffff;
3203 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3204 if (cfg_base_addr_index == -1) {
3205 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3206 err = -ENODEV;
3207 goto err_out_free_res;
3208 }
3209
3210 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3211 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3212 cfg_base_addr_index) + cfg_offset,
3213 sizeof(h->cfgtable));
3214 h->board_id = board_id;
3215
3216 /* Query controller for max supported commands: */
3217 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3218
3219 h->product_name = products[prod_index].product_name;
3220 h->access = *(products[prod_index].access);
3221 /* Allow room for some ioctls */
3222 h->nr_cmds = h->max_commands - 4;
3223
3224 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3225 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3226 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3227 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3228 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3229 err = -ENODEV;
3230 goto err_out_free_res;
3231 }
3232#ifdef CONFIG_X86
3233 {
3234 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3235 __u32 prefetch;
3236 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3237 prefetch |= 0x100;
3238 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3239 }
3240#endif
3241
3242 	/* Disable DMA prefetch for the P600.
3243 	 * An ASIC bug may result in a prefetch beyond
3244 * physical memory.
3245 */
3246 if (board_id == 0x3225103C) {
3247 __u32 dma_prefetch;
3248 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3249 dma_prefetch |= 0x8000;
3250 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3251 }
3252
3253 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3254 /* Update the field, and then ring the doorbell */
3255 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3256 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3257
3258 	/* under certain very rare conditions, this can take a while.
3259 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3260 * as we enter this code.)
3261 */
3262 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3263 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3264 break;
3265 /* delay and try again */
3266 msleep(10);
3267 }
3268
3269#ifdef HPSA_DEBUG
3270 print_cfg_table(&pdev->dev, h->cfgtable);
3271#endif /* HPSA_DEBUG */
3272
3273 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3274 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3275 err = -ENODEV;
3276 goto err_out_free_res;
3277 }
3278 return 0;
3279
3280err_out_free_res:
3281 /*
3282 * Deliberately omit pci_disable_device(): it does something nasty to
3283 * Smart Array controllers that pci_enable_device does not undo
3284 */
3285 pci_release_regions(pdev);
3286 return err;
3287}
3288
3289static int __devinit hpsa_init_one(struct pci_dev *pdev,
3290 const struct pci_device_id *ent)
3291{
3292 int i;
3293 int dac;
3294 struct ctlr_info *h;
3295
3296 if (number_of_controllers == 0)
3297 printk(KERN_INFO DRIVER_NAME "\n");
3298 if (reset_devices) {
3299 /* Reset the controller with a PCI power-cycle */
3300 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3301 return -ENODEV;
3302
3303 /* Some devices (notably the HP Smart Array 5i Controller)
3304 need a little pause here */
3305 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3306
3307 /* Now try to get the controller to respond to a no-op */
3308 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3309 if (hpsa_noop(pdev) == 0)
3310 break;
3311 else
3312 dev_warn(&pdev->dev, "no-op failed%s\n",
3313 (i < 11 ? "; re-trying" : ""));
3314 }
3315 }
3316
3317 BUILD_BUG_ON(sizeof(struct CommandList) % 8);
3318 h = kzalloc(sizeof(*h), GFP_KERNEL);
3319 if (!h)
3320 return -1;
3321
3322 h->busy_initializing = 1;
3323 INIT_HLIST_HEAD(&h->cmpQ);
3324 INIT_HLIST_HEAD(&h->reqQ);
3325 mutex_init(&h->busy_shutting_down);
3326 init_completion(&h->scan_wait);
3327 if (hpsa_pci_init(h, pdev) != 0)
3328 goto clean1;
3329
3330 sprintf(h->devname, "hpsa%d", number_of_controllers);
3331 h->ctlr = number_of_controllers;
3332 number_of_controllers++;
3333 h->pdev = pdev;
3334
3335 /* configure PCI DMA stuff */
3336 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3337 dac = 1;
3338 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
3339 dac = 0;
3340 else {
3341 dev_err(&pdev->dev, "no suitable DMA available\n");
3342 goto clean1;
3343 }
3344
3345 /* make sure the board interrupts are off */
3346 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3347 if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
3348 IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
3349 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3350 h->intr[SIMPLE_MODE_INT], h->devname);
3351 goto clean2;
3352 }
3353
3354 dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3355 h->devname, pdev->device, pci_name(pdev),
3356 h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3357
3358 h->cmd_pool_bits =
3359 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3360 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3361 h->cmd_pool = pci_alloc_consistent(h->pdev,
3362 h->nr_cmds * sizeof(*h->cmd_pool),
3363 &(h->cmd_pool_dhandle));
3364 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3365 h->nr_cmds * sizeof(*h->errinfo_pool),
3366 &(h->errinfo_pool_dhandle));
3367 if ((h->cmd_pool_bits == NULL)
3368 || (h->cmd_pool == NULL)
3369 || (h->errinfo_pool == NULL)) {
3370 dev_err(&pdev->dev, "out of memory");
3371 goto clean4;
3372 }
3373 spin_lock_init(&h->lock);
3374
3375 pci_set_drvdata(pdev, h);
3376 memset(h->cmd_pool_bits, 0,
3377 ((h->nr_cmds + BITS_PER_LONG -
3378 1) / BITS_PER_LONG) * sizeof(unsigned long));
3379
3380 hpsa_scsi_setup(h);
3381
3382 /* Turn the interrupts on so we can service requests */
3383 h->access.set_intr_mask(h, HPSA_INTR_ON);
3384
3385 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3386 h->busy_initializing = 0;
3387 return 1;
3388
3389clean4:
3390 kfree(h->cmd_pool_bits);
3391 if (h->cmd_pool)
3392 pci_free_consistent(h->pdev,
3393 h->nr_cmds * sizeof(struct CommandList),
3394 h->cmd_pool, h->cmd_pool_dhandle);
3395 if (h->errinfo_pool)
3396 pci_free_consistent(h->pdev,
3397 h->nr_cmds * sizeof(struct ErrorInfo),
3398 h->errinfo_pool,
3399 h->errinfo_pool_dhandle);
3400 free_irq(h->intr[SIMPLE_MODE_INT], h);
3401clean2:
3402clean1:
3403 h->busy_initializing = 0;
3404 kfree(h);
3405 return -1;
3406}
3407
3408static void hpsa_flush_cache(struct ctlr_info *h)
3409{
3410 char *flush_buf;
3411 struct CommandList *c;
3412
3413 flush_buf = kzalloc(4, GFP_KERNEL);
3414 if (!flush_buf)
3415 return;
3416
3417 c = cmd_special_alloc(h);
3418 if (!c) {
3419 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3420 goto out_of_memory;
3421 }
3422 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3423 RAID_CTLR_LUNID, TYPE_CMD);
3424 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3425 if (c->err_info->CommandStatus != 0)
3426 dev_warn(&h->pdev->dev,
3427 "error flushing cache on controller\n");
3428 cmd_special_free(h, c);
3429out_of_memory:
3430 kfree(flush_buf);
3431}
3432
3433static void hpsa_shutdown(struct pci_dev *pdev)
3434{
3435 struct ctlr_info *h;
3436
3437 h = pci_get_drvdata(pdev);
3438 	/* Send the flush cache command to write all data in the
3439 	 * battery-backed cache out to disk, then turn the board
3440 	 * interrupts off.
3441 */
3442 hpsa_flush_cache(h);
3443 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3444 free_irq(h->intr[2], h);
3445#ifdef CONFIG_PCI_MSI
3446 if (h->msix_vector)
3447 pci_disable_msix(h->pdev);
3448 else if (h->msi_vector)
3449 pci_disable_msi(h->pdev);
3450#endif /* CONFIG_PCI_MSI */
3451}
3452
3453static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3454{
3455 struct ctlr_info *h;
3456
3457 if (pci_get_drvdata(pdev) == NULL) {
3458 		dev_err(&pdev->dev, "unable to remove device\n");
3459 return;
3460 }
3461 h = pci_get_drvdata(pdev);
3462 mutex_lock(&h->busy_shutting_down);
3463 remove_from_scan_list(h);
3464 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3465 hpsa_shutdown(pdev);
3466 iounmap(h->vaddr);
3467 pci_free_consistent(h->pdev,
3468 h->nr_cmds * sizeof(struct CommandList),
3469 h->cmd_pool, h->cmd_pool_dhandle);
3470 pci_free_consistent(h->pdev,
3471 h->nr_cmds * sizeof(struct ErrorInfo),
3472 h->errinfo_pool, h->errinfo_pool_dhandle);
3473 kfree(h->cmd_pool_bits);
3474 /*
3475 * Deliberately omit pci_disable_device(): it does something nasty to
3476 * Smart Array controllers that pci_enable_device does not undo
3477 */
3478 pci_release_regions(pdev);
3479 pci_set_drvdata(pdev, NULL);
3480 mutex_unlock(&h->busy_shutting_down);
3481 kfree(h);
3482}
3483
3484static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3485 __attribute__((unused)) pm_message_t state)
3486{
3487 return -ENOSYS;
3488}
3489
3490static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3491{
3492 return -ENOSYS;
3493}
3494
3495static struct pci_driver hpsa_pci_driver = {
3496 .name = "hpsa",
3497 .probe = hpsa_init_one,
3498 .remove = __devexit_p(hpsa_remove_one),
3499 .id_table = hpsa_pci_device_id, /* id_table */
3500 .shutdown = hpsa_shutdown,
3501 .suspend = hpsa_suspend,
3502 .resume = hpsa_resume,
3503};
3504
3505/*
3506 * This is it. Register the PCI driver information for the cards we control;
3507 * the OS will call our registered routines when it finds one of our cards.
3508 */
3509static int __init hpsa_init(void)
3510{
3511 int err;
3512 /* Start the scan thread */
3513 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3514 if (IS_ERR(hpsa_scan_thread)) {
3515 err = PTR_ERR(hpsa_scan_thread);
3516 return -ENODEV;
3517 }
3518 err = pci_register_driver(&hpsa_pci_driver);
3519 if (err)
3520 kthread_stop(hpsa_scan_thread);
3521 return err;
3522}
3523
3524static void __exit hpsa_cleanup(void)
3525{
3526 pci_unregister_driver(&hpsa_pci_driver);
3527 kthread_stop(hpsa_scan_thread);
3528}
3529
3530module_init(hpsa_init);
3531module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 000000000000..6bd1949144b5
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,273 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_H
22#define HPSA_H
23
24#include <scsi/scsicam.h>
25
26#define IO_OK 0
27#define IO_ERROR 1
28
29struct ctlr_info;
30
31struct access_method {
32 void (*submit_command)(struct ctlr_info *h,
33 struct CommandList *c);
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 unsigned long (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h);
38};
39
40struct hpsa_scsi_dev_t {
41 int devtype;
42 int bus, target, lun; /* as presented to the OS */
43 unsigned char scsi3addr[8]; /* as presented to the HW */
44#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
49 unsigned char raid_level; /* from inquiry page 0xC1 */
50};
51
52struct ctlr_info {
53 int ctlr;
54 char devname[8];
55 char *product_name;
56 char firm_ver[4]; /* Firmware version */
57 struct pci_dev *pdev;
58 __u32 board_id;
59 void __iomem *vaddr;
60 unsigned long paddr;
61 int nr_cmds; /* Number of commands allowed on this controller */
62 struct CfgTable __iomem *cfgtable;
63 int interrupts_enabled;
64 int major;
65 int max_commands;
66 int commands_outstanding;
67 int max_outstanding; /* Debug */
68 	int usage_count;  /* number of opens on all minor devices */
69# define DOORBELL_INT 0
70# define PERF_MODE_INT 1
71# define SIMPLE_MODE_INT 2
72# define MEMQ_MODE_INT 3
73 unsigned int intr[4];
74 unsigned int msix_vector;
75 unsigned int msi_vector;
76 struct access_method access;
77
78 /* queue and queue Info */
79 struct hlist_head reqQ;
80 struct hlist_head cmpQ;
81 unsigned int Qdepth;
82 unsigned int maxQsinceinit;
83 unsigned int maxSG;
84 spinlock_t lock;
85
86 /* pointers to command and error info pool */
87 struct CommandList *cmd_pool;
88 dma_addr_t cmd_pool_dhandle;
89 struct ErrorInfo *errinfo_pool;
90 dma_addr_t errinfo_pool_dhandle;
91 unsigned long *cmd_pool_bits;
92 int nr_allocs;
93 int nr_frees;
94 int busy_initializing;
95 int busy_scanning;
96 struct mutex busy_shutting_down;
97 struct list_head scan_list;
98 struct completion scan_wait;
99
100 struct Scsi_Host *scsi_host;
101 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
102 int ndevices; /* number of used elements in .dev[] array. */
103#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
104 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
105};
106#define HPSA_ABORT_MSG 0
107#define HPSA_DEVICE_RESET_MSG 1
108#define HPSA_BUS_RESET_MSG 2
109#define HPSA_HOST_RESET_MSG 3
110#define HPSA_MSG_SEND_RETRY_LIMIT 10
111#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
112
113/* Maximum time in seconds driver will wait for command completions
114 * when polling before giving up.
115 */
116#define HPSA_MAX_POLL_TIME_SECS (20)
117
118/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
119 * how many times to retry TEST UNIT READY on a device
120 * while waiting for it to become ready before giving up.
121 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
122 * between sending TURs while waiting for a device
123 * to become ready.
124 */
125#define HPSA_TUR_RETRY_LIMIT (20)
126#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
127
128/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
129 * to become ready, in seconds, before giving up on it.
130 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
131 * between polling the board to see if it is ready, in
132 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
133 * HPSA_BOARD_READY_ITERATIONS are derived from those.
134 */
135#define HPSA_BOARD_READY_WAIT_SECS (120)
136#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
137#define HPSA_BOARD_READY_POLL_INTERVAL \
138 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
139#define HPSA_BOARD_READY_ITERATIONS \
140 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
141 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
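/* With the values above, this works out to one poll every HZ/10 jiffies
 * (100 ms) and (120 * 1000) / 100 = 1200 polls before the board is declared
 * not ready.
 */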
142#define HPSA_POST_RESET_PAUSE_MSECS (3000)
143#define HPSA_POST_RESET_NOOP_RETRIES (12)
144
145/* Defining the different access methods */
146/*
147 * Memory mapped FIFO interface (SMART 53xx cards)
148 */
149#define SA5_DOORBELL 0x20
150#define SA5_REQUEST_PORT_OFFSET 0x40
151#define SA5_REPLY_INTR_MASK_OFFSET 0x34
152#define SA5_REPLY_PORT_OFFSET 0x44
153#define SA5_INTR_STATUS 0x30
154#define SA5_SCRATCHPAD_OFFSET 0xB0
155
156#define SA5_CTCFG_OFFSET 0xB4
157#define SA5_CTMEM_OFFSET 0xB8
158
159#define SA5_INTR_OFF 0x08
160#define SA5B_INTR_OFF 0x04
161#define SA5_INTR_PENDING 0x08
162#define SA5B_INTR_PENDING 0x04
163#define FIFO_EMPTY 0xffffffff
164#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
165
166#define HPSA_ERROR_BIT 0x02
167#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
168#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
169#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
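/* For example, a completion tag of 0x2c has bit 2 set, so it carries a
 * command pool index, namely 0x2c >> 3 = 5; tags without an index are bus
 * addresses, recovered by masking off the two low error bits with ~3.
 */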
170
171#define HPSA_INTR_ON 1
172#define HPSA_INTR_OFF 0
173/*
174 Send the command to the hardware
175*/
176static void SA5_submit_command(struct ctlr_info *h,
177 struct CommandList *c)
178{
179#ifdef HPSA_DEBUG
180 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
181 c->busaddr);
182#endif /* HPSA_DEBUG */
183 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
184 h->commands_outstanding++;
185 if (h->commands_outstanding > h->max_outstanding)
186 h->max_outstanding = h->commands_outstanding;
187}
188
189/*
190 * This card is the opposite of the other cards.
191 * 0 turns interrupts on...
192 * 0x08 turns them off...
193 */
194static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
195{
196 if (val) { /* Turn interrupts on */
197 h->interrupts_enabled = 1;
198 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
199 } else { /* Turn them off */
200 h->interrupts_enabled = 0;
201 writel(SA5_INTR_OFF,
202 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
203 }
204}
205/*
206 * Returns true if fifo is full.
207 *
208 */
209static unsigned long SA5_fifo_full(struct ctlr_info *h)
210{
211 if (h->commands_outstanding >= h->max_commands)
212 return 1;
213 else
214 return 0;
215
216}
217/*
218 * returns value read from hardware.
219 * returns FIFO_EMPTY if there is nothing to read
220 */
221static unsigned long SA5_completed(struct ctlr_info *h)
222{
223 unsigned long register_value
224 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
225
226 if (register_value != FIFO_EMPTY)
227 h->commands_outstanding--;
228
229#ifdef HPSA_DEBUG
230 if (register_value != FIFO_EMPTY)
231 printk(KERN_INFO "hpsa: Read %lx back from board\n",
232 register_value);
233 else
234 printk(KERN_INFO "hpsa: FIFO Empty read\n");
235#endif
236
237 return register_value;
238}
239/*
240 * Returns true if an interrupt is pending..
241 */
242static unsigned long SA5_intr_pending(struct ctlr_info *h)
243{
244 unsigned long register_value =
245 readl(h->vaddr + SA5_INTR_STATUS);
246#ifdef HPSA_DEBUG
247 printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
248#endif /* HPSA_DEBUG */
249 if (register_value & SA5_INTR_PENDING)
250 return 1;
251 return 0 ;
252}
253
254
255static struct access_method SA5_access = {
256 SA5_submit_command,
257 SA5_intr_mask,
258 SA5_fifo_full,
259 SA5_intr_pending,
260 SA5_completed,
261};
262
263struct board_type {
264 __u32 board_id;
265 char *product_name;
266 struct access_method *access;
267};
268
269
270/* end of old hpsa_scsi.h file */
271
272#endif /* HPSA_H */
273
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 000000000000..12d71387ed9a
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,326 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_CMD_H
22#define HPSA_CMD_H
23
24/* general boundary definitions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 31
27#define MAXREPLYQS 256
28
29/* Command Status value */
30#define CMD_SUCCESS 0x0000
31#define CMD_TARGET_STATUS 0x0001
32#define CMD_DATA_UNDERRUN 0x0002
33#define CMD_DATA_OVERRUN 0x0003
34#define CMD_INVALID 0x0004
35#define CMD_PROTOCOL_ERR 0x0005
36#define CMD_HARDWARE_ERR 0x0006
37#define CMD_CONNECTION_LOST 0x0007
38#define CMD_ABORTED 0x0008
39#define CMD_ABORT_FAILED 0x0009
40#define CMD_UNSOLICITED_ABORT 0x000A
41#define CMD_TIMEOUT 0x000B
42#define CMD_UNABORTABLE 0x000C
43
44/* Unit Attentions ASC's as defined for the MSA2012sa */
45#define POWER_OR_RESET 0x29
46#define STATE_CHANGED 0x2a
47#define UNIT_ATTENTION_CLEARED 0x2f
48#define LUN_FAILED 0x3e
49#define REPORT_LUNS_CHANGED 0x3f
50
51/* Unit Attentions ASCQ's as defined for the MSA2012sa */
52
53 /* These ASCQ's defined for ASC = POWER_OR_RESET */
54#define POWER_ON_RESET 0x00
55#define POWER_ON_REBOOT 0x01
56#define SCSI_BUS_RESET 0x02
57#define MSA_TARGET_RESET 0x03
58#define CONTROLLER_FAILOVER 0x04
59#define TRANSCEIVER_SE 0x05
60#define TRANSCEIVER_LVD 0x06
61
62 /* These ASCQ's defined for ASC = STATE_CHANGED */
63#define RESERVATION_PREEMPTED 0x03
64#define ASYM_ACCESS_CHANGED 0x06
65#define LUN_CAPACITY_CHANGED 0x09
66
67/* transfer direction */
68#define XFER_NONE 0x00
69#define XFER_WRITE 0x01
70#define XFER_READ 0x02
71#define XFER_RSVD 0x03
72
73/* task attribute */
74#define ATTR_UNTAGGED 0x00
75#define ATTR_SIMPLE 0x04
76#define ATTR_HEADOFQUEUE 0x05
77#define ATTR_ORDERED 0x06
78#define ATTR_ACA 0x07
79
80/* cdb type */
81#define TYPE_CMD 0x00
82#define TYPE_MSG 0x01
83
84/* config space register offsets */
85#define CFG_VENDORID 0x00
86#define CFG_DEVICEID 0x02
87#define CFG_I2OBAR 0x10
88#define CFG_MEM1BAR 0x14
89
90/* i2o space register offsets */
91#define I2O_IBDB_SET 0x20
92#define I2O_IBDB_CLEAR 0x70
93#define I2O_INT_STATUS 0x30
94#define I2O_INT_MASK 0x34
95#define I2O_IBPOST_Q 0x40
96#define I2O_OBPOST_Q 0x44
97#define I2O_DMA1_CFG 0x214
98
99/* Configuration Table */
100#define CFGTBL_ChangeReq 0x00000001l
101#define CFGTBL_AccCmds 0x00000001l
102
103#define CFGTBL_Trans_Simple 0x00000002l
104
105#define CFGTBL_BusType_Ultra2 0x00000001l
106#define CFGTBL_BusType_Ultra3 0x00000002l
107#define CFGTBL_BusType_Fibre1G 0x00000100l
108#define CFGTBL_BusType_Fibre2G 0x00000200l
109struct vals32 {
110 __u32 lower;
111 __u32 upper;
112};
113
114union u64bit {
115 struct vals32 val32;
116 __u64 val;
117};
118
119/* FIXME this is a per controller value (barf!) */
120#define HPSA_MAX_TARGETS_PER_CTLR 16
121#define HPSA_MAX_LUN 256
122#define HPSA_MAX_PHYS_LUN 1024
123
124/* SCSI-3 Commands */
125#pragma pack(1)
126
127#define HPSA_INQUIRY 0x12
128struct InquiryData {
129 __u8 data_byte[36];
130};
131
132#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
133#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
134struct ReportLUNdata {
135 __u8 LUNListLength[4];
136 __u32 reserved;
137 __u8 LUN[HPSA_MAX_LUN][8];
138};
139
140struct ReportExtendedLUNdata {
141 __u8 LUNListLength[4];
142 __u8 extended_response_flag;
143 __u8 reserved[3];
144 __u8 LUN[HPSA_MAX_LUN][24];
145};
146
147struct SenseSubsystem_info {
148 __u8 reserved[36];
149 __u8 portname[8];
150 __u8 reserved1[1108];
151};
152
153#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
154struct ReadCapdata {
155 __u8 total_size[4]; /* Total size in blocks */
156 __u8 block_size[4]; /* Size of blocks in bytes */
157};
158
159#if 0
160/* 12 byte commands not implemented in firmware yet. */
161#define HPSA_READ 0xa8
162#define HPSA_WRITE 0xaa
163#endif
164
165#define HPSA_READ 0x28 /* Read(10) */
166#define HPSA_WRITE 0x2a /* Write(10) */
167
168/* BMIC commands */
169#define BMIC_READ 0x26
170#define BMIC_WRITE 0x27
171#define BMIC_CACHE_FLUSH 0xc2
172#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
173
174/* Command List Structure */
175union SCSI3Addr {
176 struct {
177 __u8 Dev;
178 __u8 Bus:6;
179 __u8 Mode:2; /* b00 */
180 } PeripDev;
181 struct {
182 __u8 DevLSB;
183 __u8 DevMSB:6;
184 __u8 Mode:2; /* b01 */
185 } LogDev;
186 struct {
187 __u8 Dev:5;
188 __u8 Bus:3;
189 __u8 Targ:6;
190 __u8 Mode:2; /* b10 */
191 } LogUnit;
192};
193
194struct PhysDevAddr {
195 __u32 TargetId:24;
196 __u32 Bus:6;
197 __u32 Mode:2;
198 /* 2 level target device addr */
199 union SCSI3Addr Target[2];
200};
201
202struct LogDevAddr {
203 __u32 VolId:30;
204 __u32 Mode:2;
205 __u8 reserved[4];
206};
207
208union LUNAddr {
209 __u8 LunAddrBytes[8];
210 union SCSI3Addr SCSI3Lun[4];
211 struct PhysDevAddr PhysDev;
212 struct LogDevAddr LogDev;
213};
214
215struct CommandListHeader {
216 __u8 ReplyQueue;
217 __u8 SGList;
218 __u16 SGTotal;
219 struct vals32 Tag;
220 union LUNAddr LUN;
221};
222
223struct RequestBlock {
224 __u8 CDBLen;
225 struct {
226 __u8 Type:3;
227 __u8 Attribute:3;
228 __u8 Direction:2;
229 } Type;
230 __u16 Timeout;
231 __u8 CDB[16];
232};
233
234struct ErrDescriptor {
235 struct vals32 Addr;
236 __u32 Len;
237};
238
239struct SGDescriptor {
240 struct vals32 Addr;
241 __u32 Len;
242 __u32 Ext;
243};
244
245union MoreErrInfo {
246 struct {
247 __u8 Reserved[3];
248 __u8 Type;
249 __u32 ErrorInfo;
250 } Common_Info;
251 struct {
252 __u8 Reserved[2];
253 __u8 offense_size; /* size of offending entry */
254 __u8 offense_num; /* byte # of offense 0-base */
255 __u32 offense_value;
256 } Invalid_Cmd;
257};
258struct ErrorInfo {
259 __u8 ScsiStatus;
260 __u8 SenseLen;
261 __u16 CommandStatus;
262 __u32 ResidualCnt;
263 union MoreErrInfo MoreErrInfo;
264 __u8 SenseInfo[SENSEINFOBYTES];
265};
266/* Command types */
267#define CMD_IOCTL_PEND 0x01
268#define CMD_SCSI 0x03
269
270struct ctlr_info; /* defined in hpsa.h */
271/* The size of this structure needs to be divisible by 8
272 * on all architectures, because the controller uses 2
273 * lower bits of the address, and the driver uses 1 lower
274 * bit (3 bits total.)
275 */
276struct CommandList {
277 struct CommandListHeader Header;
278 struct RequestBlock Request;
279 struct ErrDescriptor ErrDesc;
280 struct SGDescriptor SG[MAXSGENTRIES];
281 /* information associated with the command */
282 __u32 busaddr; /* physical addr of this record */
283 struct ErrorInfo *err_info; /* pointer to the allocated mem */
284 struct ctlr_info *h;
285 int cmd_type;
286 long cmdindex;
287 struct hlist_node list;
288 struct CommandList *prev;
289 struct CommandList *next;
290 struct request *rq;
291 struct completion *waiting;
292 int retry_count;
293 void *scsi_cmd;
294};
295
296/* Configuration Table Structure */
297struct HostWrite {
298 __u32 TransportRequest;
299 __u32 Reserved;
300 __u32 CoalIntDelay;
301 __u32 CoalIntCount;
302};
303
304struct CfgTable {
305 __u8 Signature[4];
306 __u32 SpecValence;
307 __u32 TransportSupport;
308 __u32 TransportActive;
309 struct HostWrite HostWrite;
310 __u32 CmdsOutMax;
311 __u32 BusTypes;
312 __u32 Reserved;
313 __u8 ServerName[16];
314 __u32 HeartBeat;
315 __u32 SCSI_Prefetch;
316};
317
318struct hpsa_pci_info {
319 unsigned char bus;
320 unsigned char dev_fn;
321 unsigned short domain;
322 __u32 board_id;
323};
324
325#pragma pack()
326#endif /* HPSA_CMD_H */
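The comment on struct CommandList above requires its size to be a multiple of 8 because the controller owns the two low bits of the command's bus address and the driver claims one more. A small standalone sketch of that stash-flags-in-the-spare-low-bits trick; the flag value and address are made up for the demo.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_MASK   (~(uint32_t)0x7)    /* 8-byte aligned commands: low 3 bits spare */
#define DRIVER_BIT  0x4                 /* illustrative driver-owned flag bit */

int main(void)
{
        uint32_t busaddr = 0x1000;      /* pretend bus address of one command */
        uint32_t tagged;

        assert((busaddr & ~ADDR_MASK) == 0);    /* must start out 8-byte aligned */
        tagged = busaddr | DRIVER_BIT;          /* stash a flag in the spare bits */

        printf("address back out: %#x, flag bits: %#x\n",
               tagged & ADDR_MASK, tagged & ~ADDR_MASK);
        return 0;
}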
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8643f5089361..9e52d16c7c39 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6521 int rc; 6521 int rc;
6522 6522
6523 ENTER; 6523 ENTER;
6524 ioa_cfg->pdev->state_saved = true;
6524 rc = pci_restore_state(ioa_cfg->pdev); 6525 rc = pci_restore_state(ioa_cfg->pdev);
6525 6526
6526 if (rc != PCIBIOS_SUCCESSFUL) { 6527 if (rc != PCIBIOS_SUCCESSFUL) {
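The ipr hunk marks the PCI state as saved before calling pci_restore_state(); on kernels of this generation pci_restore_state() restores nothing unless pdev->state_saved is set, so a reset path whose earlier restore already consumed the flag would otherwise skip the restore silently. A hedged sketch of the pattern, assuming kernel context and that the config space was captured at probe time (function name is illustrative):

#include <linux/errno.h>
#include <linux/pci.h>

/* Sketch only: force a config-space restore during adapter reset. */
static int example_reset_restore(struct pci_dev *pdev)
{
        int rc;

        pdev->state_saved = true;       /* assumption: state was saved at probe */
        rc = pci_restore_state(pdev);   /* returns int on kernels of this era */
        if (rc != PCIBIOS_SUCCESSFUL)
                return -EIO;
        return 0;
}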
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c4b58d042f6f..881d5dfe8c74 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
68 68
69/** 69/**
70 * struct fc_fcp_internal - FCP layer internal data 70 * struct fc_fcp_internal - FCP layer internal data
71 * @scsi_pkt_pool: Memory pool to draw FCP packets from 71 * @scsi_pkt_pool: Memory pool to draw FCP packets from
72 * @scsi_queue_lock: Protects the scsi_pkt_queue
72 * @scsi_pkt_queue: Current FCP packets 73 * @scsi_pkt_queue: Current FCP packets
73 * @last_can_queue_ramp_down_time: ramp down time 74 * @last_can_queue_ramp_down_time: ramp down time
74 * @last_can_queue_ramp_up_time: ramp up time 75 * @last_can_queue_ramp_up_time: ramp up time
75 * @max_can_queue: max can_queue size 76 * @max_can_queue: max can_queue size
76 */ 77 */
77struct fc_fcp_internal { 78struct fc_fcp_internal {
78 mempool_t *scsi_pkt_pool; 79 mempool_t *scsi_pkt_pool;
79 struct list_head scsi_pkt_queue; 80 spinlock_t scsi_queue_lock;
80 unsigned long last_can_queue_ramp_down_time; 81 struct list_head scsi_pkt_queue;
81 unsigned long last_can_queue_ramp_up_time; 82 unsigned long last_can_queue_ramp_down_time;
82 int max_can_queue; 83 unsigned long last_can_queue_ramp_up_time;
84 int max_can_queue;
83}; 85};
84 86
85#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 87#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
410 unsigned long flags; 412 unsigned long flags;
411 413
412 fp = fc_frame_alloc(lport, len); 414 fp = fc_frame_alloc(lport, len);
413 if (!fp) { 415 if (likely(fp))
414 spin_lock_irqsave(lport->host->host_lock, flags); 416 return fp;
415 fc_fcp_can_queue_ramp_down(lport); 417
416 spin_unlock_irqrestore(lport->host->host_lock, flags); 418 /* error case */
417 } 419 spin_lock_irqsave(lport->host->host_lock, flags);
418 return fp; 420 fc_fcp_can_queue_ramp_down(lport);
421 spin_unlock_irqrestore(lport->host->host_lock, flags);
422 return NULL;
419} 423}
420 424
421/** 425/**
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
990 struct scsi_cmnd *sc_cmd; 994 struct scsi_cmnd *sc_cmd;
991 unsigned long flags; 995 unsigned long flags;
992 996
993 spin_lock_irqsave(lport->host->host_lock, flags); 997 spin_lock_irqsave(&si->scsi_queue_lock, flags);
994restart: 998restart:
995 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 999 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
996 sc_cmd = fsp->cmd; 1000 sc_cmd = fsp->cmd;
@@ -1001,7 +1005,7 @@ restart:
1001 continue; 1005 continue;
1002 1006
1003 fc_fcp_pkt_hold(fsp); 1007 fc_fcp_pkt_hold(fsp);
1004 spin_unlock_irqrestore(lport->host->host_lock, flags); 1008 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1005 1009
1006 if (!fc_fcp_lock_pkt(fsp)) { 1010 if (!fc_fcp_lock_pkt(fsp)) {
1007 fc_fcp_cleanup_cmd(fsp, error); 1011 fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ restart:
1010 } 1014 }
1011 1015
1012 fc_fcp_pkt_release(fsp); 1016 fc_fcp_pkt_release(fsp);
1013 spin_lock_irqsave(lport->host->host_lock, flags); 1017 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1014 /* 1018 /*
1015 * while we dropped the lock multiple pkts could 1019 * while we dropped the lock multiple pkts could
1016 * have been released, so we have to start over. 1020 * have been released, so we have to start over.
1017 */ 1021 */
1018 goto restart; 1022 goto restart;
1019 } 1023 }
1020 spin_unlock_irqrestore(lport->host->host_lock, flags); 1024 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1021} 1025}
1022 1026
1023/** 1027/**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
1035 * @fsp: The FCP packet to send 1039 * @fsp: The FCP packet to send
1036 * 1040 *
1037 * Return: Zero for success and -1 for failure 1041 * Return: Zero for success and -1 for failure
1038 * Locks: Called with the host lock and irqs disabled. 1042 * Locks: Called without locks held
1039 */ 1043 */
1040static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) 1044static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1041{ 1045{
1042 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 1046 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
1047 unsigned long flags;
1043 int rc; 1048 int rc;
1044 1049
1045 fsp->cmd->SCp.ptr = (char *)fsp; 1050 fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1049 int_to_scsilun(fsp->cmd->device->lun, 1054 int_to_scsilun(fsp->cmd->device->lun,
1050 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1055 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1051 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1056 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
1052 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1053 1057
1054 spin_unlock_irq(lport->host->host_lock); 1058 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1059 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1060 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1055 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1061 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1056 spin_lock_irq(lport->host->host_lock); 1062 if (unlikely(rc)) {
1057 if (rc) 1063 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1058 list_del(&fsp->list); 1064 list_del(&fsp->list);
1065 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1066 }
1059 1067
1060 return rc; 1068 return rc;
1061} 1069}
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1752 struct fcoe_dev_stats *stats; 1760 struct fcoe_dev_stats *stats;
1753 1761
1754 lport = shost_priv(sc_cmd->device->host); 1762 lport = shost_priv(sc_cmd->device->host);
1763 spin_unlock_irq(lport->host->host_lock);
1755 1764
1756 rval = fc_remote_port_chkready(rport); 1765 rval = fc_remote_port_chkready(rport);
1757 if (rval) { 1766 if (rval) {
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1834 rc = SCSI_MLQUEUE_HOST_BUSY; 1843 rc = SCSI_MLQUEUE_HOST_BUSY;
1835 } 1844 }
1836out: 1845out:
1846 spin_lock_irq(lport->host->host_lock);
1837 return rc; 1847 return rc;
1838} 1848}
1839EXPORT_SYMBOL(fc_queuecommand); 1849EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1864 1874
1865 lport = fsp->lp; 1875 lport = fsp->lp;
1866 si = fc_get_scsi_internal(lport); 1876 si = fc_get_scsi_internal(lport);
1867 spin_lock_irqsave(lport->host->host_lock, flags); 1877 if (!fsp->cmd)
1868 if (!fsp->cmd) {
1869 spin_unlock_irqrestore(lport->host->host_lock, flags);
1870 return; 1878 return;
1871 }
1872 1879
1873 /* 1880 /*
1874 * if can_queue ramp down is done then try can_queue ramp up 1881 * if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1880 sc_cmd = fsp->cmd; 1887 sc_cmd = fsp->cmd;
1881 fsp->cmd = NULL; 1888 fsp->cmd = NULL;
1882 1889
1883 if (!sc_cmd->SCp.ptr) { 1890 if (!sc_cmd->SCp.ptr)
1884 spin_unlock_irqrestore(lport->host->host_lock, flags);
1885 return; 1891 return;
1886 }
1887 1892
1888 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1893 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1889 switch (fsp->status_code) { 1894 switch (fsp->status_code) {
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1945 break; 1950 break;
1946 } 1951 }
1947 1952
1953 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1948 list_del(&fsp->list); 1954 list_del(&fsp->list);
1955 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1949 sc_cmd->SCp.ptr = NULL; 1956 sc_cmd->SCp.ptr = NULL;
1950 sc_cmd->scsi_done(sc_cmd); 1957 sc_cmd->scsi_done(sc_cmd);
1951 spin_unlock_irqrestore(lport->host->host_lock, flags);
1952 1958
1953 /* release ref from initial allocation in queue command */ 1959 /* release ref from initial allocation in queue command */
1954 fc_fcp_pkt_release(fsp); 1960 fc_fcp_pkt_release(fsp);
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
2216 lport->scsi_priv = si; 2222 lport->scsi_priv = si;
2217 si->max_can_queue = lport->host->can_queue; 2223 si->max_can_queue = lport->host->can_queue;
2218 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2224 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2225 spin_lock_init(&si->scsi_queue_lock);
2219 2226
2220 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2227 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2221 if (!si->scsi_pkt_pool) { 2228 if (!si->scsi_pkt_pool) {
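The fc_fcp changes above introduce scsi_queue_lock so that scsi_pkt_queue is protected by its own spinlock rather than the Scsi_Host host lock, letting the send and completion paths touch the list without holding the hotter host lock. A hedged kernel-style sketch of that narrower locking pattern, using illustrative types rather than the libfc structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pkt_queue {
        spinlock_t lock;                /* protects only 'head' */
        struct list_head head;
};

struct pkt {
        struct list_head list;
};

static void pkt_queue_init(struct pkt_queue *q)
{
        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->head);
}

static void pkt_queue_add(struct pkt_queue *q, struct pkt *p)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);     /* short critical section, list only */
        list_add_tail(&p->list, &q->head);
        spin_unlock_irqrestore(&q->lock, flags);
}

static void pkt_queue_del(struct pkt_queue *q, struct pkt *p)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        list_del(&p->list);
        spin_unlock_irqrestore(&q->lock, flags);
}

The fc_fcp_pkt_send() hunk follows the same shape: the new lock is taken only around the list insertion and dropped before calling into the transport, instead of holding the host lock across the whole send.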
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 74338c83ad0a..0b165024a219 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
537 int rc = -1; 537 int rc = -1;
538 538
539 mutex_lock(&lport->lp_mutex); 539 mutex_lock(&lport->lp_mutex);
540 if (lport->state == LPORT_ST_DISABLED) { 540 if (lport->state == LPORT_ST_DISABLED ||
541 lport->state == LPORT_ST_LOGO) {
542 fc_lport_state_enter(lport, LPORT_ST_RESET);
541 fc_lport_enter_reset(lport); 543 fc_lport_enter_reset(lport);
542 rc = 0; 544 rc = 0;
543 } 545 }
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
967 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 969 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
968 fc_lport_state(lport)); 970 fc_lport_state(lport));
969 971
972 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
973 return;
974
970 if (lport->vport) { 975 if (lport->vport) {
971 if (lport->link_up) 976 if (lport->link_up)
972 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 977 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 35ca0e72df46..02300523b234 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
310 restart = 1; 310 restart = 1;
311 else 311 else
312 list_del(&rdata->peers); 312 list_del(&rdata->peers);
313 rdata->event = RPORT_EV_NONE;
313 mutex_unlock(&rdata->rp_mutex); 314 mutex_unlock(&rdata->rp_mutex);
314 mutex_unlock(&lport->disc.disc_mutex); 315 mutex_unlock(&lport->disc.disc_mutex);
315 } 316 }
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 226920d15ea1..d4da6bdd0e73 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4506 pdev = phba->pcidev; 4506 pdev = phba->pcidev;
4507 4507
4508 /* Set the device DMA mask size */ 4508 /* Set the device DMA mask size */
4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4510 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4510 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
4511 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
4512 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
4511 return error; 4513 return error;
4514 }
4515 }
4512 4516
4513 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4517 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4514 * required by each mapping. 4518 * required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6021 pdev = phba->pcidev; 6025 pdev = phba->pcidev;
6022 6026
6023 /* Set the device DMA mask size */ 6027 /* Set the device DMA mask size */
6024 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6028 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6025 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6029 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6030 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6031 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6026 return error; 6032 return error;
6033 }
6034 }
6027 6035
6028 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6036 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6029 * number of bytes required by each mapping. They are actually 6037 * number of bytes required by each mapping. They are actually
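Both lpfc hunks set the streaming and consistent DMA masks together, falling back from 64-bit to 32-bit as a pair rather than leaving the consistent mask at its default. A hedged sketch of that fallback, assuming kernel context (pci_set_dma_mask()/pci_set_consistent_dma_mask() are the interfaces of this era; the function name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: negotiate the widest DMA mask the platform accepts. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
                return 0;               /* full 64-bit streaming + coherent */

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
                return 0;               /* 32-bit fallback */

        return -ENODEV;                 /* no usable DMA configuration */
}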
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 134c63ef6d38..99ff99e45bee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2501 instance->base_addr = pci_resource_start(instance->pdev, 0); 2501 instance->base_addr = pci_resource_start(instance->pdev, 0);
2502 } 2502 }
2503 2503
2504 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2504 if (pci_request_selected_regions(instance->pdev,
2505 pci_select_bars(instance->pdev, IORESOURCE_MEM),
2506 "megasas: LSI")) {
2505 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 2507 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
2506 return -EBUSY; 2508 return -EBUSY;
2507 } 2509 }
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2642 iounmap(instance->reg_set); 2644 iounmap(instance->reg_set);
2643 2645
2644 fail_ioremap: 2646 fail_ioremap:
2645 pci_release_regions(instance->pdev); 2647 pci_release_selected_regions(instance->pdev,
2648 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2646 2649
2647 return -EINVAL; 2650 return -EINVAL;
2648} 2651}
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
2662 2665
2663 iounmap(instance->reg_set); 2666 iounmap(instance->reg_set);
2664 2667
2665 pci_release_regions(instance->pdev); 2668 pci_release_selected_regions(instance->pdev,
2669 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2666} 2670}
2667 2671
2668/** 2672/**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2971 /* 2975 /*
2972 * PCI prepping: enable device set bus mastering and dma mask 2976 * PCI prepping: enable device set bus mastering and dma mask
2973 */ 2977 */
2974 rval = pci_enable_device(pdev); 2978 rval = pci_enable_device_mem(pdev);
2975 2979
2976 if (rval) { 2980 if (rval) {
2977 return rval; 2981 return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
3276 /* 3280 /*
3277 * PCI prepping: enable device set bus mastering and dma mask 3281 * PCI prepping: enable device set bus mastering and dma mask
3278 */ 3282 */
3279 rval = pci_enable_device(pdev); 3283 rval = pci_enable_device_mem(pdev);
3280 3284
3281 if (rval) { 3285 if (rval) {
3282 printk(KERN_ERR "megasas: Enable device failed\n"); 3286 printk(KERN_ERR "megasas: Enable device failed\n");
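The megaraid_sas hunks switch to the memory-BAR-only PCI helpers: pci_enable_device_mem() plus pci_request_selected_regions() over pci_select_bars(pdev, IORESOURCE_MEM), so an I/O-port BAR already claimed elsewhere no longer blocks the probe. A hedged sketch of the probe-side pattern, assuming kernel context and an illustrative resource name:

#include <linux/ioport.h>
#include <linux/pci.h>

/* Sketch only: enable and claim just the memory BARs of a device. */
static int example_claim_mem_bars(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);  /* bitmask of mem BARs */
        int rc;

        rc = pci_enable_device_mem(pdev);       /* enable memory decoding only */
        if (rc)
                return rc;

        rc = pci_request_selected_regions(pdev, bars, "example: LSI");
        if (rc) {
                pci_disable_device(pdev);
                return rc;
        }
        return 0;
}

/* Teardown mirrors it, as in megasas_release_mfi() above:
 *      pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM));
 */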
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6422e258fd52..89d02401b9ec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3584 mutex_init(&ioc->transport_cmds.mutex); 3584 mutex_init(&ioc->transport_cmds.mutex);
3585 3585
3586 /* scsih internal command bits */
3587 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
3589 mutex_init(&ioc->scsih_cmds.mutex);
3590
3586 /* task management internal command bits */ 3591 /* task management internal command bits */
3587 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3592 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3593 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45876c4..cae6b2cf492f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
660 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
660 661
661 { } /* terminate list */ 662 { } /* terminate list */
662}; 663};
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 950202a70bcf..24223473f573 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
432 seg->alloc_size = 0; 432 seg->alloc_size = 0;
433} 433}
434 434
435static void _put_request(struct request *rq , bool is_async) 435static void _put_request(struct request *rq)
436{ 436{
437 if (is_async) { 437 /*
438 WARN_ON(rq->bio); 438 * If osd_finalize_request() was called but the request was not
439 __blk_put_request(rq->q, rq); 439 * executed through the block layer, then we must release BIOs.
440 } else { 440 * TODO: Keep error code in or->async_error. Need to audit all
441 /* 441 * code paths.
442 * If osd_finalize_request() was called but the request was not 442 */
443 * executed through the block layer, then we must release BIOs. 443 if (unlikely(rq->bio))
444 * TODO: Keep error code in or->async_error. Need to audit all 444 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
445 * code paths. 445 else
446 */ 446 blk_put_request(rq);
447 if (unlikely(rq->bio))
448 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
449 else
450 blk_put_request(rq);
451 }
452} 447}
453 448
454void osd_end_request(struct osd_request *or) 449void osd_end_request(struct osd_request *or)
455{ 450{
456 struct request *rq = or->request; 451 struct request *rq = or->request;
457 /* IMPORTANT: make sure this agrees with osd_execute_request_async */
458 bool is_async = (or->request->end_io_data == or);
459 452
460 _osd_free_seg(or, &or->set_attr); 453 _osd_free_seg(or, &or->set_attr);
461 _osd_free_seg(or, &or->enc_get_attr); 454 _osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
463 456
464 if (rq) { 457 if (rq) {
465 if (rq->next_rq) { 458 if (rq->next_rq) {
466 _put_request(rq->next_rq, is_async); 459 _put_request(rq->next_rq);
467 rq->next_rq = NULL; 460 rq->next_rq = NULL;
468 } 461 }
469 462
470 _put_request(rq, is_async); 463 _put_request(rq);
471 } 464 }
472 _osd_request_free(or); 465 _osd_request_free(or);
473} 466}
474EXPORT_SYMBOL(osd_end_request); 467EXPORT_SYMBOL(osd_end_request);
475 468
469static void _set_error_resid(struct osd_request *or, struct request *req,
470 int error)
471{
472 or->async_error = error;
473 or->req_errors = req->errors ? : error;
474 or->sense_len = req->sense_len;
475 if (or->out.req)
476 or->out.residual = or->out.req->resid_len;
477 if (or->in.req)
478 or->in.residual = or->in.req->resid_len;
479}
480
476int osd_execute_request(struct osd_request *or) 481int osd_execute_request(struct osd_request *or)
477{ 482{
478 return or->async_error = 483 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
479 blk_execute_rq(or->request->q, NULL, or->request, 0); 484
485 _set_error_resid(or, or->request, error);
486 return error;
480} 487}
481EXPORT_SYMBOL(osd_execute_request); 488EXPORT_SYMBOL(osd_execute_request);
482 489
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
484{ 491{
485 struct osd_request *or = req->end_io_data; 492 struct osd_request *or = req->end_io_data;
486 493
487 or->async_error = error; 494 _set_error_resid(or, req, error);
488 495 if (req->next_rq) {
489 if (unlikely(error)) { 496 __blk_put_request(req->q, req->next_rq);
490 OSD_DEBUG("osd_request_async_done error recieved %d " 497 req->next_rq = NULL;
491 "errors 0x%x\n", error, req->errors);
492 if (!req->errors) /* don't miss out on this one */
493 req->errors = error;
494 } 498 }
495 499
500 __blk_put_request(req->q, req);
501 or->request = NULL;
502 or->in.req = NULL;
503 or->out.req = NULL;
504
496 if (or->async_done) 505 if (or->async_done)
497 or->async_done(or, or->async_private); 506 or->async_done(or, or->async_private);
498 else 507 else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
1489#endif 1498#endif
1490 int ret; 1499 int ret;
1491 1500
1492 if (likely(!or->request->errors)) { 1501 if (likely(!or->req_errors))
1493 osi->out_resid = 0;
1494 osi->in_resid = 0;
1495 return 0; 1502 return 0;
1496 }
1497 1503
1498 osi = osi ? : &local_osi; 1504 osi = osi ? : &local_osi;
1499 memset(osi, 0, sizeof(*osi)); 1505 memset(osi, 0, sizeof(*osi));
1500 1506
1501 ssdb = or->request->sense; 1507 ssdb = (typeof(ssdb))or->sense;
1502 sense_len = or->request->sense_len; 1508 sense_len = or->sense_len;
1503 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { 1509 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1504 OSD_ERR("Block-layer returned error(0x%x) but " 1510 OSD_ERR("Block-layer returned error(0x%x) but "
1505 "sense_len(%u) || key(%d) is empty\n", 1511 "sense_len(%u) || key(%d) is empty\n",
1506 or->request->errors, sense_len, ssdb->sense_key); 1512 or->req_errors, sense_len, ssdb->sense_key);
1507 goto analyze; 1513 goto analyze;
1508 } 1514 }
1509 1515
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
1525 "additional_code=0x%x async_error=%d errors=0x%x\n", 1531 "additional_code=0x%x async_error=%d errors=0x%x\n",
1526 osi->key, original_sense_len, sense_len, 1532 osi->key, original_sense_len, sense_len,
1527 osi->additional_code, or->async_error, 1533 osi->additional_code, or->async_error,
1528 or->request->errors); 1534 or->req_errors);
1529 1535
1530 if (original_sense_len < sense_len) 1536 if (original_sense_len < sense_len)
1531 sense_len = original_sense_len; 1537 sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
1695 ret = -EIO; 1701 ret = -EIO;
1696 } 1702 }
1697 1703
1698 if (or->out.req) 1704 if (!or->out.residual)
1699 osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; 1705 or->out.residual = or->out.total_bytes;
1700 if (or->in.req) 1706 if (!or->in.residual)
1701 osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; 1707 or->in.residual = or->in.total_bytes;
1702 1708
1703 return ret; 1709 return ret;
1704} 1710}
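The osd_initiator rework copies the request's error, residual and sense lengths into the osd_request (_set_error_resid()) before the block-layer request is put, because the struct request may already be freed by the time osd_req_decode_sense_full() examines the result. A simplified standalone sketch of that capture-before-release idea, using stand-in types rather than the real block-layer structures:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the short-lived request and the long-lived command context. */
struct fake_req {
        int errors;
        unsigned int resid_len;
};

struct cmd_ctx {
        int req_errors;
        unsigned int residual;
};

/* Snapshot completion state out of the request before it is freed. */
static void set_error_resid(struct cmd_ctx *ctx, struct fake_req *req, int error)
{
        ctx->req_errors = req->errors ? req->errors : error;
        ctx->residual   = req->resid_len;
}

int main(void)
{
        struct fake_req *req = calloc(1, sizeof(*req));
        struct cmd_ctx ctx;

        req->resid_len = 512;                   /* pretend short transfer */
        set_error_resid(&ctx, req, -5 /* e.g. execution error */);
        free(req);                              /* request is gone from here on */

        printf("errors=%d residual=%u\n", ctx.req_errors, ctx.residual);
        return 0;
}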
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 22644de26399..63ad4aa0c422 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,16 +45,6 @@
45#define HEADER_LEN 28 45#define HEADER_LEN 28
46#define SIZE_OFFSET 16 46#define SIZE_OFFSET 16
47 47
48struct pm8001_ioctl_payload {
49 u32 signature;
50 u16 major_function;
51 u16 minor_function;
52 u16 length;
53 u16 status;
54 u16 offset;
55 u16 id;
56 u8 func_specific[1];
57};
58 48
59#define FLASH_OK 0x000000 49#define FLASH_OK 0x000000
60#define FAIL_OPEN_BIOS_FILE 0x000100 50#define FAIL_OPEN_BIOS_FILE 0x000100
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a3de306b9045..9b44c6f1b10e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
373static void __devinit 373static void __devinit
374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) 374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
375{ 375{
376 u32 offset; 376 u32 value, offset, i;
377 u32 value;
378 u32 i, j;
379 u32 bit_cnt;
380 377
381#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 378#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
382#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 379#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
392 */ 389 */
393 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) 390 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
394 return; 391 return;
395 /* set SSC bit of PHY 0 - 3 */ 392
396 for (i = 0; i < 4; i++) { 393 for (i = 0; i < 4; i++) {
397 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; 394 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
398 value = pm8001_cr32(pm8001_ha, 2, offset); 395 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
399 if (SSCbit) {
400 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
401 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
402 } else {
403 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
404 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
405 }
406 bit_cnt = 0;
407 for (j = 0; j < 31; j++)
408 if ((value >> j) & 0x00000001)
409 bit_cnt++;
410 if (bit_cnt % 2)
411 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
412 else
413 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
414
415 pm8001_cw32(pm8001_ha, 2, offset, value);
416 } 396 }
417
418 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ 397 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
419 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) 398 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
420 return; 399 return;
421
422 /* set SSC bit of PHY 4 - 7 */
423 for (i = 4; i < 8; i++) { 400 for (i = 4; i < 8; i++) {
424 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); 401 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
425 value = pm8001_cr32(pm8001_ha, 2, offset); 402 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
426 if (SSCbit) {
427 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
428 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
429 } else {
430 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
431 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
432 }
433 bit_cnt = 0;
434 for (j = 0; j < 31; j++)
435 if ((value >> j) & 0x00000001)
436 bit_cnt++;
437 if (bit_cnt % 2)
438 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
439 else
440 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
441
442 pm8001_cw32(pm8001_ha, 2, offset, value);
443 } 403 }
404 /*************************************************************
405 Change the SSC upspreading value to 0x0 so that upspreading is disabled.
406 Device MABC SMOD0 Controls
407 Address: (via MEMBASE-III):
408 Using shifted destination address 0x0_0000: with Offset 0xD8
409
410 31:28 R/W Reserved Do not change
411 27:24 R/W SAS_SMOD_SPRDUP 0000
412 23:20 R/W SAS_SMOD_SPRDDN 0000
413 19:0 R/W Reserved Do not change
414 Upon power-up this register will read as 0x8990c016,
415 and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
416 so that the written value will be 0x8090c016.
417 This will ensure only down-spreading SSC is enabled on the SPC.
418 *************************************************************/
419 value = pm8001_cr32(pm8001_ha, 2, 0xd8);
420 pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
444 421
445 /*set the shifted destination address to 0x0 to avoid error operation */ 422 /*set the shifted destination address to 0x0 to avoid error operation */
446 bar4_shift(pm8001_ha, 0x0); 423 bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1901{ 1878{
1902 struct sas_task *t; 1879 struct sas_task *t;
1903 struct pm8001_ccb_info *ccb; 1880 struct pm8001_ccb_info *ccb;
1904 unsigned long flags; 1881 unsigned long flags = 0;
1905 u32 param; 1882 u32 param;
1906 u32 status; 1883 u32 status;
1907 u32 tag; 1884 u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2040 ts->stat = SAS_QUEUE_FULL; 2017 ts->stat = SAS_QUEUE_FULL;
2041 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2018 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2042 mb();/*in order to force CPU ordering*/ 2019 mb();/*in order to force CPU ordering*/
2020 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2043 t->task_done(t); 2021 t->task_done(t);
2022 spin_lock_irqsave(&pm8001_ha->lock, flags);
2044 return; 2023 return;
2045 } 2024 }
2046 break; 2025 break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2058 ts->stat = SAS_QUEUE_FULL; 2037 ts->stat = SAS_QUEUE_FULL;
2059 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2038 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2060 mb();/*ditto*/ 2039 mb();/*ditto*/
2040 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2061 t->task_done(t); 2041 t->task_done(t);
2042 spin_lock_irqsave(&pm8001_ha->lock, flags);
2062 return; 2043 return;
2063 } 2044 }
2064 break; 2045 break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2084 ts->stat = SAS_QUEUE_FULL; 2065 ts->stat = SAS_QUEUE_FULL;
2085 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2066 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2086 mb();/* ditto*/ 2067 mb();/* ditto*/
2068 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2087 t->task_done(t); 2069 t->task_done(t);
2070 spin_lock_irqsave(&pm8001_ha->lock, flags);
2088 return; 2071 return;
2089 } 2072 }
2090 break; 2073 break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2149 ts->stat = SAS_QUEUE_FULL; 2132 ts->stat = SAS_QUEUE_FULL;
2150 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2133 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2151 mb();/*ditto*/ 2134 mb();/*ditto*/
2135 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2152 t->task_done(t); 2136 t->task_done(t);
2137 spin_lock_irqsave(&pm8001_ha->lock, flags);
2153 return; 2138 return;
2154 } 2139 }
2155 break; 2140 break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2171 ts->stat = SAS_QUEUE_FULL; 2156 ts->stat = SAS_QUEUE_FULL;
2172 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2157 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2173 mb();/*ditto*/ 2158 mb();/*ditto*/
2159 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2174 t->task_done(t); 2160 t->task_done(t);
2161 spin_lock_irqsave(&pm8001_ha->lock, flags);
2175 return; 2162 return;
2176 } 2163 }
2177 break; 2164 break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2200 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2187 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2201 t, status, ts->resp, ts->stat)); 2188 t, status, ts->resp, ts->stat));
2202 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2189 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2203 } else { 2190 } else if (t->uldd_task) {
2204 spin_unlock_irqrestore(&t->task_state_lock, flags); 2191 spin_unlock_irqrestore(&t->task_state_lock, flags);
2205 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2192 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2206 mb();/* ditto */ 2193 mb();/* ditto */
2194 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2207 t->task_done(t); 2195 t->task_done(t);
2196 spin_lock_irqsave(&pm8001_ha->lock, flags);
2197 } else if (!t->uldd_task) {
2198 spin_unlock_irqrestore(&t->task_state_lock, flags);
2199 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2200 mb();/*ditto*/
2201 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2202 t->task_done(t);
2203 spin_lock_irqsave(&pm8001_ha->lock, flags);
2208 } 2204 }
2209} 2205}
2210 2206
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2212static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) 2208static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2213{ 2209{
2214 struct sas_task *t; 2210 struct sas_task *t;
2215 unsigned long flags; 2211 unsigned long flags = 0;
2216 struct task_status_struct *ts; 2212 struct task_status_struct *ts;
2217 struct pm8001_ccb_info *ccb; 2213 struct pm8001_ccb_info *ccb;
2218 struct pm8001_device *pm8001_dev; 2214 struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2292 ts->stat = SAS_QUEUE_FULL; 2288 ts->stat = SAS_QUEUE_FULL;
2293 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2289 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2294 mb();/*ditto*/ 2290 mb();/*ditto*/
2291 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2295 t->task_done(t); 2292 t->task_done(t);
2293 spin_lock_irqsave(&pm8001_ha->lock, flags);
2296 return; 2294 return;
2297 } 2295 }
2298 break; 2296 break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2401 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2399 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2402 t, event, ts->resp, ts->stat)); 2400 t, event, ts->resp, ts->stat));
2403 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2401 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2404 } else { 2402 } else if (t->uldd_task) {
2405 spin_unlock_irqrestore(&t->task_state_lock, flags); 2403 spin_unlock_irqrestore(&t->task_state_lock, flags);
2406 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2404 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2407 mb();/* in order to force CPU ordering */ 2405 mb();/* ditto */
2406 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2408 t->task_done(t); 2407 t->task_done(t);
2408 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } else if (!t->uldd_task) {
2410 spin_unlock_irqrestore(&t->task_state_lock, flags);
2411 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2412 mb();/*ditto*/
2413 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2414 t->task_done(t);
2415 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } 2416 }
2410} 2417}
2411 2418
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2876 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2883 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2877 u8 link_rate = 2884 u8 link_rate =
2878 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2885 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2886 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2879 u8 phy_id = 2887 u8 phy_id =
2880 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2888 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2889 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2890 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2891 struct pm8001_port *port = &pm8001_ha->port[port_id];
2881 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2892 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2882 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2893 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2883 unsigned long flags; 2894 unsigned long flags;
2884 u8 deviceType = pPayload->sas_identify.dev_type; 2895 u8 deviceType = pPayload->sas_identify.dev_type;
2885 2896 port->port_state = portstate;
2886 PM8001_MSG_DBG(pm8001_ha, 2897 PM8001_MSG_DBG(pm8001_ha,
2887 pm8001_printk("HW_EVENT_SAS_PHY_UP \n")); 2898 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
2899 port_id, phy_id));
2888 2900
2889 switch (deviceType) { 2901 switch (deviceType) {
2890 case SAS_PHY_UNUSED: 2902 case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2895 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); 2907 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
2896 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, 2908 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
2897 PHY_NOTIFY_ENABLE_SPINUP); 2909 PHY_NOTIFY_ENABLE_SPINUP);
2910 port->port_attached = 1;
2898 get_lrate_mode(phy, link_rate); 2911 get_lrate_mode(phy, link_rate);
2899 break; 2912 break;
2900 case SAS_EDGE_EXPANDER_DEVICE: 2913 case SAS_EDGE_EXPANDER_DEVICE:
2901 PM8001_MSG_DBG(pm8001_ha, 2914 PM8001_MSG_DBG(pm8001_ha,
2902 pm8001_printk("expander device.\n")); 2915 pm8001_printk("expander device.\n"));
2916 port->port_attached = 1;
2903 get_lrate_mode(phy, link_rate); 2917 get_lrate_mode(phy, link_rate);
2904 break; 2918 break;
2905 case SAS_FANOUT_EXPANDER_DEVICE: 2919 case SAS_FANOUT_EXPANDER_DEVICE:
2906 PM8001_MSG_DBG(pm8001_ha, 2920 PM8001_MSG_DBG(pm8001_ha,
2907 pm8001_printk("fanout expander device.\n")); 2921 pm8001_printk("fanout expander device.\n"));
2922 port->port_attached = 1;
2908 get_lrate_mode(phy, link_rate); 2923 get_lrate_mode(phy, link_rate);
2909 break; 2924 break;
2910 default: 2925 default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2946 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2961 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2947 u8 link_rate = 2962 u8 link_rate =
2948 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2963 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2964 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2949 u8 phy_id = 2965 u8 phy_id =
2950 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2966 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2967 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2968 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2969 struct pm8001_port *port = &pm8001_ha->port[port_id];
2951 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2970 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2952 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2971 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2953 unsigned long flags; 2972 unsigned long flags;
2973 PM8001_MSG_DBG(pm8001_ha,
2974 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
2975 " phy id = %d\n", port_id, phy_id));
2976 port->port_state = portstate;
2977 port->port_attached = 1;
2954 get_lrate_mode(phy, link_rate); 2978 get_lrate_mode(phy, link_rate);
2955 phy->phy_type |= PORT_TYPE_SATA; 2979 phy->phy_type |= PORT_TYPE_SATA;
2956 phy->phy_attached = 1; 2980 phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2984 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 3008 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2985 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); 3009 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2986 u8 portstate = (u8)(npip_portstate & 0x0000000F); 3010 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2987 3011 struct pm8001_port *port = &pm8001_ha->port[port_id];
3012 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3013 port->port_state = portstate;
3014 phy->phy_type = 0;
3015 phy->identify.device_type = 0;
3016 phy->phy_attached = 0;
3017 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
2988 switch (portstate) { 3018 switch (portstate) {
2989 case PORT_VALID: 3019 case PORT_VALID:
2990 break; 3020 break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2993 pm8001_printk(" PortInvalid portID %d \n", port_id)); 3023 pm8001_printk(" PortInvalid portID %d \n", port_id));
2994 PM8001_MSG_DBG(pm8001_ha, 3024 PM8001_MSG_DBG(pm8001_ha,
2995 pm8001_printk(" Last phy Down and port invalid\n")); 3025 pm8001_printk(" Last phy Down and port invalid\n"));
3026 port->port_attached = 0;
2996 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3027 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
2997 port_id, phy_id, 0, 0); 3028 port_id, phy_id, 0, 0);
2998 break; 3029 break;
2999 case PORT_IN_RESET: 3030 case PORT_IN_RESET:
3000 PM8001_MSG_DBG(pm8001_ha, 3031 PM8001_MSG_DBG(pm8001_ha,
3001 pm8001_printk(" PortInReset portID %d \n", port_id)); 3032 pm8001_printk(" Port In Reset portID %d \n", port_id));
3002 break; 3033 break;
3003 case PORT_NOT_ESTABLISHED: 3034 case PORT_NOT_ESTABLISHED:
3004 PM8001_MSG_DBG(pm8001_ha, 3035 PM8001_MSG_DBG(pm8001_ha,
3005 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); 3036 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
3037 port->port_attached = 0;
3006 break; 3038 break;
3007 case PORT_LOSTCOMM: 3039 case PORT_LOSTCOMM:
3008 PM8001_MSG_DBG(pm8001_ha, 3040 PM8001_MSG_DBG(pm8001_ha,
3009 pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); 3041 pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
3010 PM8001_MSG_DBG(pm8001_ha, 3042 PM8001_MSG_DBG(pm8001_ha,
3011 pm8001_printk(" Last phy Down and port invalid\n")); 3043 pm8001_printk(" Last phy Down and port invalid\n"));
3044 port->port_attached = 0;
3012 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3045 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3013 port_id, phy_id, 0, 0); 3046 port_id, phy_id, 0, 0);
3014 break; 3047 break;
3015 default: 3048 default:
3049 port->port_attached = 0;
3016 PM8001_MSG_DBG(pm8001_ha, 3050 PM8001_MSG_DBG(pm8001_ha,
3017 pm8001_printk(" phy Down and(default) = %x\n", 3051 pm8001_printk(" phy Down and(default) = %x\n",
3018 portstate)); 3052 portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
3770 u32 opc = OPC_INB_SSPINIIOSTART; 3804 u32 opc = OPC_INB_SSPINIIOSTART;
3771 memset(&ssp_cmd, 0, sizeof(ssp_cmd)); 3805 memset(&ssp_cmd, 0, sizeof(ssp_cmd));
3772 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); 3806 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
3773 ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for 3807 ssp_cmd.dir_m_tlr =
3808 cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
3774 SAS 1.1 compatible TLR*/ 3809 SAS 1.1 compatible TLR*/
3775 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); 3810 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3776 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); 3811 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3841 } 3876 }
3842 } 3877 }
3843 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 3878 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
3844 ncg_tag = cpu_to_le32(hdr_tag); 3879 ncg_tag = hdr_tag;
3845 dir = data_dir_flags[task->data_dir] << 8; 3880 dir = data_dir_flags[task->data_dir] << 8;
3846 sata_cmd.tag = cpu_to_le32(tag); 3881 sata_cmd.tag = cpu_to_le32(tag);
3847 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 3882 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
3986 ((stp_sspsmp_sata & 0x03) * 0x10000000)); 4021 ((stp_sspsmp_sata & 0x03) * 0x10000000));
3987 payload.firstburstsize_ITNexustimeout = 4022 payload.firstburstsize_ITNexustimeout =
3988 cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); 4023 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
3989 memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr, 4024 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
3990 SAS_ADDR_SIZE); 4025 SAS_ADDR_SIZE);
3991 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4026 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
3992 return rc; 4027 return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4027 struct inbound_queue_table *circularQ; 4062 struct inbound_queue_table *circularQ;
4028 int ret; 4063 int ret;
4029 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4064 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4030 memset((u8 *)&payload, 0, sizeof(payload)); 4065 memset(&payload, 0, sizeof(payload));
4031 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4066 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4032 payload.tag = 1; 4067 payload.tag = 1;
4033 payload.phyop_phyid = 4068 payload.phyop_phyid =
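A pattern repeated throughout the pm8001 completion hunks above is dropping pm8001_ha->lock around t->task_done(), since the upper-layer completion callback may re-enter the driver and try to take the same lock. A hedged sketch of that unlock-around-callback shape, assuming kernel context and illustrative names:

#include <linux/spinlock.h>

struct example_hba {
        spinlock_t lock;
};

/* Sketch only: finish a command, releasing the HBA lock for the callback. */
static void example_completion_path(struct example_hba *hba,
                                    void (*task_done)(void *arg), void *arg)
{
        unsigned long flags;

        spin_lock_irqsave(&hba->lock, flags);
        /* ... per-command bookkeeping under the HBA lock ... */
        spin_unlock_irqrestore(&hba->lock, flags);  /* drop before the callback */
        task_done(arg);                             /* may re-enter and lock again */
        spin_lock_irqsave(&hba->lock, flags);
        /* ... any remaining bookkeeping ... */
        spin_unlock_irqrestore(&hba->lock, flags);
}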
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 96e4daa68b8f..833a5201eda4 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -242,8 +242,7 @@ struct reg_dev_req {
242 __le32 phyid_portid; 242 __le32 phyid_portid;
243 __le32 dtype_dlr_retry; 243 __le32 dtype_dlr_retry;
244 __le32 firstburstsize_ITNexustimeout; 244 __le32 firstburstsize_ITNexustimeout;
245 u32 sas_addr_hi; 245 u8 sas_addr[SAS_ADDR_SIZE];
246 u32 sas_addr_low;
247 __le32 upper_device_id; 246 __le32 upper_device_id;
248 u32 reserved[8]; 247 u32 reserved[8];
249} __attribute__((packed, aligned(4))); 248} __attribute__((packed, aligned(4)));
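The reg_dev_req change replaces the sas_addr_hi/sas_addr_low pair with a plain byte array, giving the memcpy() in pm8001_chip_reg_dev_req() an explicit, correctly sized destination; treating the SAS address as an 8-byte wire string also avoids any question of host byte order for the two halves. A standalone sketch of that representation choice; the address value is made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAS_ADDR_SIZE 8

int main(void)
{
        /* A SAS address is a sequence of 8 bytes as it appears on the wire. */
        const uint8_t sas_addr[SAS_ADDR_SIZE] = {
                0x50, 0x01, 0x0c, 0x60, 0x00, 0x47, 0xf9, 0xd0
        };
        uint8_t msg[SAS_ADDR_SIZE];
        uint32_t halves[2];

        /* Byte-array copy: identical layout on every host, no swapping needed. */
        memcpy(msg, sas_addr, SAS_ADDR_SIZE);

        /* Splitting it into two u32s makes the in-memory value depend on host
         * endianness unless each half is converted explicitly. */
        memcpy(halves, sas_addr, SAS_ADDR_SIZE);

        printf("first wire byte: %#02x, first half as a host u32: %#x\n",
               msg[0], halves[0]);
        return 0;
}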
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 42ebe725d5a5..c2f1032496cb 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
200{ 200{
201 int i; 201 int i;
202 spin_lock_init(&pm8001_ha->lock); 202 spin_lock_init(&pm8001_ha->lock);
203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) 203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
204 pm8001_phy_init(pm8001_ha, i); 204 pm8001_phy_init(pm8001_ha, i);
205 pm8001_ha->port[i].wide_port_phymap = 0;
206 pm8001_ha->port[i].port_attached = 0;
207 pm8001_ha->port[i].port_state = 0;
208 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
209 }
205 210
206 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); 211 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
207 if (!pm8001_ha->tags) 212 if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
511 u8 i; 516 u8 i;
512#ifdef PM8001_READ_VPD 517#ifdef PM8001_READ_VPD
513 DECLARE_COMPLETION_ONSTACK(completion); 518 DECLARE_COMPLETION_ONSTACK(completion);
519 struct pm8001_ioctl_payload payload;
514 pm8001_ha->nvmd_completion = &completion; 520 pm8001_ha->nvmd_completion = &completion;
515 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0); 521 payload.minor_function = 0;
522 payload.length = 128;
523 payload.func_specific = kzalloc(128, GFP_KERNEL);
524 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
516 wait_for_completion(&completion); 525 wait_for_completion(&completion);
517 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 526 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
518 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, 527 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
519 SAS_ADDR_SIZE); 528 SAS_ADDR_SIZE);
520 PM8001_INIT_DBG(pm8001_ha, 529 PM8001_INIT_DBG(pm8001_ha,
521 pm8001_printk("phy %d sas_addr = %x \n", i, 530 pm8001_printk("phy %d sas_addr = %016llx \n", i,
522 (u64)pm8001_ha->phy[i].dev_sas_addr)); 531 pm8001_ha->phy[i].dev_sas_addr));
523 } 532 }
524#else 533#else
525 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 534 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
526 pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL; 535 pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
527 pm8001_ha->phy[i].dev_sas_addr = 536 pm8001_ha->phy[i].dev_sas_addr =
528 cpu_to_be64((u64) 537 cpu_to_be64((u64)
529 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); 538 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 1f767a0e727a..7f9c83a76390 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
329 } 329 }
330 return 0; 330 return 0;
331} 331}
332 /* Find the local port id that's attached to this device */
333static int sas_find_local_port_id(struct domain_device *dev)
334{
335 struct domain_device *pdev = dev->parent;
336
337 /* Directly attached device */
338 if (!pdev)
339 return dev->port->id;
340 while (pdev) {
341 struct domain_device *pdev_p = pdev->parent;
342 if (!pdev_p)
343 return pdev->port->id;
344 pdev = pdev->parent;
345 }
346 return 0;
347}
348
332/** 349/**
333 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. 350 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
334 * @task: the task to be execute. 351 * @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
346 struct domain_device *dev = task->dev; 363 struct domain_device *dev = task->dev;
347 struct pm8001_hba_info *pm8001_ha; 364 struct pm8001_hba_info *pm8001_ha;
348 struct pm8001_device *pm8001_dev; 365 struct pm8001_device *pm8001_dev;
366 struct pm8001_port *port = NULL;
349 struct sas_task *t = task; 367 struct sas_task *t = task;
350 struct pm8001_ccb_info *ccb; 368 struct pm8001_ccb_info *ccb;
351 u32 tag = 0xdeadbeef, rc, n_elem = 0; 369 u32 tag = 0xdeadbeef, rc, n_elem = 0;
352 u32 n = num; 370 u32 n = num;
353 unsigned long flags = 0; 371 unsigned long flags = 0, flags_libsas = 0;
354 372
355 if (!dev->port) { 373 if (!dev->port) {
356 struct task_status_struct *tsm = &t->task_status; 374 struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
379 rc = SAS_PHY_DOWN; 397 rc = SAS_PHY_DOWN;
380 goto out_done; 398 goto out_done;
381 } 399 }
400 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
401 if (!port->port_attached) {
402 if (sas_protocol_ata(t->task_proto)) {
403 struct task_status_struct *ts = &t->task_status;
404 ts->resp = SAS_TASK_UNDELIVERED;
405 ts->stat = SAS_PHY_DOWN;
406
407 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
408 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
409 flags_libsas);
410 t->task_done(t);
411 spin_lock_irqsave(dev->sata_dev.ap->lock,
412 flags_libsas);
413 spin_lock_irqsave(&pm8001_ha->lock, flags);
414 if (n > 1)
415 t = list_entry(t->list.next,
416 struct sas_task, list);
417 continue;
418 } else {
419 struct task_status_struct *ts = &t->task_status;
420 ts->resp = SAS_TASK_UNDELIVERED;
421 ts->stat = SAS_PHY_DOWN;
422 t->task_done(t);
423 if (n > 1)
424 t = list_entry(t->list.next,
425 struct sas_task, list);
426 continue;
427 }
428 }
382 rc = pm8001_tag_alloc(pm8001_ha, &tag); 429 rc = pm8001_tag_alloc(pm8001_ha, &tag);
383 if (rc) 430 if (rc)
384 goto err_out; 431 goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
569 spin_lock_irqsave(&pm8001_ha->lock, flags); 616 spin_lock_irqsave(&pm8001_ha->lock, flags);
570 617
571 pm8001_device = pm8001_alloc_dev(pm8001_ha); 618 pm8001_device = pm8001_alloc_dev(pm8001_ha);
572 pm8001_device->sas_device = dev;
573 if (!pm8001_device) { 619 if (!pm8001_device) {
574 res = -1; 620 res = -1;
575 goto found_out; 621 goto found_out;
576 } 622 }
623 pm8001_device->sas_device = dev;
577 dev->lldd_dev = pm8001_device; 624 dev->lldd_dev = pm8001_device;
578 pm8001_device->dev_type = dev->dev_type; 625 pm8001_device->dev_type = dev->dev_type;
579 pm8001_device->dcompletion = &completion; 626 pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
609 wait_for_completion(&completion); 656 wait_for_completion(&completion);
610 if (dev->dev_type == SAS_END_DEV) 657 if (dev->dev_type == SAS_END_DEV)
611 msleep(50); 658 msleep(50);
612 pm8001_ha->flags = PM8001F_RUN_TIME ; 659 pm8001_ha->flags |= PM8001F_RUN_TIME ;
613 return 0; 660 return 0;
614found_out: 661found_out:
615 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 662 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
772 task->task_done = pm8001_task_done; 819 task->task_done = pm8001_task_done;
773 task->timer.data = (unsigned long)task; 820 task->timer.data = (unsigned long)task;
774 task->timer.function = pm8001_tmf_timedout; 821 task->timer.function = pm8001_tmf_timedout;
775 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 822 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
776 add_timer(&task->timer); 823 add_timer(&task->timer);
777 824
778 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 825 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
897 944
898 if (dev_is_sata(dev)) { 945 if (dev_is_sata(dev)) {
899 DECLARE_COMPLETION_ONSTACK(completion_setstate); 946 DECLARE_COMPLETION_ONSTACK(completion_setstate);
947 if (scsi_is_sas_phy_local(phy))
948 return 0;
900 rc = sas_phy_reset(phy, 1); 949 rc = sas_phy_reset(phy, 1);
901 msleep(2000); 950 msleep(2000);
902 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 951 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
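The pm8001_dev_found_notify() hunk above also changes the run-time flag update from a plain assignment to an OR-assignment, so other bits already set in pm8001_ha->flags survive. A minimal userspace sketch of the difference; the flag names and values below are illustrative, not the driver's real definitions.

#include <stdio.h>

#define FLAG_RUN_TIME 0x01 /* illustrative values, not the driver's */
#define FLAG_FW_READY 0x02

int main(void)
{
	unsigned int flags = FLAG_FW_READY;

	flags = FLAG_RUN_TIME;              /* plain '=' silently drops FW_READY */
	printf("assign:    %#x\n", flags);  /* prints 0x1 */

	flags = FLAG_FW_READY;
	flags |= FLAG_RUN_TIME;             /* '|=' keeps the bits already set */
	printf("or-assign: %#x\n", flags);  /* prints 0x3 */
	return 0;
}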
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 30f2ede55a75..8e38ca8cd101 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -59,11 +59,11 @@
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm8001"
61#define DRV_VERSION "0.1.36" 61#define DRV_VERSION "0.1.36"
62#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ 65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
66#define PM8001_EH_LOGGING 0x10 /* Error message logging */ 66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ 67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ 68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ 69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
100 100
101#define PM8001_USE_TASKLET 101#define PM8001_USE_TASKLET
102#define PM8001_USE_MSIX 102#define PM8001_USE_MSIX
103#define PM8001_READ_VPD
103 104
104 105
105#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) 106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
111struct pm8001_hba_info; 112struct pm8001_hba_info;
112struct pm8001_ccb_info; 113struct pm8001_ccb_info;
113struct pm8001_device; 114struct pm8001_device;
114struct pm8001_tmf_task; 115/* define task management IU */
116struct pm8001_tmf_task {
117 u8 tmf;
118 u32 tag_of_task_to_be_managed;
119};
120struct pm8001_ioctl_payload {
121 u32 signature;
122 u16 major_function;
123 u16 minor_function;
124 u16 length;
125 u16 status;
126 u16 offset;
127 u16 id;
128 u8 *func_specific;
129};
130
115struct pm8001_dispatch { 131struct pm8001_dispatch {
116 char *name; 132 char *name;
117 int (*chip_init)(struct pm8001_hba_info *pm8001_ha); 133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
164 180
165struct pm8001_port { 181struct pm8001_port {
166 struct asd_sas_port sas_port; 182 struct asd_sas_port sas_port;
183 u8 port_attached;
184 u8 wide_port_phymap;
185 u8 port_state;
186 struct list_head list;
167}; 187};
168 188
169struct pm8001_phy { 189struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
386 __be32 startup_entry; 406 __be32 startup_entry;
387} __attribute__((packed, aligned(4))); 407} __attribute__((packed, aligned(4)));
388 408
389/* define task management IU */ 409
390struct pm8001_tmf_task {
391 u8 tmf;
392 u32 tag_of_task_to_be_managed;
393};
394/** 410/**
395 * FW Flash Update status values 411 * FW Flash Update status values
396 */ 412 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 34c6b896a91b..e7d2688fbeba 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters 2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 * 3 *
4 * Written By: PMC Sierra Corporation 4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
5 * 6 *
6 * Copyright (C) 2008, 2009 PMC Sierra Inc 7 * Copyright (C) 2008, 2009 PMC Sierra Inc
7 * 8 *
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
79/* 80/*
80 * Module parameters 81 * Module parameters
81 */ 82 */
82MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com"); 83MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
83MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); 84MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
84MODULE_LICENSE("GPL"); 85MODULE_LICENSE("GPL");
85MODULE_VERSION(PMCRAID_DRIVER_VERSION); 86MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
162 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 163 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
163 list_for_each_entry(temp, &pinstance->used_res_q, queue) { 164 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
164 165
165 /* do not expose VSETs with order-ids >= 240 */ 166 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
166 if (RES_IS_VSET(temp->cfg_entry)) { 167 if (RES_IS_VSET(temp->cfg_entry)) {
167 target = temp->cfg_entry.unique_flags1; 168 target = temp->cfg_entry.unique_flags1;
168 if (target >= PMCRAID_MAX_VSET_TARGETS) 169 if (target > PMCRAID_MAX_VSET_TARGETS)
169 continue; 170 continue;
170 bus = PMCRAID_VSET_BUS_ID; 171 bus = PMCRAID_VSET_BUS_ID;
171 lun = 0; 172 lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1210 int retval = 0; 1211 int retval = 0;
1211 1212
1212 if (cfgte->resource_type == RES_TYPE_VSET) 1213 if (cfgte->resource_type == RES_TYPE_VSET)
1213 retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE); 1214 retval = ((cfgte->unique_flags1 & 0x80) == 0);
1214 else if (cfgte->resource_type == RES_TYPE_GSCSI) 1215 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1215 retval = (RES_BUS(cfgte->resource_address) != 1216 retval = (RES_BUS(cfgte->resource_address) !=
1216 PMCRAID_VIRTUAL_ENCL_BUS_ID); 1217 PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1361 * Return value: 1362 * Return value:
1362 * none 1363 * none
1363 */ 1364 */
1365
1364static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) 1366static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1365{ 1367{
1366 struct pmcraid_config_table_entry *cfg_entry; 1368 struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1368 struct pmcraid_cmd *cmd; 1370 struct pmcraid_cmd *cmd;
1369 struct pmcraid_cmd *cfgcmd; 1371 struct pmcraid_cmd *cfgcmd;
1370 struct pmcraid_resource_entry *res = NULL; 1372 struct pmcraid_resource_entry *res = NULL;
1371 u32 new_entry = 1;
1372 unsigned long lock_flags; 1373 unsigned long lock_flags;
1373 unsigned long host_lock_flags; 1374 unsigned long host_lock_flags;
1375 u32 new_entry = 1;
1376 u32 hidden_entry = 0;
1374 int rc; 1377 int rc;
1375 1378
1376 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; 1379 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1406 } 1409 }
1407 1410
1408 /* If this resource is not going to be added to mid-layer, just notify 1411 /* If this resource is not going to be added to mid-layer, just notify
1409 * applications and return 1412 * applications and return. If this notification is about hiding a VSET
1413 * resource, check if it was exposed already.
1410 */ 1414 */
1411 if (!pmcraid_expose_resource(cfg_entry)) 1415 if (pinstance->ccn.hcam->notification_type ==
1416 NOTIFICATION_TYPE_ENTRY_CHANGED &&
1417 cfg_entry->resource_type == RES_TYPE_VSET &&
1418 cfg_entry->unique_flags1 & 0x80) {
1419 hidden_entry = 1;
1420 } else if (!pmcraid_expose_resource(cfg_entry))
1412 goto out_notify_apps; 1421 goto out_notify_apps;
1413 1422
1414 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 1423 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1424 1433
1425 if (new_entry) { 1434 if (new_entry) {
1426 1435
1436 if (hidden_entry) {
1437 spin_unlock_irqrestore(&pinstance->resource_lock,
1438 lock_flags);
1439 goto out_notify_apps;
1440 }
1441
1427 /* If there are more number of resources than what driver can 1442 /* If there are more number of resources than what driver can
1428 * manage, do not notify the applications about the CCN. Just 1443 * manage, do not notify the applications about the CCN. Just
1429 * ignore this notifications and re-register the same HCAM 1444 * ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1454 sizeof(struct pmcraid_config_table_entry)); 1469 sizeof(struct pmcraid_config_table_entry));
1455 1470
1456 if (pinstance->ccn.hcam->notification_type == 1471 if (pinstance->ccn.hcam->notification_type ==
1457 NOTIFICATION_TYPE_ENTRY_DELETED) { 1472 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1458 if (res->scsi_dev) { 1473 if (res->scsi_dev) {
1474 res->cfg_entry.unique_flags1 &= 0x7F;
1459 res->change_detected = RES_CHANGE_DEL; 1475 res->change_detected = RES_CHANGE_DEL;
1460 res->cfg_entry.resource_handle = 1476 res->cfg_entry.resource_handle =
1461 PMCRAID_INVALID_RES_HANDLE; 1477 PMCRAID_INVALID_RES_HANDLE;
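The pmcraid hunks above read bit 7 of unique_flags1 as a hidden-VSET marker and the low seven bits as the target id, which is why PMCRAID_MAX_VSET_TARGETS becomes 0x7F and the delete path masks with 0x7F. A small userspace sketch of that bit layout, using a made-up config entry byte:

#include <stdio.h>

#define VSET_HIDDEN_BIT  0x80 /* assumption drawn from the hunks above */
#define VSET_TARGET_MASK 0x7F /* matches the new PMCRAID_MAX_VSET_TARGETS */

int main(void)
{
	unsigned char unique_flags1 = 0x85; /* hypothetical entry: target 5, hidden */

	int hidden = unique_flags1 & VSET_HIDDEN_BIT;
	int target = unique_flags1 & VSET_TARGET_MASK;

	printf("target %d is %s\n", target, hidden ? "hidden" : "exposed");

	unique_flags1 &= VSET_TARGET_MASK; /* what the delete path above does */
	printf("after clearing: %#x\n", (unsigned int)unique_flags1);
	return 0;
}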
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 2752b56cad56..92f89d50850c 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
1/* 1/*
2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file 2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
3 * 3 *
4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
6 *
4 * Copyright (C) 2008, 2009 PMC Sierra Inc. 7 * Copyright (C) 2008, 2009 PMC Sierra Inc.
5 * 8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
106#define PMCRAID_VSET_LUN_ID 0x0 109#define PMCRAID_VSET_LUN_ID 0x0
107#define PMCRAID_PHYS_BUS_ID 0x0 110#define PMCRAID_PHYS_BUS_ID 0x0
108#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8 111#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
109#define PMCRAID_MAX_VSET_TARGETS 240 112#define PMCRAID_MAX_VSET_TARGETS 0x7F
110#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8 113#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
111 114
112#define PMCRAID_IOA_MAX_SECTORS 32767 115#define PMCRAID_IOA_MAX_SECTORS 32767
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6b9bf23c7735..384afda7dbe9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
1570 struct fc_rport *rport, *drport; 1570 struct fc_rport *rport, *drport;
1571 u32 supported_classes; 1571 u32 supported_classes;
1572 1572
1573 unsigned long last_queue_full;
1574 unsigned long last_ramp_up;
1575
1576 uint16_t vp_idx; 1573 uint16_t vp_idx;
1577} fc_port_t; 1574} fc_port_t;
1578 1575
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
2265 uint32_t port0 :1; 2262 uint32_t port0 :1;
2266 uint32_t running_gold_fw :1; 2263 uint32_t running_gold_fw :1;
2267 uint32_t cpu_affinity_enabled :1; 2264 uint32_t cpu_affinity_enabled :1;
2265 uint32_t disable_msix_handshake :1;
2268 } flags; 2266 } flags;
2269 2267
2270 /* This spinlock is used to protect "io transactions", you must 2268 /* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
2387#define IS_QLA81XX(ha) (IS_QLA8001(ha)) 2385#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2388#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2386#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2389 IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2387 IS_QLA25XX(ha) || IS_QLA81XX(ha))
2388#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
2390#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \ 2389#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2391 (ha)->flags.msix_enabled) 2390 (ha)->flags.msix_enabled)
2392#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) 2391#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e21851358509..0b6801fc6389 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
72extern int ql2xfdmienable; 72extern int ql2xfdmienable;
73extern int ql2xallocfwdump; 73extern int ql2xallocfwdump;
74extern int ql2xextended_error_logging; 74extern int ql2xextended_error_logging;
75extern int ql2xqfullrampup;
76extern int ql2xqfulltracking;
77extern int ql2xiidmaenable; 75extern int ql2xiidmaenable;
78extern int ql2xmaxqueues; 76extern int ql2xmaxqueues;
79extern int ql2xmultique_tag; 77extern int ql2xmultique_tag;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b74924b279ef..73a793539d45 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1442 icb->firmware_options_2 |= 1442 icb->firmware_options_2 |=
1443 __constant_cpu_to_le32(BIT_18); 1443 __constant_cpu_to_le32(BIT_18);
1444 1444
1445 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); 1445 /* Use Disable MSIX Handshake mode for capable adapters */
1446 if (IS_MSIX_NACK_CAPABLE(ha)) {
1447 icb->firmware_options_2 &=
1448 __constant_cpu_to_le32(~BIT_22);
1449 ha->flags.disable_msix_handshake = 1;
1450 qla_printk(KERN_INFO, ha,
1451 "MSIX Handshake Disable Mode turned on\n");
1452 } else {
1453 icb->firmware_options_2 |=
1454 __constant_cpu_to_le32(BIT_22);
1455 }
1446 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 1456 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1447 1457
1448 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0); 1458 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 804987397b77..1692a883f4de 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -811,78 +811,6 @@ skip_rio:
811 qla2x00_alert_all_vps(rsp, mb); 811 qla2x00_alert_all_vps(rsp, mb);
812} 812}
813 813
814static void
815qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
816{
817 fc_port_t *fcport = data;
818 struct scsi_qla_host *vha = fcport->vha;
819 struct qla_hw_data *ha = vha->hw;
820 struct req_que *req = NULL;
821
822 if (!ql2xqfulltracking)
823 return;
824
825 req = vha->req;
826 if (!req)
827 return;
828 if (req->max_q_depth <= sdev->queue_depth)
829 return;
830
831 if (sdev->ordered_tags)
832 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
833 sdev->queue_depth + 1);
834 else
835 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
836 sdev->queue_depth + 1);
837
838 fcport->last_ramp_up = jiffies;
839
840 DEBUG2(qla_printk(KERN_INFO, ha,
841 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
842 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
843 sdev->queue_depth));
844}
845
846static void
847qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
848{
849 fc_port_t *fcport = data;
850
851 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
852 return;
853
854 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
855 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
856 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
857 sdev->queue_depth));
858}
859
860static inline void
861qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
862 srb_t *sp)
863{
864 fc_port_t *fcport;
865 struct scsi_device *sdev;
866
867 if (!ql2xqfulltracking)
868 return;
869
870 sdev = sp->cmd->device;
871 if (sdev->queue_depth >= req->max_q_depth)
872 return;
873
874 fcport = sp->fcport;
875 if (time_before(jiffies,
876 fcport->last_ramp_up + ql2xqfullrampup * HZ))
877 return;
878 if (time_before(jiffies,
879 fcport->last_queue_full + ql2xqfullrampup * HZ))
880 return;
881
882 starget_for_each_device(sdev->sdev_target, fcport,
883 qla2x00_adjust_sdev_qdepth_up);
884}
885
886/** 814/**
887 * qla2x00_process_completed_request() - Process a Fast Post response. 815 * qla2x00_process_completed_request() - Process a Fast Post response.
888 * @ha: SCSI driver HA context 816 * @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
913 841
914 /* Save ISP completion status */ 842 /* Save ISP completion status */
915 sp->cmd->result = DID_OK << 16; 843 sp->cmd->result = DID_OK << 16;
916
917 qla2x00_ramp_up_queue_depth(vha, req, sp);
918 qla2x00_sp_compl(ha, sp); 844 qla2x00_sp_compl(ha, sp);
919 } else { 845 } else {
920 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 846 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1435 "scsi(%ld): QUEUE FULL status detected " 1361 "scsi(%ld): QUEUE FULL status detected "
1436 "0x%x-0x%x.\n", vha->host_no, comp_status, 1362 "0x%x-0x%x.\n", vha->host_no, comp_status,
1437 scsi_status)); 1363 scsi_status));
1438
1439 /* Adjust queue depth for all luns on the port. */
1440 if (!ql2xqfulltracking)
1441 break;
1442 fcport->last_queue_full = jiffies;
1443 starget_for_each_device(cp->device->sdev_target,
1444 fcport, qla2x00_adjust_sdev_qdepth_down);
1445 break; 1364 break;
1446 } 1365 }
1447 if (lscsi_status != SS_CHECK_CONDITION) 1366 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1516 "scsi(%ld): QUEUE FULL status detected " 1435 "scsi(%ld): QUEUE FULL status detected "
1517 "0x%x-0x%x.\n", vha->host_no, comp_status, 1436 "0x%x-0x%x.\n", vha->host_no, comp_status,
1518 scsi_status)); 1437 scsi_status));
1519
1520 /*
1521 * Adjust queue depth for all luns on the
1522 * port.
1523 */
1524 if (!ql2xqfulltracking)
1525 break;
1526 fcport->last_queue_full = jiffies;
1527 starget_for_each_device(
1528 cp->device->sdev_target, fcport,
1529 qla2x00_adjust_sdev_qdepth_down);
1530 break; 1438 break;
1531 } 1439 }
1532 if (lscsi_status != SS_CHECK_CONDITION) 1440 if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2020 1928
2021 vha = qla25xx_get_host(rsp); 1929 vha = qla25xx_get_host(rsp);
2022 qla24xx_process_response_queue(vha, rsp); 1930 qla24xx_process_response_queue(vha, rsp);
2023 if (!ha->mqenable) { 1931 if (!ha->flags.disable_msix_handshake) {
2024 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1932 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025 RD_REG_DWORD_RELAXED(&reg->hccr); 1933 RD_REG_DWORD_RELAXED(&reg->hccr);
2026 } 1934 }
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2034{ 1942{
2035 struct qla_hw_data *ha; 1943 struct qla_hw_data *ha;
2036 struct rsp_que *rsp; 1944 struct rsp_que *rsp;
1945 struct device_reg_24xx __iomem *reg;
2037 1946
2038 rsp = (struct rsp_que *) dev_id; 1947 rsp = (struct rsp_que *) dev_id;
2039 if (!rsp) { 1948 if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2043 } 1952 }
2044 ha = rsp->hw; 1953 ha = rsp->hw;
2045 1954
1955 /* Clear the interrupt, if enabled, for this response queue */
1956 if (rsp->options & ~BIT_6) {
1957 reg = &ha->iobase->isp24;
1958 spin_lock_irq(&ha->hardware_lock);
1959 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1960 RD_REG_DWORD_RELAXED(&reg->hccr);
1961 spin_unlock_irq(&ha->hardware_lock);
1962 }
2046 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1963 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2047 1964
2048 return IRQ_HANDLED; 1965 return IRQ_HANDLED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a47d34308a3a..2a4c7f4e7b69 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
696 /* Use alternate PCI devfn */ 696 /* Use alternate PCI devfn */
697 if (LSB(rsp->rid)) 697 if (LSB(rsp->rid))
698 options |= BIT_5; 698 options |= BIT_5;
 699 /* Enable MSIX handshake mode for adapters that cannot disable it */
700 if (!IS_MSIX_NACK_CAPABLE(ha))
701 options |= BIT_6;
702
699 rsp->options = options; 703 rsp->options = options;
700 rsp->id = que_id; 704 rsp->id = que_id;
701 reg = ISP_QUE_REG(ha, que_id); 705 reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41669357b186..2f873d237325 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
78MODULE_PARM_DESC(ql2xmaxqdepth, 78MODULE_PARM_DESC(ql2xmaxqdepth,
79 "Maximum queue depth to report for target devices."); 79 "Maximum queue depth to report for target devices.");
80 80
81int ql2xqfulltracking = 1;
82module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
83MODULE_PARM_DESC(ql2xqfulltracking,
84 "Controls whether the driver tracks queue full status "
85 "returns and dynamically adjusts a scsi device's queue "
86 "depth. Default is 1, perform tracking. Set to 0 to "
87 "disable dynamic tracking and adjustment of queue depth.");
88
89int ql2xqfullrampup = 120;
90module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(ql2xqfullrampup,
92 "Number of seconds to wait to begin to ramp-up the queue "
93 "depth for a device after a queue-full condition has been "
94 "detected. Default is 120 seconds.");
95
96int ql2xiidmaenable=1; 81int ql2xiidmaenable=1;
97module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 82module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
98MODULE_PARM_DESC(ql2xiidmaenable, 83MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1217 sdev->hostdata = NULL; 1202 sdev->hostdata = NULL;
1218} 1203}
1219 1204
1205static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1206{
1207 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1208
1209 if (!scsi_track_queue_full(sdev, qdepth))
1210 return;
1211
1212 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1213 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1214 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1215 sdev->queue_depth));
1216}
1217
1218static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1219{
1220 fc_port_t *fcport = sdev->hostdata;
1221 struct scsi_qla_host *vha = fcport->vha;
1222 struct qla_hw_data *ha = vha->hw;
1223 struct req_que *req = NULL;
1224
1225 req = vha->req;
1226 if (!req)
1227 return;
1228
1229 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1230 return;
1231
1232 if (sdev->ordered_tags)
1233 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1234 else
1235 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1236
1237 DEBUG2(qla_printk(KERN_INFO, ha,
1238 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1239 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1240 sdev->queue_depth));
1241}
1242
1220static int 1243static int
1221qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 1244qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1222{ 1245{
1223 if (reason != SCSI_QDEPTH_DEFAULT) 1246 switch (reason) {
1224 return -EOPNOTSUPP; 1247 case SCSI_QDEPTH_DEFAULT:
1248 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1249 break;
1250 case SCSI_QDEPTH_QFULL:
1251 qla2x00_handle_queue_full(sdev, qdepth);
1252 break;
1253 case SCSI_QDEPTH_RAMP_UP:
1254 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1255 break;
1256 default:
1257 return EOPNOTSUPP;
1258 }
1225 1259
1226 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1227 return sdev->queue_depth; 1260 return sdev->queue_depth;
1228} 1261}
1229 1262
@@ -2003,13 +2036,13 @@ skip_dpc:
2003 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2036 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2004 base_vha->host_no, ha)); 2037 base_vha->host_no, ha));
2005 2038
2006 base_vha->flags.init_done = 1;
2007 base_vha->flags.online = 1;
2008
2009 ret = scsi_add_host(host, &pdev->dev); 2039 ret = scsi_add_host(host, &pdev->dev);
2010 if (ret) 2040 if (ret)
2011 goto probe_failed; 2041 goto probe_failed;
2012 2042
2043 base_vha->flags.init_done = 1;
2044 base_vha->flags.online = 1;
2045
2013 ha->isp_ops->enable_intrs(ha); 2046 ha->isp_ops->enable_intrs(ha);
2014 2047
2015 scsi_scan_host(host); 2048 scsi_scan_host(host);
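The qla_os.c hunk above drops the driver's private queue-full tracking and instead acts on the reason code passed to change_queue_depth(). A minimal userspace sketch of that dispatch shape; the reason names mirror the SCSI_QDEPTH_* constants in the hunk, while the device structure and the exact policy are hypothetical:

#include <stdio.h>

enum qdepth_reason { QDEPTH_DEFAULT, QDEPTH_QFULL, QDEPTH_RAMP_UP };

struct dev { int depth; int max_depth; }; /* hypothetical stand-in for an sdev */

static int change_queue_depth(struct dev *d, int qdepth, enum qdepth_reason why)
{
	switch (why) {
	case QDEPTH_DEFAULT:            /* explicit request: take it as-is */
		d->depth = qdepth;
		break;
	case QDEPTH_QFULL:              /* target reported QUEUE FULL: back off */
		if (qdepth < d->depth)
			d->depth = qdepth;
		break;
	case QDEPTH_RAMP_UP:            /* midlayer suggests growing again */
		if (qdepth <= d->max_depth)
			d->depth = qdepth;
		break;
	default:
		return -1;              /* unsupported reason */
	}
	return d->depth;
}

int main(void)
{
	struct dev d = { 32, 64 };

	printf("qfull   -> %d\n", change_queue_depth(&d, 16, QDEPTH_QFULL));
	printf("ramp-up -> %d\n", change_queue_depth(&d, 24, QDEPTH_RAMP_UP));
	return 0;
}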
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 807e0dbc67fa..c482220f7eed 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k7" 10#define QLA2XXX_VERSION "8.03.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e495d3813948..d8927681ec88 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 case 0x07: /* operation in progress */ 859 case 0x07: /* operation in progress */
860 case 0x08: /* Long write in progress */ 860 case 0x08: /* Long write in progress */
861 case 0x09: /* self test in progress */ 861 case 0x09: /* self test in progress */
862 case 0x14: /* space allocation in progress */
862 action = ACTION_DELAYED_RETRY; 863 action = ACTION_DELAYED_RETRY;
863 break; 864 break;
864 default: 865 default:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6531c91501be..ddfcecd5099f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
649 return error; 649 return error;
650 error = transport_class_register(&fc_vport_class); 650 error = transport_class_register(&fc_vport_class);
651 if (error) 651 if (error)
652 return error; 652 goto unreg_host_class;
653 error = transport_class_register(&fc_rport_class); 653 error = transport_class_register(&fc_rport_class);
654 if (error) 654 if (error)
655 return error; 655 goto unreg_vport_class;
656 return transport_class_register(&fc_transport_class); 656 error = transport_class_register(&fc_transport_class);
657 if (error)
658 goto unreg_rport_class;
659 return 0;
660
661unreg_rport_class:
662 transport_class_unregister(&fc_rport_class);
663unreg_vport_class:
664 transport_class_unregister(&fc_vport_class);
665unreg_host_class:
666 transport_class_unregister(&fc_host_class);
667 return error;
657} 668}
658 669
659static void __exit fc_transport_exit(void) 670static void __exit fc_transport_exit(void)
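The fc_transport_init() hunk above replaces early returns with the usual unwind-on-failure pattern, so classes registered before a failure are unregistered again. A stripped-down sketch of the same shape; the register/unregister pairs here are made up and only the control flow mirrors the hunk:

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; } /* pretend the third step fails */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int init_all(void)
{
	int error;

	error = register_a();
	if (error)
		return error;
	error = register_b();
	if (error)
		goto unreg_a;
	error = register_c();
	if (error)
		goto unreg_b;
	return 0;

unreg_b:
	unregister_b();  /* undo in reverse order of registration */
unreg_a:
	unregister_a();
	return error;
}

int main(void)
{
	printf("init_all: %d\n", init_all());
	return 0;
}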
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f33..255da53e5a01 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
264 return snprintf(buf, 20, "%u\n", sdkp->ATO); 264 return snprintf(buf, 20, "%u\n", sdkp->ATO);
265} 265}
266 266
267static ssize_t
268sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
269 char *buf)
270{
271 struct scsi_disk *sdkp = to_scsi_disk(dev);
272
273 return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
274}
275
267static struct device_attribute sd_disk_attrs[] = { 276static struct device_attribute sd_disk_attrs[] = {
268 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 277 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
269 sd_store_cache_type), 278 sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
274 sd_store_manage_start_stop), 283 sd_store_manage_start_stop),
275 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 284 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
276 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 285 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
286 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
277 __ATTR_NULL, 287 __ATTR_NULL,
278}; 288};
279 289
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
399} 409}
400 410
401/** 411/**
412 * sd_prepare_discard - unmap blocks on thinly provisioned device
413 * @rq: Request to prepare
414 *
415 * Will issue either UNMAP or WRITE SAME(16) depending on preference
416 * indicated by target device.
417 **/
418static int sd_prepare_discard(struct request *rq)
419{
420 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
421 struct bio *bio = rq->bio;
422 sector_t sector = bio->bi_sector;
423 unsigned int num = bio_sectors(bio);
424
425 if (sdkp->device->sector_size == 4096) {
426 sector >>= 3;
427 num >>= 3;
428 }
429
430 rq->cmd_type = REQ_TYPE_BLOCK_PC;
431 rq->timeout = SD_TIMEOUT;
432
433 memset(rq->cmd, 0, rq->cmd_len);
434
435 if (sdkp->unmap) {
436 char *buf = kmap_atomic(bio_page(bio), KM_USER0);
437
438 rq->cmd[0] = UNMAP;
439 rq->cmd[8] = 24;
440 rq->cmd_len = 10;
441
442 /* Ensure that data length matches payload */
443 rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
444
445 put_unaligned_be16(6 + 16, &buf[0]);
446 put_unaligned_be16(16, &buf[2]);
447 put_unaligned_be64(sector, &buf[8]);
448 put_unaligned_be32(num, &buf[16]);
449
450 kunmap_atomic(buf, KM_USER0);
451 } else {
452 rq->cmd[0] = WRITE_SAME_16;
453 rq->cmd[1] = 0x8; /* UNMAP */
454 put_unaligned_be64(sector, &rq->cmd[2]);
455 put_unaligned_be32(num, &rq->cmd[10]);
456 rq->cmd_len = 16;
457 }
458
459 return BLKPREP_OK;
460}
461
462/**
402 * sd_init_command - build a scsi (read or write) command from 463 * sd_init_command - build a scsi (read or write) command from
403 * information in the request structure. 464 * information in the request structure.
404 * @SCpnt: pointer to mid-level's per scsi command structure that 465 * @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
418 int ret, host_dif; 479 int ret, host_dif;
419 unsigned char protect; 480 unsigned char protect;
420 481
482 /*
483 * Discard request come in as REQ_TYPE_FS but we turn them into
484 * block PC requests to make life easier.
485 */
486 if (blk_discard_rq(rq))
487 ret = sd_prepare_discard(rq);
488
421 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 489 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
422 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 490 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
423 goto out; 491 goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1432 sd_printk(KERN_NOTICE, sdkp, 1500 sd_printk(KERN_NOTICE, sdkp,
1433 "physical block alignment offset: %u\n", alignment); 1501 "physical block alignment offset: %u\n", alignment);
1434 1502
1503 if (buffer[14] & 0x80) { /* TPE */
1504 struct request_queue *q = sdp->request_queue;
1505
1506 sdkp->thin_provisioning = 1;
1507 q->limits.discard_granularity = sdkp->hw_sector_size;
1508 q->limits.max_discard_sectors = 0xffffffff;
1509
1510 if (buffer[14] & 0x40) /* TPRZ */
1511 q->limits.discard_zeroes_data = 1;
1512
1513 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1514 }
1515
1435 sdkp->capacity = lba + 1; 1516 sdkp->capacity = lba + 1;
1436 return sector_size; 1517 return sector_size;
1437} 1518}
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1863 */ 1944 */
1864static void sd_read_block_limits(struct scsi_disk *sdkp) 1945static void sd_read_block_limits(struct scsi_disk *sdkp)
1865{ 1946{
1947 struct request_queue *q = sdkp->disk->queue;
1866 unsigned int sector_sz = sdkp->device->sector_size; 1948 unsigned int sector_sz = sdkp->device->sector_size;
1867 char *buffer; 1949 char *buffer;
1868 1950
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1877 blk_queue_io_opt(sdkp->disk->queue, 1959 blk_queue_io_opt(sdkp->disk->queue,
1878 get_unaligned_be32(&buffer[12]) * sector_sz); 1960 get_unaligned_be32(&buffer[12]) * sector_sz);
1879 1961
1962 /* Thin provisioning enabled and page length indicates TP support */
1963 if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
1964 unsigned int lba_count, desc_count, granularity;
1965
1966 lba_count = get_unaligned_be32(&buffer[20]);
1967 desc_count = get_unaligned_be32(&buffer[24]);
1968
1969 if (lba_count) {
1970 q->limits.max_discard_sectors =
1971 lba_count * sector_sz >> 9;
1972
1973 if (desc_count)
1974 sdkp->unmap = 1;
1975 }
1976
1977 granularity = get_unaligned_be32(&buffer[28]);
1978
1979 if (granularity)
1980 q->limits.discard_granularity = granularity * sector_sz;
1981
1982 if (buffer[32] & 0x80)
1983 q->limits.discard_alignment =
1984 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
1985 }
1986
1880 kfree(buffer); 1987 kfree(buffer);
1881} 1988}
1882 1989
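For reference, a small userspace sketch of the 24-byte UNMAP parameter list that the sd_prepare_discard() hunk above builds: a header followed by one block descriptor, all fields big-endian. The LBA and block count below are made-up example values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }
static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
}
static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32((uint32_t)v, p + 4);
}

int main(void)
{
	uint8_t buf[24];
	uint64_t lba = 123456; /* example start block */
	uint32_t num = 2048;   /* example number of blocks */

	memset(buf, 0, sizeof(buf));
	put_be16(6 + 16, &buf[0]); /* data length: rest of header + one descriptor */
	put_be16(16, &buf[2]);     /* block descriptor data length */
	put_be64(lba, &buf[8]);    /* descriptor: starting LBA */
	put_be32(num, &buf[16]);   /* descriptor: number of blocks to unmap */

	for (int i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}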
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804d26fb..43d3caf268ef 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
60 unsigned RCD : 1; /* state of disk RCD bit, unused */ 60 unsigned RCD : 1; /* state of disk RCD bit, unused */
61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
62 unsigned first_scan : 1; 62 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1;
64 unsigned unmap : 1;
63}; 65};
64#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
65 67
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad59abb47722..d04ea9a6f673 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
552 SRpnt->waiting = waiting; 552 SRpnt->waiting = waiting;
553 553
554 if (STp->buffer->do_dio) { 554 if (STp->buffer->do_dio) {
555 mdata->page_order = 0;
555 mdata->nr_entries = STp->buffer->sg_segs; 556 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages; 557 mdata->pages = STp->buffer->mapped_pages;
557 } else { 558 } else {
559 mdata->page_order = STp->buffer->reserved_page_order;
558 mdata->nr_entries = 560 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); 561 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages; 562 mdata->pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0; 563 mdata->offset = 0;
562 } 564 }
563 565
564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 566 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3719 priority |= __GFP_ZERO; 3721 priority |= __GFP_ZERO;
3720 3722
3721 if (STbuffer->frp_segs) { 3723 if (STbuffer->frp_segs) {
3722 order = STbuffer->map_data.page_order; 3724 order = STbuffer->reserved_page_order;
3723 b_size = PAGE_SIZE << order; 3725 b_size = PAGE_SIZE << order;
3724 } else { 3726 } else {
3725 for (b_size = PAGE_SIZE, order = 0; 3727 for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3752 segs++; 3754 segs++;
3753 } 3755 }
3754 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); 3756 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3755 STbuffer->map_data.page_order = order; 3757 STbuffer->reserved_page_order = order;
3756 3758
3757 return 1; 3759 return 1;
3758} 3760}
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3765 3767
3766 for (i=0; i < st_bp->frp_segs; i++) 3768 for (i=0; i < st_bp->frp_segs; i++)
3767 memset(page_address(st_bp->reserved_pages[i]), 0, 3769 memset(page_address(st_bp->reserved_pages[i]), 0,
3768 PAGE_SIZE << st_bp->map_data.page_order); 3770 PAGE_SIZE << st_bp->reserved_page_order);
3769 st_bp->cleared = 1; 3771 st_bp->cleared = 1;
3770} 3772}
3771 3773
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3773/* Release the extra buffer */ 3775/* Release the extra buffer */
3774static void normalize_buffer(struct st_buffer * STbuffer) 3776static void normalize_buffer(struct st_buffer * STbuffer)
3775{ 3777{
3776 int i, order = STbuffer->map_data.page_order; 3778 int i, order = STbuffer->reserved_page_order;
3777 3779
3778 for (i = 0; i < STbuffer->frp_segs; i++) { 3780 for (i = 0; i < STbuffer->frp_segs; i++) {
3779 __free_pages(STbuffer->reserved_pages[i], order); 3781 __free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3781 } 3783 }
3782 STbuffer->frp_segs = 0; 3784 STbuffer->frp_segs = 0;
3783 STbuffer->sg_segs = 0; 3785 STbuffer->sg_segs = 0;
3784 STbuffer->map_data.page_order = 0; 3786 STbuffer->reserved_page_order = 0;
3785 STbuffer->map_data.offset = 0; 3787 STbuffer->map_data.offset = 0;
3786} 3788}
3787 3789
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3791static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3793static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3792{ 3794{
3793 int i, cnt, res, offset; 3795 int i, cnt, res, offset;
3794 int length = PAGE_SIZE << st_bp->map_data.page_order; 3796 int length = PAGE_SIZE << st_bp->reserved_page_order;
3795 3797
3796 for (i = 0, offset = st_bp->buffer_bytes; 3798 for (i = 0, offset = st_bp->buffer_bytes;
3797 i < st_bp->frp_segs && offset >= length; i++) 3799 i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3823static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3825static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3824{ 3826{
3825 int i, cnt, res, offset; 3827 int i, cnt, res, offset;
3826 int length = PAGE_SIZE << st_bp->map_data.page_order; 3828 int length = PAGE_SIZE << st_bp->reserved_page_order;
3827 3829
3828 for (i = 0, offset = st_bp->read_pointer; 3830 for (i = 0, offset = st_bp->read_pointer;
3829 i < st_bp->frp_segs && offset >= length; i++) 3831 i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3856{ 3858{
3857 int src_seg, dst_seg, src_offset = 0, dst_offset; 3859 int src_seg, dst_seg, src_offset = 0, dst_offset;
3858 int count, total; 3860 int count, total;
3859 int length = PAGE_SIZE << st_bp->map_data.page_order; 3861 int length = PAGE_SIZE << st_bp->reserved_page_order;
3860 3862
3861 if (offset == 0) 3863 if (offset == 0)
3862 return; 3864 return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4578 } 4580 }
4579 4581
4580 mdata->offset = uaddr & ~PAGE_MASK; 4582 mdata->offset = uaddr & ~PAGE_MASK;
4581 mdata->page_order = 0;
4582 STbp->mapped_pages = pages; 4583 STbp->mapped_pages = pages;
4583 4584
4584 return nr_pages; 4585 return nr_pages;
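The st changes above give the reserved buffer its own reserved_page_order field while keeping the same sizing arithmetic: each segment spans PAGE_SIZE << order bytes, and the segment count is a round-up division. A tiny userspace sketch of that arithmetic with an example page size and order:

#include <stdio.h>

#define PAGE_SIZE 4096UL /* example value for the sketch only */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long order = 2;                     /* 4 pages per segment */
	unsigned long seg_size = PAGE_SIZE << order; /* 16384 bytes */
	unsigned long bytes = 100000;                /* requested buffer size */

	printf("segment size: %lu\n", seg_size);
	printf("segments needed: %lu\n", DIV_ROUND_UP(bytes, seg_size));
	return 0;
}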
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b1f548..f91a67c6d968 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
46 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
47 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages; 48 struct page **reserved_pages;
49 int reserved_page_order;
49 struct page **mapped_pages; 50 struct page **mapped_pages;
50 struct rq_map_data map_data; 51 struct rq_map_data map_data;
51 unsigned char *b_data; 52 unsigned char *b_data;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2d9d70359360..f55eb0107336 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -216,6 +216,17 @@ config SPI_S3C24XX
216 help 216 help
217 SPI driver for Samsung S3C24XX series ARM SoCs 217 SPI driver for Samsung S3C24XX series ARM SoCs
218 218
219config SPI_S3C24XX_FIQ
220 bool "S3C24XX driver with FIQ pseudo-DMA"
221 depends on SPI_S3C24XX
222 select FIQ
223 help
 224 Enable FIQ support for the S3C24XX SPI driver to provide pseudo
 225 DMA by using the fast-interrupt request framework. This allows
 226 the driver to get DMA-like performance when there are either
 227 no free DMA channels, or when doing transfers that require both
 228 TX and RX data paths.
229
219config SPI_S3C24XX_GPIO 230config SPI_S3C24XX_GPIO
220 tristate "Samsung S3C24XX series SPI by GPIO" 231 tristate "Samsung S3C24XX series SPI by GPIO"
221 depends on ARCH_S3C2410 && EXPERIMENTAL 232 depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -226,6 +237,13 @@ config SPI_S3C24XX_GPIO
226 the inbuilt hardware cannot provide the transfer mode, or 237 the inbuilt hardware cannot provide the transfer mode, or
227 where the board is using non hardware connected pins. 238 where the board is using non hardware connected pins.
228 239
240config SPI_S3C64XX
241 tristate "Samsung S3C64XX series type SPI"
242 depends on ARCH_S3C64XX && EXPERIMENTAL
243 select S3C64XX_DMA
244 help
245 SPI driver for Samsung S3C64XX and newer SoCs.
246
229config SPI_SH_MSIOF 247config SPI_SH_MSIOF
230 tristate "SuperH MSIOF SPI controller" 248 tristate "SuperH MSIOF SPI controller"
231 depends on SUPERH && HAVE_CLK 249 depends on SUPERH && HAVE_CLK
@@ -289,6 +307,16 @@ config SPI_NUC900
289# Add new SPI master controllers in alphabetical order above this line 307# Add new SPI master controllers in alphabetical order above this line
290# 308#
291 309
310config SPI_DESIGNWARE
311 bool "DesignWare SPI controller core support"
312 depends on SPI_MASTER
313 help
 314 General driver for the SPI controller core from DesignWare.
315
316config SPI_DW_PCI
317 tristate "PCI interface driver for DW SPI core"
318 depends on SPI_DESIGNWARE && PCI
319
292# 320#
293# There are lots of SPI device types, with sensors and memory 321# There are lots of SPI device types, with sensors and memory
294# being probably the most widely used ones. 322# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ed8c1675b52f..f3d2810ba11c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
16obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 16obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
20obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 21obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += spi_imx.o 22obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 23obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,7 +32,8 @@ obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
30obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 32obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
31obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 33obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
32obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 34obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
33obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 35obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
36obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
34obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 37obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
35obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o 38obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
36obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o 39obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
@@ -39,6 +42,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
39obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o 42obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
40obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o 43obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
41obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o 44obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
45
46# special build for s3c24xx spi driver with fiq support
47spi_s3c24xx_hw-y := spi_s3c24xx.o
48spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
49
42# ... add above this line ... 50# ... add above this line ...
43 51
44# SPI protocol drivers (device/link on bus) 52# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index f5b3fdbb1e27..d21c24eaf0a9 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -189,14 +189,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
189 189
190 /* use scratch buffer only when rx or tx data is unspecified */ 190 /* use scratch buffer only when rx or tx data is unspecified */
191 if (xfer->rx_buf) 191 if (xfer->rx_buf)
192 *rx_dma = xfer->rx_dma + xfer->len - len; 192 *rx_dma = xfer->rx_dma + xfer->len - *plen;
193 else { 193 else {
194 *rx_dma = as->buffer_dma; 194 *rx_dma = as->buffer_dma;
195 if (len > BUFFER_SIZE) 195 if (len > BUFFER_SIZE)
196 len = BUFFER_SIZE; 196 len = BUFFER_SIZE;
197 } 197 }
198 if (xfer->tx_buf) 198 if (xfer->tx_buf)
199 *tx_dma = xfer->tx_dma + xfer->len - len; 199 *tx_dma = xfer->tx_dma + xfer->len - *plen;
200 else { 200 else {
201 *tx_dma = as->buffer_dma; 201 *tx_dma = as->buffer_dma;
202 if (len > BUFFER_SIZE) 202 if (len > BUFFER_SIZE)
@@ -788,7 +788,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
788 spin_lock_init(&as->lock); 788 spin_lock_init(&as->lock);
789 INIT_LIST_HEAD(&as->queue); 789 INIT_LIST_HEAD(&as->queue);
790 as->pdev = pdev; 790 as->pdev = pdev;
791 as->regs = ioremap(regs->start, (regs->end - regs->start) + 1); 791 as->regs = ioremap(regs->start, resource_size(regs));
792 if (!as->regs) 792 if (!as->regs)
793 goto out_free_buffer; 793 goto out_free_buffer;
794 as->irq = irq; 794 as->irq = irq;
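A small arithmetic sketch of the atmel_spi_next_xfer_data() fix above: when one direction falls back to the bounce buffer, len may get clamped to BUFFER_SIZE, so the other direction's DMA offset has to be computed from the remaining length (*plen) rather than from the clamped len. The numbers below are made up to show the difference:

#include <stdio.h>

#define BUFFER_SIZE 4096u /* illustrative bounce-buffer size */

int main(void)
{
	unsigned int total = 10000;     /* xfer->len */
	unsigned int remaining = 6000;  /* *plen: bytes still to transfer */
	unsigned int len = remaining;

	if (len > BUFFER_SIZE)          /* clamp taken for the bounce buffer */
		len = BUFFER_SIZE;

	printf("old offset: %u\n", total - len);       /* 5904: wrong resume point */
	printf("new offset: %u\n", total - remaining); /* 4000: bytes already done */
	return 0;
}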
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
new file mode 100644
index 000000000000..31620fae77be
--- /dev/null
+++ b/drivers/spi/dw_spi.c
@@ -0,0 +1,944 @@
1/*
2 * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/highmem.h>
23#include <linux/delay.h>
24
25#include <linux/spi/dw_spi.h>
26#include <linux/spi/spi.h>
27
28#ifdef CONFIG_DEBUG_FS
29#include <linux/debugfs.h>
30#endif
31
32#define START_STATE ((void *)0)
33#define RUNNING_STATE ((void *)1)
34#define DONE_STATE ((void *)2)
35#define ERROR_STATE ((void *)-1)
36
37#define QUEUE_RUNNING 0
38#define QUEUE_STOPPED 1
39
40#define MRST_SPI_DEASSERT 0
41#define MRST_SPI_ASSERT 1
42
43/* Slave spi_dev related */
44struct chip_data {
45 u16 cr0;
46 u8 cs; /* chip select pin */
47 u8 n_bytes; /* current is a 1/2/4 byte op */
48 u8 tmode; /* TR/TO/RO/EEPROM */
49 u8 type; /* SPI/SSP/MicroWire */
50
51 u8 poll_mode; /* 1 means use poll mode */
52
53 u32 dma_width;
54 u32 rx_threshold;
55 u32 tx_threshold;
56 u8 enable_dma;
57 u8 bits_per_word;
58 u16 clk_div; /* baud rate divider */
59 u32 speed_hz; /* baud rate */
60 int (*write)(struct dw_spi *dws);
61 int (*read)(struct dw_spi *dws);
62 void (*cs_control)(u32 command);
63};
64
65#ifdef CONFIG_DEBUG_FS
66static int spi_show_regs_open(struct inode *inode, struct file *file)
67{
68 file->private_data = inode->i_private;
69 return 0;
70}
71
72#define SPI_REGS_BUFSIZE 1024
73static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct dw_spi *dws;
77 char *buf;
78 u32 len = 0;
79 ssize_t ret;
80
81 dws = file->private_data;
82
83 buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
84 if (!buf)
85 return 0;
86
87 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
88 "MRST SPI0 registers:\n");
89 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
90 "=================================\n");
91 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
92 "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
93 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
94 "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
95 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
96 "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
97 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
98 "SER: \t\t0x%08x\n", dw_readl(dws, ser));
99 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
100 "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
101 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
102 "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
103 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
104 "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
105 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
106 "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
107 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
108 "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
109 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
110 "SR: \t\t0x%08x\n", dw_readl(dws, sr));
111 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
112 "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
113 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
114 "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
115 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
116 "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
117 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
118 "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
119 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
120 "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
121 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
122 "=================================\n");
123
124 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
125 kfree(buf);
126 return ret;
127}
128
129static const struct file_operations mrst_spi_regs_ops = {
130 .owner = THIS_MODULE,
131 .open = spi_show_regs_open,
132 .read = spi_show_regs,
133};
134
135static int mrst_spi_debugfs_init(struct dw_spi *dws)
136{
137 dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
138 if (!dws->debugfs)
139 return -ENOMEM;
140
141 debugfs_create_file("registers", S_IFREG | S_IRUGO,
142 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
143 return 0;
144}
145
146static void mrst_spi_debugfs_remove(struct dw_spi *dws)
147{
148 if (dws->debugfs)
149 debugfs_remove_recursive(dws->debugfs);
150}
151
152#else
153static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
154{
155}
156
157static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
158{
159}
160#endif /* CONFIG_DEBUG_FS */
161
162static void wait_till_not_busy(struct dw_spi *dws)
163{
164 unsigned long end = jiffies + usecs_to_jiffies(1000);
165
166 while (time_before(jiffies, end)) {
167 if (!(dw_readw(dws, sr) & SR_BUSY))
168 return;
169 }
170 dev_err(&dws->master->dev,
 171 "DW SPI: Status stays busy for 1000us after a read/write!\n");
172}
173
174static void flush(struct dw_spi *dws)
175{
176 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
177 dw_readw(dws, dr);
178
179 wait_till_not_busy(dws);
180}
181
182static void null_cs_control(u32 command)
183{
184}
185
186static int null_writer(struct dw_spi *dws)
187{
188 u8 n_bytes = dws->n_bytes;
189
190 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
191 || (dws->tx == dws->tx_end))
192 return 0;
193 dw_writew(dws, dr, 0);
194 dws->tx += n_bytes;
195
196 wait_till_not_busy(dws);
197 return 1;
198}
199
200static int null_reader(struct dw_spi *dws)
201{
202 u8 n_bytes = dws->n_bytes;
203
204 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
205 && (dws->rx < dws->rx_end)) {
206 dw_readw(dws, dr);
207 dws->rx += n_bytes;
208 }
209 wait_till_not_busy(dws);
210 return dws->rx == dws->rx_end;
211}
212
213static int u8_writer(struct dw_spi *dws)
214{
215 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
216 || (dws->tx == dws->tx_end))
217 return 0;
218
219 dw_writew(dws, dr, *(u8 *)(dws->tx));
220 ++dws->tx;
221
222 wait_till_not_busy(dws);
223 return 1;
224}
225
226static int u8_reader(struct dw_spi *dws)
227{
228 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
229 && (dws->rx < dws->rx_end)) {
230 *(u8 *)(dws->rx) = dw_readw(dws, dr);
231 ++dws->rx;
232 }
233
234 wait_till_not_busy(dws);
235 return dws->rx == dws->rx_end;
236}
237
238static int u16_writer(struct dw_spi *dws)
239{
240 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
241 || (dws->tx == dws->tx_end))
242 return 0;
243
244 dw_writew(dws, dr, *(u16 *)(dws->tx));
245 dws->tx += 2;
246
247 wait_till_not_busy(dws);
248 return 1;
249}
250
251static int u16_reader(struct dw_spi *dws)
252{
253 u16 temp;
254
255 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
256 && (dws->rx < dws->rx_end)) {
257 temp = dw_readw(dws, dr);
258 *(u16 *)(dws->rx) = temp;
259 dws->rx += 2;
260 }
261
262 wait_till_not_busy(dws);
263 return dws->rx == dws->rx_end;
264}
265
266static void *next_transfer(struct dw_spi *dws)
267{
268 struct spi_message *msg = dws->cur_msg;
269 struct spi_transfer *trans = dws->cur_transfer;
270
271 /* Move to next transfer */
272 if (trans->transfer_list.next != &msg->transfers) {
273 dws->cur_transfer =
274 list_entry(trans->transfer_list.next,
275 struct spi_transfer,
276 transfer_list);
277 return RUNNING_STATE;
278 } else
279 return DONE_STATE;
280}
281
282/*
283 * Note: first step is the protocol driver prepares
284 * a dma-capable memory, and this func just need translate
285 * the virt addr to physical
286 */
287static int map_dma_buffers(struct dw_spi *dws)
288{
289 if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
290 || !dws->cur_chip->enable_dma)
291 return 0;
292
293 if (dws->cur_transfer->tx_dma)
294 dws->tx_dma = dws->cur_transfer->tx_dma;
295
296 if (dws->cur_transfer->rx_dma)
297 dws->rx_dma = dws->cur_transfer->rx_dma;
298
299 return 1;
300}
301
302/* Caller already set message->status; dma and pio irqs are blocked */
303static void giveback(struct dw_spi *dws)
304{
305 struct spi_transfer *last_transfer;
306 unsigned long flags;
307 struct spi_message *msg;
308
309 spin_lock_irqsave(&dws->lock, flags);
310 msg = dws->cur_msg;
311 dws->cur_msg = NULL;
312 dws->cur_transfer = NULL;
313 dws->prev_chip = dws->cur_chip;
314 dws->cur_chip = NULL;
315 dws->dma_mapped = 0;
316 queue_work(dws->workqueue, &dws->pump_messages);
317 spin_unlock_irqrestore(&dws->lock, flags);
318
319 last_transfer = list_entry(msg->transfers.prev,
320 struct spi_transfer,
321 transfer_list);
322
323 if (!last_transfer->cs_change)
324 dws->cs_control(MRST_SPI_DEASSERT);
325
326 msg->state = NULL;
327 if (msg->complete)
328 msg->complete(msg->context);
329}
330
331static void int_error_stop(struct dw_spi *dws, const char *msg)
332{
333 /* Stop and reset hw */
334 flush(dws);
335 spi_enable_chip(dws, 0);
336
337 dev_err(&dws->master->dev, "%s\n", msg);
338 dws->cur_msg->state = ERROR_STATE;
339 tasklet_schedule(&dws->pump_transfers);
340}
341
342static void transfer_complete(struct dw_spi *dws)
343{
344 /* Update the total number of bytes transferred for this message */
345 dws->cur_msg->actual_length += dws->len;
346
347 /* Move to next transfer */
348 dws->cur_msg->state = next_transfer(dws);
349
350 /* Handle end of message */
351 if (dws->cur_msg->state == DONE_STATE) {
352 dws->cur_msg->status = 0;
353 giveback(dws);
354 } else
355 tasklet_schedule(&dws->pump_transfers);
356}
357
358static irqreturn_t interrupt_transfer(struct dw_spi *dws)
359{
360 u16 irq_status, irq_mask = 0x3f;
361
362 irq_status = dw_readw(dws, isr) & irq_mask;
363 /* Error handling */
364 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
365 dw_readw(dws, txoicr);
366 dw_readw(dws, rxoicr);
367 dw_readw(dws, rxuicr);
368 int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
369 return IRQ_HANDLED;
370 }
371
372 /* INT comes from tx */
373 if (dws->tx && (irq_status & SPI_INT_TXEI)) {
374 while (dws->tx < dws->tx_end)
375 dws->write(dws);
376
377 if (dws->tx == dws->tx_end) {
378 spi_mask_intr(dws, SPI_INT_TXEI);
379 transfer_complete(dws);
380 }
381 }
382
383 /* INT comes from rx */
384 if (dws->rx && (irq_status & SPI_INT_RXFI)) {
385 if (dws->read(dws))
386 transfer_complete(dws);
387 }
388 return IRQ_HANDLED;
389}
390
391static irqreturn_t dw_spi_irq(int irq, void *dev_id)
392{
393 struct dw_spi *dws = dev_id;
394
395 if (!dws->cur_msg) {
396 spi_mask_intr(dws, SPI_INT_TXEI);
397 /* Never fail */
398 return IRQ_HANDLED;
399 }
400
401 return dws->transfer_handler(dws);
402}
403
404/* Must be called inside pump_transfers() */
405static void poll_transfer(struct dw_spi *dws)
406{
407 if (dws->tx) {
408 while (dws->write(dws))
409 dws->read(dws);
410 }
411
412 dws->read(dws);
413 transfer_complete(dws);
414}
415
416static void dma_transfer(struct dw_spi *dws, int cs_change)
417{
418}
419
420static void pump_transfers(unsigned long data)
421{
422 struct dw_spi *dws = (struct dw_spi *)data;
423 struct spi_message *message = NULL;
424 struct spi_transfer *transfer = NULL;
425 struct spi_transfer *previous = NULL;
426 struct spi_device *spi = NULL;
427 struct chip_data *chip = NULL;
428 u8 bits = 0;
429 u8 imask = 0;
430 u8 cs_change = 0;
431 u16 clk_div = 0;
432 u32 speed = 0;
433 u32 cr0 = 0;
434
435 /* Get current state information */
436 message = dws->cur_msg;
437 transfer = dws->cur_transfer;
438 chip = dws->cur_chip;
439 spi = message->spi;
440
441 if (message->state == ERROR_STATE) {
442 message->status = -EIO;
443 goto early_exit;
444 }
445
446 /* Handle end of message */
447 if (message->state == DONE_STATE) {
448 message->status = 0;
449 goto early_exit;
450 }
451
452 /* Delay if requested at the end of the previous transfer */
453 if (message->state == RUNNING_STATE) {
454 previous = list_entry(transfer->transfer_list.prev,
455 struct spi_transfer,
456 transfer_list);
457 if (previous->delay_usecs)
458 udelay(previous->delay_usecs);
459 }
460
461 dws->n_bytes = chip->n_bytes;
462 dws->dma_width = chip->dma_width;
463 dws->cs_control = chip->cs_control;
464
465 dws->rx_dma = transfer->rx_dma;
466 dws->tx_dma = transfer->tx_dma;
467 dws->tx = (void *)transfer->tx_buf;
468 dws->tx_end = dws->tx + transfer->len;
469 dws->rx = transfer->rx_buf;
470 dws->rx_end = dws->rx + transfer->len;
471 dws->write = dws->tx ? chip->write : null_writer;
472 dws->read = dws->rx ? chip->read : null_reader;
473 dws->cs_change = transfer->cs_change;
474 dws->len = dws->cur_transfer->len;
475 if (chip != dws->prev_chip)
476 cs_change = 1;
477
478 cr0 = chip->cr0;
479
480 /* Handle per transfer options for bpw and speed */
481 if (transfer->speed_hz) {
482 speed = chip->speed_hz;
483
484 if (transfer->speed_hz != speed) {
485 speed = transfer->speed_hz;
486 if (speed > dws->max_freq) {
487 printk(KERN_ERR "MRST SPI0: unsupported "
488 "freq: %dHz\n", speed);
489 message->status = -EIO;
490 goto early_exit;
491 }
492
493 /* clk_div doesn't support odd values, so round down to even */
494 clk_div = dws->max_freq / speed;
495 clk_div = (clk_div >> 1) << 1;
496
497 chip->speed_hz = speed;
498 chip->clk_div = clk_div;
499 }
500 }
501 if (transfer->bits_per_word) {
502 bits = transfer->bits_per_word;
503
504 switch (bits) {
505 case 8:
506 dws->n_bytes = 1;
507 dws->dma_width = 1;
508 dws->read = (dws->read != null_reader) ?
509 u8_reader : null_reader;
510 dws->write = (dws->write != null_writer) ?
511 u8_writer : null_writer;
512 break;
513 case 16:
514 dws->n_bytes = 2;
515 dws->dma_width = 2;
516 dws->read = (dws->read != null_reader) ?
517 u16_reader : null_reader;
518 dws->write = (dws->write != null_writer) ?
519 u16_writer : null_writer;
520 break;
521 default:
522 printk(KERN_ERR "MRST SPI0: unsupported bits: "
523 "%db\n", bits);
524 message->status = -EIO;
525 goto early_exit;
526 }
527
528 cr0 = (bits - 1)
529 | (chip->type << SPI_FRF_OFFSET)
530 | (spi->mode << SPI_MODE_OFFSET)
531 | (chip->tmode << SPI_TMOD_OFFSET);
532 }
533 message->state = RUNNING_STATE;
534
535 /* Check if current transfer is a DMA transaction */
536 dws->dma_mapped = map_dma_buffers(dws);
537
538 if (!dws->dma_mapped && !chip->poll_mode) {
539 if (dws->rx)
540 imask |= SPI_INT_RXFI;
541 if (dws->tx)
542 imask |= SPI_INT_TXEI;
543 dws->transfer_handler = interrupt_transfer;
544 }
545
546 /*
547 * Reprogram registers only if
548 * 1. chip select changes
549 * 2. clk_div is changed
550 * 3. control value changes
551 */
552 if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
553 spi_enable_chip(dws, 0);
554
555 if (dw_readw(dws, ctrl0) != cr0)
556 dw_writew(dws, ctrl0, cr0);
557
558 /* Set the interrupt mask; for poll mode just disable all interrupts */
559 spi_mask_intr(dws, 0xff);
560 if (!chip->poll_mode)
561 spi_umask_intr(dws, imask);
562
563 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
564 spi_chip_sel(dws, spi->chip_select);
565 spi_enable_chip(dws, 1);
566
567 if (cs_change)
568 dws->prev_chip = chip;
569 }
570
571 if (dws->dma_mapped)
572 dma_transfer(dws, cs_change);
573
574 if (chip->poll_mode)
575 poll_transfer(dws);
576
577 return;
578
579early_exit:
580 giveback(dws);
581 return;
582}
583
584static void pump_messages(struct work_struct *work)
585{
586 struct dw_spi *dws =
587 container_of(work, struct dw_spi, pump_messages);
588 unsigned long flags;
589
590 /* Lock queue and check for queue work */
591 spin_lock_irqsave(&dws->lock, flags);
592 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
593 dws->busy = 0;
594 spin_unlock_irqrestore(&dws->lock, flags);
595 return;
596 }
597
598 /* Make sure we are not already running a message */
599 if (dws->cur_msg) {
600 spin_unlock_irqrestore(&dws->lock, flags);
601 return;
602 }
603
604 /* Extract head of queue */
605 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
606 list_del_init(&dws->cur_msg->queue);
607
608 /* Initialize message state */
609 dws->cur_msg->state = START_STATE;
610 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
611 struct spi_transfer,
612 transfer_list);
613 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
614
615 /* Mark as busy and launch transfers */
616 tasklet_schedule(&dws->pump_transfers);
617
618 dws->busy = 1;
619 spin_unlock_irqrestore(&dws->lock, flags);
620}
621
622/* spi_device use this to queue in their spi_msg */
623static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
624{
625 struct dw_spi *dws = spi_master_get_devdata(spi->master);
626 unsigned long flags;
627
628 spin_lock_irqsave(&dws->lock, flags);
629
630 if (dws->run == QUEUE_STOPPED) {
631 spin_unlock_irqrestore(&dws->lock, flags);
632 return -ESHUTDOWN;
633 }
634
635 msg->actual_length = 0;
636 msg->status = -EINPROGRESS;
637 msg->state = START_STATE;
638
639 list_add_tail(&msg->queue, &dws->queue);
640
641 if (dws->run == QUEUE_RUNNING && !dws->busy) {
642
643 if (dws->cur_transfer || dws->cur_msg)
644 queue_work(dws->workqueue,
645 &dws->pump_messages);
646 else {
647 /* If no other transfer is in flight, start this one directly */
648 spin_unlock_irqrestore(&dws->lock, flags);
649 pump_messages(&dws->pump_messages);
650 return 0;
651 }
652 }
653
654 spin_unlock_irqrestore(&dws->lock, flags);
655 return 0;
656}
657
658/* This may be called twice for each spi dev */
659static int dw_spi_setup(struct spi_device *spi)
660{
661 struct dw_spi_chip *chip_info = NULL;
662 struct chip_data *chip;
663
664 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
665 return -EINVAL;
666
667 /* Only alloc on first setup */
668 chip = spi_get_ctldata(spi);
669 if (!chip) {
670 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
671 if (!chip)
672 return -ENOMEM;
673
674 chip->cs_control = null_cs_control;
675 chip->enable_dma = 0;
676 }
677
678 /*
679 * Protocol drivers may change the chip settings, so...
680 * if chip_info exists, use it
681 */
682 chip_info = spi->controller_data;
683
684 /* chip_info doesn't always exist */
685 if (chip_info) {
686 if (chip_info->cs_control)
687 chip->cs_control = chip_info->cs_control;
688
689 chip->poll_mode = chip_info->poll_mode;
690 chip->type = chip_info->type;
691
692 chip->rx_threshold = 0;
693 chip->tx_threshold = 0;
694
695 chip->enable_dma = chip_info->enable_dma;
696 }
697
698 if (spi->bits_per_word <= 8) {
699 chip->n_bytes = 1;
700 chip->dma_width = 1;
701 chip->read = u8_reader;
702 chip->write = u8_writer;
703 } else if (spi->bits_per_word <= 16) {
704 chip->n_bytes = 2;
705 chip->dma_width = 2;
706 chip->read = u16_reader;
707 chip->write = u16_writer;
708 } else {
709 /* The MRST SPI controller never handles more than 16 bits per word */
710 dev_err(&spi->dev, "invalid wordsize\n");
711 return -EINVAL;
712 }
713 chip->bits_per_word = spi->bits_per_word;
714
715 chip->speed_hz = spi->max_speed_hz;
716 if (chip->speed_hz)
717 chip->clk_div = 25000000 / chip->speed_hz;
718 else
719 chip->clk_div = 8; /* default value */
720
721 chip->tmode = 0; /* Tx & Rx */
722 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
723 chip->cr0 = (chip->bits_per_word - 1)
724 | (chip->type << SPI_FRF_OFFSET)
725 | (spi->mode << SPI_MODE_OFFSET)
726 | (chip->tmode << SPI_TMOD_OFFSET);
727
728 spi_set_ctldata(spi, chip);
729 return 0;
730}
731
732static void dw_spi_cleanup(struct spi_device *spi)
733{
734 struct chip_data *chip = spi_get_ctldata(spi);
735 kfree(chip);
736}
737
738static int __init init_queue(struct dw_spi *dws)
739{
740 INIT_LIST_HEAD(&dws->queue);
741 spin_lock_init(&dws->lock);
742
743 dws->run = QUEUE_STOPPED;
744 dws->busy = 0;
745
746 tasklet_init(&dws->pump_transfers,
747 pump_transfers, (unsigned long)dws);
748
749 INIT_WORK(&dws->pump_messages, pump_messages);
750 dws->workqueue = create_singlethread_workqueue(
751 dev_name(dws->master->dev.parent));
752 if (dws->workqueue == NULL)
753 return -EBUSY;
754
755 return 0;
756}
757
758static int start_queue(struct dw_spi *dws)
759{
760 unsigned long flags;
761
762 spin_lock_irqsave(&dws->lock, flags);
763
764 if (dws->run == QUEUE_RUNNING || dws->busy) {
765 spin_unlock_irqrestore(&dws->lock, flags);
766 return -EBUSY;
767 }
768
769 dws->run = QUEUE_RUNNING;
770 dws->cur_msg = NULL;
771 dws->cur_transfer = NULL;
772 dws->cur_chip = NULL;
773 dws->prev_chip = NULL;
774 spin_unlock_irqrestore(&dws->lock, flags);
775
776 queue_work(dws->workqueue, &dws->pump_messages);
777
778 return 0;
779}
780
781static int stop_queue(struct dw_spi *dws)
782{
783 unsigned long flags;
784 unsigned limit = 50;
785 int status = 0;
786
787 spin_lock_irqsave(&dws->lock, flags);
788 dws->run = QUEUE_STOPPED;
789 while (!list_empty(&dws->queue) && dws->busy && limit--) {
790 spin_unlock_irqrestore(&dws->lock, flags);
791 msleep(10);
792 spin_lock_irqsave(&dws->lock, flags);
793 }
794
795 if (!list_empty(&dws->queue) || dws->busy)
796 status = -EBUSY;
797 spin_unlock_irqrestore(&dws->lock, flags);
798
799 return status;
800}
801
802static int destroy_queue(struct dw_spi *dws)
803{
804 int status;
805
806 status = stop_queue(dws);
807 if (status != 0)
808 return status;
809 destroy_workqueue(dws->workqueue);
810 return 0;
811}
812
813/* Restart the controller, disable all interrupts, clean rx fifo */
814static void spi_hw_init(struct dw_spi *dws)
815{
816 spi_enable_chip(dws, 0);
817 spi_mask_intr(dws, 0xff);
818 spi_enable_chip(dws, 1);
819 flush(dws);
820}
821
822int __devinit dw_spi_add_host(struct dw_spi *dws)
823{
824 struct spi_master *master;
825 int ret;
826
827 BUG_ON(dws == NULL);
828
829 master = spi_alloc_master(dws->parent_dev, 0);
830 if (!master) {
831 ret = -ENOMEM;
832 goto exit;
833 }
834
835 dws->master = master;
836 dws->type = SSI_MOTO_SPI;
837 dws->prev_chip = NULL;
838 dws->dma_inited = 0;
839 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
840
841 ret = request_irq(dws->irq, dw_spi_irq, 0,
842 "dw_spi", dws);
843 if (ret < 0) {
844 dev_err(&master->dev, "cannot get IRQ\n");
845 goto err_free_master;
846 }
847
848 master->mode_bits = SPI_CPOL | SPI_CPHA;
849 master->bus_num = dws->bus_num;
850 master->num_chipselect = dws->num_cs;
851 master->cleanup = dw_spi_cleanup;
852 master->setup = dw_spi_setup;
853 master->transfer = dw_spi_transfer;
854
855 dws->dma_inited = 0;
856
857 /* Basic HW init */
858 spi_hw_init(dws);
859
860 /* Initialize and start the message queue */
861 ret = init_queue(dws);
862 if (ret) {
863 dev_err(&master->dev, "problem initializing queue\n");
864 goto err_disable_hw;
865 }
866 ret = start_queue(dws);
867 if (ret) {
868 dev_err(&master->dev, "problem starting queue\n");
869 goto err_disable_hw;
870 }
871
872 spi_master_set_devdata(master, dws);
873 ret = spi_register_master(master);
874 if (ret) {
875 dev_err(&master->dev, "problem registering spi master\n");
876 goto err_queue_alloc;
877 }
878
879 mrst_spi_debugfs_init(dws);
880 return 0;
881
882err_queue_alloc:
883 destroy_queue(dws);
884err_disable_hw:
885 spi_enable_chip(dws, 0);
886 free_irq(dws->irq, dws);
887err_free_master:
888 spi_master_put(master);
889exit:
890 return ret;
891}
892EXPORT_SYMBOL(dw_spi_add_host);
893
894void __devexit dw_spi_remove_host(struct dw_spi *dws)
895{
896 int status = 0;
897
898 if (!dws)
899 return;
900 mrst_spi_debugfs_remove(dws);
901
902 /* Remove the queue */
903 status = destroy_queue(dws);
904 if (status != 0)
905 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
906 "complete, message memory not freed\n");
907
908 spi_enable_chip(dws, 0);
909 /* Disable clk */
910 spi_set_clk(dws, 0);
911 free_irq(dws->irq, dws);
912
913 /* Disconnect from the SPI framework */
914 spi_unregister_master(dws->master);
915}
916
917int dw_spi_suspend_host(struct dw_spi *dws)
918{
919 int ret = 0;
920
921 ret = stop_queue(dws);
922 if (ret)
923 return ret;
924 spi_enable_chip(dws, 0);
925 spi_set_clk(dws, 0);
926 return ret;
927}
928EXPORT_SYMBOL(dw_spi_suspend_host);
929
930int dw_spi_resume_host(struct dw_spi *dws)
931{
932 int ret;
933
934 spi_hw_init(dws);
935 ret = start_queue(dws);
936 if (ret)
937 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
938 return ret;
939}
940EXPORT_SYMBOL(dw_spi_resume_host);
941
942MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
943MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
944MODULE_LICENSE("GPL v2");
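
For reference, the per-device options consumed by dw_spi_setup() above arrive through spi->controller_data as a struct dw_spi_chip. A minimal, illustrative board-side sketch (not part of this patch; the GPIO chip-select helper is hypothetical, and only the fields read above are shown: cs_control, poll_mode, type and enable_dma):

	/* Hypothetical example: per-device settings handed to dw_spi_setup() */
	static void my_board_cs_control(u32 command)
	{
		/* e.g. toggle a chip-select GPIO on MRST_SPI_DEASSERT */
	}

	static struct dw_spi_chip my_spi_dev_chip = {
		.poll_mode	= 1,		/* use poll_transfer() instead of interrupts */
		.type		= SSI_MOTO_SPI,	/* Motorola SPI frame format */
		.enable_dma	= 0,		/* dma_transfer() is still a stub */
		.cs_control	= my_board_cs_control,
	};

	/* ...referenced from the board's spi_board_info as .controller_data */
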
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
new file mode 100644
index 000000000000..34ba69161734
--- /dev/null
+++ b/drivers/spi/dw_spi_pci.c
@@ -0,0 +1,169 @@
1/*
2 * dw_spi_pci.c - PCI interface driver for DW SPI Core
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/spi/dw_spi.h>
23#include <linux/spi/spi.h>
24
25#define DRIVER_NAME "dw_spi_pci"
26
27struct dw_spi_pci {
28 struct pci_dev *pdev;
29 struct dw_spi dws;
30};
31
32static int __devinit spi_pci_probe(struct pci_dev *pdev,
33 const struct pci_device_id *ent)
34{
35 struct dw_spi_pci *dwpci;
36 struct dw_spi *dws;
37 int pci_bar = 0;
38 int ret;
39
40 printk(KERN_INFO "DW: found PCI SPI controller (ID: %04x:%04x)\n",
41 pdev->vendor, pdev->device);
42
43 ret = pci_enable_device(pdev);
44 if (ret)
45 return ret;
46
47 dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
48 if (!dwpci) {
49 ret = -ENOMEM;
50 goto err_disable;
51 }
52
53 dwpci->pdev = pdev;
54 dws = &dwpci->dws;
55
56 /* Get basic io resource and map it */
57 dws->paddr = pci_resource_start(pdev, pci_bar);
58 dws->iolen = pci_resource_len(pdev, pci_bar);
59
60 ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
61 if (ret)
62 goto err_kfree;
63
64 dws->regs = ioremap_nocache((unsigned long)dws->paddr,
65 pci_resource_len(pdev, pci_bar));
66 if (!dws->regs) {
67 ret = -ENOMEM;
68 goto err_release_reg;
69 }
70
71 dws->parent_dev = &pdev->dev;
72 dws->bus_num = 0;
73 dws->num_cs = 4;
74 dws->max_freq = 25000000; /* for Moorestown */
75 dws->irq = pdev->irq;
76
77 ret = dw_spi_add_host(dws);
78 if (ret)
79 goto err_unmap;
80
81 /* PCI hook and SPI hook use the same drv data */
82 pci_set_drvdata(pdev, dwpci);
83 return 0;
84
85err_unmap:
86 iounmap(dws->regs);
87err_release_reg:
88 pci_release_region(pdev, pci_bar);
89err_kfree:
90 kfree(dwpci);
91err_disable:
92 pci_disable_device(pdev);
93 return ret;
94}
95
96static void __devexit spi_pci_remove(struct pci_dev *pdev)
97{
98 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
99
100 pci_set_drvdata(pdev, NULL);
101 iounmap(dwpci->dws.regs);
102 pci_release_region(pdev, 0);
103 kfree(dwpci);
104 pci_disable_device(pdev);
105}
106
107#ifdef CONFIG_PM
108static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
109{
110 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
111 int ret;
112
113 ret = dw_spi_suspend_host(&dwpci->dws);
114 if (ret)
115 return ret;
116 pci_save_state(pdev);
117 pci_disable_device(pdev);
118 pci_set_power_state(pdev, pci_choose_state(pdev, state));
119 return ret;
120}
121
122static int spi_resume(struct pci_dev *pdev)
123{
124 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
125 int ret;
126
127 pci_set_power_state(pdev, PCI_D0);
128 pci_restore_state(pdev);
129 ret = pci_enable_device(pdev);
130 if (ret)
131 return ret;
132 return dw_spi_resume_host(&dwpci->dws);
133}
134#else
135#define spi_suspend NULL
136#define spi_resume NULL
137#endif
138
139static const struct pci_device_id pci_ids[] __devinitdata = {
140 /* Intel Moorestown platform SPI controller 0 */
141 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
142 {},
143};
144
145static struct pci_driver dw_spi_driver = {
146 .name = DRIVER_NAME,
147 .id_table = pci_ids,
148 .probe = spi_pci_probe,
149 .remove = __devexit_p(spi_pci_remove),
150 .suspend = spi_suspend,
151 .resume = spi_resume,
152};
153
154static int __init mrst_spi_init(void)
155{
156 return pci_register_driver(&dw_spi_driver);
157}
158
159static void __exit mrst_spi_exit(void)
160{
161 pci_unregister_driver(&dw_spi_driver);
162}
163
164module_init(mrst_spi_init);
165module_exit(mrst_spi_exit);
166
167MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
168MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
169MODULE_LICENSE("GPL v2");
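
The PCI glue above is the only bus binding added here, but the core entry points (dw_spi_add_host, dw_spi_remove_host and the suspend/resume helpers) are bus-agnostic. A hypothetical memory-mapped platform binding would fill the same struct dw_spi fields before calling dw_spi_add_host(); this is a sketch only, with the resource handling abbreviated:

	/* Hypothetical MMIO glue, mirroring spi_pci_probe() above */
	static int __devinit my_dw_spi_mmio_probe(struct platform_device *pdev)
	{
		struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		struct dw_spi *dws;

		if (!mem)
			return -ENODEV;

		dws = kzalloc(sizeof(*dws), GFP_KERNEL);
		if (!dws)
			return -ENOMEM;

		dws->paddr	= mem->start;
		dws->iolen	= resource_size(mem);
		dws->regs	= ioremap(mem->start, resource_size(mem));
		dws->parent_dev	= &pdev->dev;
		dws->irq	= platform_get_irq(pdev, 0);
		dws->bus_num	= pdev->id;
		dws->num_cs	= 4;
		dws->max_freq	= 25000000;	/* board-specific controller clock */

		return dw_spi_add_host(dws);	/* error unwinding omitted for brevity */
	}
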
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 73e24ef5a2f9..1d41058bbab2 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1294,7 +1294,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1294 goto out_error_get_res; 1294 goto out_error_get_res;
1295 } 1295 }
1296 1296
1297 drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1)); 1297 drv_data->regs_base = ioremap(res->start, resource_size(res));
1298 if (drv_data->regs_base == NULL) { 1298 if (drv_data->regs_base == NULL) {
1299 dev_err(dev, "Cannot map IO\n"); 1299 dev_err(dev, "Cannot map IO\n");
1300 status = -ENXIO; 1300 status = -ENXIO;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index e9390d747bfc..1fb2a6ea328c 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -1013,7 +1013,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
1013 1013
1014 init_completion(&mpc8xxx_spi->done); 1014 init_completion(&mpc8xxx_spi->done);
1015 1015
1016 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 1016 mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
1017 if (mpc8xxx_spi->base == NULL) { 1017 if (mpc8xxx_spi->base == NULL) {
1018 ret = -ENOMEM; 1018 ret = -ENOMEM;
1019 goto err_ioremap; 1019 goto err_ioremap;
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 276591569c8b..c010733877ae 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -1,7 +1,7 @@
1/* linux/drivers/spi/spi_s3c24xx.c 1/* linux/drivers/spi/spi_s3c24xx.c
2 * 2 *
3 * Copyright (c) 2006 Ben Dooks 3 * Copyright (c) 2006 Ben Dooks
4 * Copyright (c) 2006 Simtec Electronics 4 * Copyright 2006-2009 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,11 @@
28#include <plat/regs-spi.h> 28#include <plat/regs-spi.h>
29#include <mach/spi.h> 29#include <mach/spi.h>
30 30
31#include <plat/fiq.h>
32#include <asm/fiq.h>
33
34#include "spi_s3c24xx_fiq.h"
35
31/** 36/**
32 * s3c24xx_spi_devstate - per device data 37 * s3c24xx_spi_devstate - per device data
33 * @hz: Last frequency calculated for @sppre field. 38 * @hz: Last frequency calculated for @sppre field.
@@ -42,6 +47,13 @@ struct s3c24xx_spi_devstate {
42 u8 sppre; 47 u8 sppre;
43}; 48};
44 49
50enum spi_fiq_mode {
51 FIQ_MODE_NONE = 0,
52 FIQ_MODE_TX = 1,
53 FIQ_MODE_RX = 2,
54 FIQ_MODE_TXRX = 3,
55};
56
45struct s3c24xx_spi { 57struct s3c24xx_spi {
46 /* bitbang has to be first */ 58 /* bitbang has to be first */
47 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
@@ -52,6 +64,11 @@ struct s3c24xx_spi {
52 int len; 64 int len;
53 int count; 65 int count;
54 66
67 struct fiq_handler fiq_handler;
68 enum spi_fiq_mode fiq_mode;
69 unsigned char fiq_inuse;
70 unsigned char fiq_claimed;
71
55 void (*set_cs)(struct s3c2410_spi_info *spi, 72 void (*set_cs)(struct s3c2410_spi_info *spi,
56 int cs, int pol); 73 int cs, int pol);
57 74
@@ -67,6 +84,7 @@ struct s3c24xx_spi {
67 struct s3c2410_spi_info *pdata; 84 struct s3c2410_spi_info *pdata;
68}; 85};
69 86
87
70#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) 88#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
71#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) 89#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
72 90
@@ -127,7 +145,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
127 } 145 }
128 146
129 if (spi->mode != cs->mode) { 147 if (spi->mode != cs->mode) {
130 u8 spcon = SPCON_DEFAULT; 148 u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
131 149
132 if (spi->mode & SPI_CPHA) 150 if (spi->mode & SPI_CPHA)
133 spcon |= S3C2410_SPCON_CPHA_FMTB; 151 spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +232,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
214 return hw->tx ? hw->tx[count] : 0; 232 return hw->tx ? hw->tx[count] : 0;
215} 233}
216 234
235#ifdef CONFIG_SPI_S3C24XX_FIQ
236/* Support for FIQ based pseudo-DMA to improve the transfer speed.
237 *
238 * This code uses the assembly helper in spi_s3c24xx_fiq.S which is
239 * used by the FIQ core to move data between main memory and the peripheral
240 * block. Since this is code running on the processor, there is no problem
241 * with cache coherency of the buffers, so we can use any buffer we like.
242 */
243
244/**
245 * struct spi_fiq_code - FIQ code and header
246 * @length: The length of the code fragment, excluding this header.
247 * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
248 * @data: The code itself to install as a FIQ handler.
249 */
250struct spi_fiq_code {
251 u32 length;
252 u32 ack_offset;
253 u8 data[0];
254};
255
256extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
257extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
258extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
259
260/**
261 * ack_bit - turn IRQ into IRQ acknowledgement bit
262 * @irq: The interrupt number
263 *
264 * Returns the bit to write to the interrupt acknowledge register.
265 */
266static inline u32 ack_bit(unsigned int irq)
267{
268 return 1 << (irq - IRQ_EINT0);
269}
270
271/**
272 * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
273 * @hw: The hardware state.
274 *
275 * Claim the FIQ handler (only one can be active at any one time) and
276 * then setup the correct transfer code for this transfer.
277 *
278 * This call updates all the necessary state information if successful,
279 * so the caller does not need to do anything more than start the transfer
280 * as normal, since the IRQ will have been re-routed to the FIQ handler.
281*/
282void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
283{
284 struct pt_regs regs;
285 enum spi_fiq_mode mode;
286 struct spi_fiq_code *code;
287 int ret;
288
289 if (!hw->fiq_claimed) {
290 /* try to claim the fiq if we haven't got it; if that fails,
291 * return and simply use another transfer method */
292
293 ret = claim_fiq(&hw->fiq_handler);
294 if (ret)
295 return;
296 }
297
298 if (hw->tx && !hw->rx)
299 mode = FIQ_MODE_TX;
300 else if (hw->rx && !hw->tx)
301 mode = FIQ_MODE_RX;
302 else
303 mode = FIQ_MODE_TXRX;
304
305 regs.uregs[fiq_rspi] = (long)hw->regs;
306 regs.uregs[fiq_rrx] = (long)hw->rx;
307 regs.uregs[fiq_rtx] = (long)hw->tx + 1;
308 regs.uregs[fiq_rcount] = hw->len - 1;
309 regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
310
311 set_fiq_regs(&regs);
312
313 if (hw->fiq_mode != mode) {
314 u32 *ack_ptr;
315
316 hw->fiq_mode = mode;
317
318 switch (mode) {
319 case FIQ_MODE_TX:
320 code = &s3c24xx_spi_fiq_tx;
321 break;
322 case FIQ_MODE_RX:
323 code = &s3c24xx_spi_fiq_rx;
324 break;
325 case FIQ_MODE_TXRX:
326 code = &s3c24xx_spi_fiq_txrx;
327 break;
328 default:
329 code = NULL;
330 }
331
332 BUG_ON(!code);
333
334 ack_ptr = (u32 *)&code->data[code->ack_offset];
335 *ack_ptr = ack_bit(hw->irq);
336
337 set_fiq_handler(&code->data, code->length);
338 }
339
340 s3c24xx_set_fiq(hw->irq, true);
341
342 hw->fiq_mode = mode;
343 hw->fiq_inuse = 1;
344}
345
346/**
347 * s3c24xx_spi_fiqop - FIQ core code callback
348 * @pw: Data registered with the handler
349 * @release: Whether this is a release or a return.
350 *
351 * Called by the FIQ code when another module wants to use the FIQ, so
352 * return whether we are currently using this or not and then update our
353 * internal state.
354 */
355static int s3c24xx_spi_fiqop(void *pw, int release)
356{
357 struct s3c24xx_spi *hw = pw;
358 int ret = 0;
359
360 if (release) {
361 if (hw->fiq_inuse)
362 ret = -EBUSY;
363
364 /* note, we do not need to unroute the FIQ, as the FIQ
365 * vector code de-routes it to signal the end of transfer */
366
367 hw->fiq_mode = FIQ_MODE_NONE;
368 hw->fiq_claimed = 0;
369 } else {
370 hw->fiq_claimed = 1;
371 }
372
373 return ret;
374}
375
376/**
377 * s3c24xx_spi_initfiq - setup the information for the FIQ core
378 * @hw: The hardware state.
379 *
380 * Setup the fiq_handler block to pass to the FIQ core.
381 */
382static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
383{
384 hw->fiq_handler.dev_id = hw;
385 hw->fiq_handler.name = dev_name(hw->dev);
386 hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
387}
388
389/**
390 * s3c24xx_spi_usefiq - return if we should be using FIQ.
391 * @hw: The hardware state.
392 *
393 * Return true if the platform data specifies that this channel is
394 * allowed to use the FIQ.
395 */
396static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
397{
398 return hw->pdata->use_fiq;
399}
400
401/**
402 * s3c24xx_spi_usingfiq - return if channel is using FIQ
403 * @spi: The hardware state.
404 *
405 * Return whether the channel is currently using the FIQ (separate from
406 * whether the FIQ is claimed).
407 */
408static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
409{
410 return spi->fiq_inuse;
411}
412#else
413
414static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
415static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
416static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
417static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
418
419#endif /* CONFIG_SPI_S3C24XX_FIQ */
420
217static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) 421static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
218{ 422{
219 struct s3c24xx_spi *hw = to_hw(spi); 423 struct s3c24xx_spi *hw = to_hw(spi);
220 424
221 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
222 t->tx_buf, t->rx_buf, t->len);
223
224 hw->tx = t->tx_buf; 425 hw->tx = t->tx_buf;
225 hw->rx = t->rx_buf; 426 hw->rx = t->rx_buf;
226 hw->len = t->len; 427 hw->len = t->len;
@@ -228,11 +429,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
228 429
229 init_completion(&hw->done); 430 init_completion(&hw->done);
230 431
432 hw->fiq_inuse = 0;
433 if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
434 s3c24xx_spi_tryfiq(hw);
435
231 /* send the first byte */ 436 /* send the first byte */
232 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); 437 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
233 438
234 wait_for_completion(&hw->done); 439 wait_for_completion(&hw->done);
235
236 return hw->count; 440 return hw->count;
237} 441}
238 442
@@ -254,17 +458,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
254 goto irq_done; 458 goto irq_done;
255 } 459 }
256 460
257 hw->count++; 461 if (!s3c24xx_spi_usingfiq(hw)) {
462 hw->count++;
258 463
259 if (hw->rx) 464 if (hw->rx)
260 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); 465 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
261 466
262 count++; 467 count++;
468
469 if (count < hw->len)
470 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
471 else
472 complete(&hw->done);
473 } else {
474 hw->count = hw->len;
475 hw->fiq_inuse = 0;
476
477 if (hw->rx)
478 hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
263 479
264 if (count < hw->len)
265 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
266 else
267 complete(&hw->done); 480 complete(&hw->done);
481 }
268 482
269 irq_done: 483 irq_done:
270 return IRQ_HANDLED; 484 return IRQ_HANDLED;
@@ -322,6 +536,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
322 platform_set_drvdata(pdev, hw); 536 platform_set_drvdata(pdev, hw);
323 init_completion(&hw->done); 537 init_completion(&hw->done);
324 538
539 /* initialise fiq handler */
540
541 s3c24xx_spi_initfiq(hw);
542
325 /* setup the master state. */ 543 /* setup the master state. */
326 544
327 /* the spi->mode bits understood by this driver: */ 545 /* the spi->mode bits understood by this driver: */
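
The FIQ pseudo-DMA path added above is opt-in: it is only built under CONFIG_SPI_S3C24XX_FIQ and only taken when the platform data allows it (s3c24xx_spi_usefiq()) and the transfer is at least 3 bytes long. A board would request it from its machine file roughly as below; this is a sketch, and the surrounding s3c2410_spi_info fields are assumptions:

	/* Hypothetical board setup: allow the FIQ transfer path on this channel */
	static struct s3c2410_spi_info my_spi0_pdata = {
		.num_cs		= 1,
		.use_fiq	= 1,	/* checked by s3c24xx_spi_usefiq() per transfer */
	};
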
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S
new file mode 100644
index 000000000000..3793cae361db
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.S
@@ -0,0 +1,116 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.S
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer code
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15
16#include <mach/map.h>
17#include <mach/regs-irq.h>
18#include <plat/regs-spi.h>
19
20#include "spi_s3c24xx_fiq.h"
21
22 .text
23
24 @ entry to these routines is as follows, with the register names
25 @ defined in fiq.h so that they can be shared with the C files which
26 @ setup the calling registers.
27 @
28 @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
29 @ fiq_rtmp Temporary register to hold tx/rx data
30 @ fiq_rspi The base of the SPI register block
31 @ fiq_rtx The tx buffer pointer
32 @ fiq_rrx The rx buffer pointer
33 @ fiq_rcount The number of bytes to move
34
35 @ each entry starts with a word entry of how long it is
36 @ and an offset to the irq acknowledgment word
37
38ENTRY(s3c24xx_spi_fiq_rx)
39s3c24xx_spi_fix_rx:
40 .word fiq_rx_end - fiq_rx_start
41 .word fiq_rx_irq_ack - fiq_rx_start
42fiq_rx_start:
43 ldr fiq_rtmp, fiq_rx_irq_ack
44 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
45
46 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
47 strb fiq_rtmp, [ fiq_rrx ], #1
48
49 mov fiq_rtmp, #0xff
50 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
51
52 subs fiq_rcount, fiq_rcount, #1
53 subnes pc, lr, #4 @@ return, still have work to do
54
55 @@ set IRQ controller so that next op will trigger IRQ
56 mov fiq_rtmp, #0
57 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
58 subs pc, lr, #4
59
60fiq_rx_irq_ack:
61 .word 0
62fiq_rx_end:
63
64ENTRY(s3c24xx_spi_fiq_txrx)
65s3c24xx_spi_fiq_txrx:
66 .word fiq_txrx_end - fiq_txrx_start
67 .word fiq_txrx_irq_ack - fiq_txrx_start
68fiq_txrx_start:
69
70 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
71 strb fiq_rtmp, [ fiq_rrx ], #1
72
73 ldr fiq_rtmp, fiq_txrx_irq_ack
74 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
75
76 ldrb fiq_rtmp, [ fiq_rtx ], #1
77 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
78
79 subs fiq_rcount, fiq_rcount, #1
80 subnes pc, lr, #4 @@ return, still have work to do
81
82 mov fiq_rtmp, #0
83 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
84 subs pc, lr, #4
85
86fiq_txrx_irq_ack:
87 .word 0
88
89fiq_txrx_end:
90
91ENTRY(s3c24xx_spi_fiq_tx)
92s3c24xx_spi_fix_tx:
93 .word fiq_tx_end - fiq_tx_start
94 .word fiq_tx_irq_ack - fiq_tx_start
95fiq_tx_start:
96 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
97
98 ldr fiq_rtmp, fiq_tx_irq_ack
99 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
100
101 ldrb fiq_rtmp, [ fiq_rtx ], #1
102 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
103
104 subs fiq_rcount, fiq_rcount, #1
105 subnes pc, lr, #4 @@ return, still have work to do
106
107 mov fiq_rtmp, #0
108 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
109 subs pc, lr, #4
110
111fiq_tx_irq_ack:
112 .word 0
113
114fiq_tx_end:
115
116 .end
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h
new file mode 100644
index 000000000000..a5950bb25b51
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.h
@@ -0,0 +1,26 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.h
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* We have R8 through R13 to play with */
14
15#ifdef __ASSEMBLY__
16#define __REG_NR(x) r##x
17#else
18#define __REG_NR(x) (x)
19#endif
20
21#define fiq_rspi __REG_NR(8)
22#define fiq_rtmp __REG_NR(9)
23#define fiq_rrx __REG_NR(10)
24#define fiq_rtx __REG_NR(11)
25#define fiq_rcount __REG_NR(12)
26#define fiq_rirq __REG_NR(13)
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 000000000000..88a456dba967
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1196 @@
1/* linux/drivers/spi/spi_s3c64xx.c
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29
30#include <mach/dma.h>
31#include <plat/spi.h>
32
33/* Registers and bit-fields */
34
35#define S3C64XX_SPI_CH_CFG 0x00
36#define S3C64XX_SPI_CLK_CFG 0x04
37#define S3C64XX_SPI_MODE_CFG 0x08
38#define S3C64XX_SPI_SLAVE_SEL 0x0C
39#define S3C64XX_SPI_INT_EN 0x10
40#define S3C64XX_SPI_STATUS 0x14
41#define S3C64XX_SPI_TX_DATA 0x18
42#define S3C64XX_SPI_RX_DATA 0x1C
43#define S3C64XX_SPI_PACKET_CNT 0x20
44#define S3C64XX_SPI_PENDING_CLR 0x24
45#define S3C64XX_SPI_SWAP_CFG 0x28
46#define S3C64XX_SPI_FB_CLK 0x2C
47
48#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
49#define S3C64XX_SPI_CH_SW_RST (1<<5)
50#define S3C64XX_SPI_CH_SLAVE (1<<4)
51#define S3C64XX_SPI_CPOL_L (1<<3)
52#define S3C64XX_SPI_CPHA_B (1<<2)
53#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
54#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
55
56#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
57#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
58#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
59#define S3C64XX_SPI_PSR_MASK 0xff
60
61#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
62#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
65#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
66#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
69#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
70#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
71#define S3C64XX_SPI_MODE_4BURST (1<<0)
72
73#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
74#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
75
76#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
77
78#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
79 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
80
81#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
82#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
83#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
84#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
85#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
86#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
87#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
88
89#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
90#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
91#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
92#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
93#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
94#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
95
96#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
97
98#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
99#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
100#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
101#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
102#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
103
104#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
105#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
106#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
107#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
108#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
109#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
110#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
111#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
112
113#define S3C64XX_SPI_FBCLK_MSK (3<<0)
114
115#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
116 (((i)->fifo_lvl_mask + 1))) \
117 ? 1 : 0)
118
119#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
120 (((i)->fifo_lvl_mask + 1) << 1)) \
121 ? 1 : 0)
122#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
123#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
124
125#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
126#define S3C64XX_SPI_TRAILCNT_OFF 19
127
128#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
129
130#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
131
132#define SUSPND (1<<0)
133#define SPIBUSY (1<<1)
134#define RXBUSY (1<<2)
135#define TXBUSY (1<<3)
136
137/**
138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
139 * @clk: Pointer to the spi clock.
140 * @master: Pointer to the SPI Protocol master.
141 * @workqueue: Work queue for the SPI xfer requests.
142 * @cntrlr_info: Platform specific data for the controller this driver manages.
143 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
144 * @work: Work item that processes the queued SPI transfer requests.
145 * @queue: To log SPI xfer requests.
146 * @lock: Controller specific lock.
147 * @state: Set of FLAGS to indicate status.
148 * @rx_dmach: Controller's DMA channel for Rx.
149 * @tx_dmach: Controller's DMA channel for Tx.
150 * @sfr_start: BUS address of SPI controller regs.
151 * @regs: Pointer to ioremap'ed controller registers.
152 * @xfer_completion: To indicate completion of xfer task.
153 * @cur_mode: Stores the active configuration of the controller.
154 * @cur_bpw: Stores the active bits per word settings.
155 * @cur_speed: Stores the active xfer clock speed.
156 */
157struct s3c64xx_spi_driver_data {
158 void __iomem *regs;
159 struct clk *clk;
160 struct platform_device *pdev;
161 struct spi_master *master;
162 struct workqueue_struct *workqueue;
163 struct s3c64xx_spi_cntrlr_info *cntrlr_info;
164 struct spi_device *tgl_spi;
165 struct work_struct work;
166 struct list_head queue;
167 spinlock_t lock;
168 enum dma_ch rx_dmach;
169 enum dma_ch tx_dmach;
170 unsigned long sfr_start;
171 struct completion xfer_completion;
172 unsigned state;
173 unsigned cur_mode, cur_bpw;
174 unsigned cur_speed;
175};
176
177static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
178 .name = "samsung-spi-dma",
179};
180
181static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
182{
183 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
184 void __iomem *regs = sdd->regs;
185 unsigned long loops;
186 u32 val;
187
188 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
189
190 val = readl(regs + S3C64XX_SPI_CH_CFG);
191 val |= S3C64XX_SPI_CH_SW_RST;
192 val &= ~S3C64XX_SPI_CH_HS_EN;
193 writel(val, regs + S3C64XX_SPI_CH_CFG);
194
195 /* Flush TxFIFO*/
196 loops = msecs_to_loops(1);
197 do {
198 val = readl(regs + S3C64XX_SPI_STATUS);
199 } while (TX_FIFO_LVL(val, sci) && loops--);
200
201 /* Flush RxFIFO*/
202 loops = msecs_to_loops(1);
203 do {
204 val = readl(regs + S3C64XX_SPI_STATUS);
205 if (RX_FIFO_LVL(val, sci))
206 readl(regs + S3C64XX_SPI_RX_DATA);
207 else
208 break;
209 } while (loops--);
210
211 val = readl(regs + S3C64XX_SPI_CH_CFG);
212 val &= ~S3C64XX_SPI_CH_SW_RST;
213 writel(val, regs + S3C64XX_SPI_CH_CFG);
214
215 val = readl(regs + S3C64XX_SPI_MODE_CFG);
216 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
217 writel(val, regs + S3C64XX_SPI_MODE_CFG);
218
219 val = readl(regs + S3C64XX_SPI_CH_CFG);
220 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
221 writel(val, regs + S3C64XX_SPI_CH_CFG);
222}
223
224static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
225 struct spi_device *spi,
226 struct spi_transfer *xfer, int dma_mode)
227{
228 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
229 void __iomem *regs = sdd->regs;
230 u32 modecfg, chcfg;
231
232 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
233 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
234
235 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
236 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
237
238 if (dma_mode) {
239 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
240 } else {
241 /* Always shift data into the Rx FIFO, even if the xfer is Tx only;
242 * this helps set the PCKT_CNT value so that exactly the required
243 * number of clocks is generated.
244 */
245 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
246 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
247 | S3C64XX_SPI_PACKET_CNT_EN,
248 regs + S3C64XX_SPI_PACKET_CNT);
249 }
250
251 if (xfer->tx_buf != NULL) {
252 sdd->state |= TXBUSY;
253 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
254 if (dma_mode) {
255 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
256 s3c2410_dma_config(sdd->tx_dmach, 1);
257 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
258 xfer->tx_dma, xfer->len);
259 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
260 } else {
261 unsigned char *buf = (unsigned char *) xfer->tx_buf;
262 int i = 0;
263 while (i < xfer->len)
264 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
265 }
266 }
267
268 if (xfer->rx_buf != NULL) {
269 sdd->state |= RXBUSY;
270
271 if (sci->high_speed && sdd->cur_speed >= 30000000UL
272 && !(sdd->cur_mode & SPI_CPHA))
273 chcfg |= S3C64XX_SPI_CH_HS_EN;
274
275 if (dma_mode) {
276 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
277 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
278 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
279 | S3C64XX_SPI_PACKET_CNT_EN,
280 regs + S3C64XX_SPI_PACKET_CNT);
281 s3c2410_dma_config(sdd->rx_dmach, 1);
282 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
283 xfer->rx_dma, xfer->len);
284 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
285 }
286 }
287
288 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
289 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
290}
291
292static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
293 struct spi_device *spi)
294{
295 struct s3c64xx_spi_csinfo *cs;
296
297 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
298 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
299 /* Deselect the last toggled device */
300 cs = sdd->tgl_spi->controller_data;
301 cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
302 }
303 sdd->tgl_spi = NULL;
304 }
305
306 cs = spi->controller_data;
307 cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
308}
309
310static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
311 struct spi_transfer *xfer, int dma_mode)
312{
313 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
314 void __iomem *regs = sdd->regs;
315 unsigned long val;
316 int ms;
317
318 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
319 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
320 ms += 5; /* some tolerance */
321
322 if (dma_mode) {
323 val = msecs_to_jiffies(ms) + 10;
324 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
325 } else {
326 val = msecs_to_loops(ms);
327 do {
328 val = readl(regs + S3C64XX_SPI_STATUS);
329 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
330 }
331
332 if (!val)
333 return -EIO;
334
335 if (dma_mode) {
336 u32 status;
337
338 /*
339 * DmaTx returns after simply writing data into the FIFO,
340 * without waiting for the actual transmission on the bus to finish.
341 * DmaRx returns only after the DMA has read the data out of the
342 * FIFO, which requires the bus transmission to have finished, so we
343 * need not worry if the xfer involved Rx (with or without Tx).
344 */
345 if (xfer->rx_buf == NULL) {
346 val = msecs_to_loops(10);
347 status = readl(regs + S3C64XX_SPI_STATUS);
348 while ((TX_FIFO_LVL(status, sci)
349 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
350 && --val) {
351 cpu_relax();
352 status = readl(regs + S3C64XX_SPI_STATUS);
353 }
354
355 if (!val)
356 return -EIO;
357 }
358 } else {
359 unsigned char *buf;
360 int i;
361
362 /* If it was only Tx */
363 if (xfer->rx_buf == NULL) {
364 sdd->state &= ~TXBUSY;
365 return 0;
366 }
367
368 i = 0;
369 buf = xfer->rx_buf;
370 while (i < xfer->len)
371 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
372
373 sdd->state &= ~RXBUSY;
374 }
375
376 return 0;
377}
378
379static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
380 struct spi_device *spi)
381{
382 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
383
384 if (sdd->tgl_spi == spi)
385 sdd->tgl_spi = NULL;
386
387 cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
388}
389
390static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
391{
392 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
393 void __iomem *regs = sdd->regs;
394 u32 val;
395
396 /* Disable Clock */
397 val = readl(regs + S3C64XX_SPI_CLK_CFG);
398 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
399 writel(val, regs + S3C64XX_SPI_CLK_CFG);
400
401 /* Set Polarity and Phase */
402 val = readl(regs + S3C64XX_SPI_CH_CFG);
403 val &= ~(S3C64XX_SPI_CH_SLAVE |
404 S3C64XX_SPI_CPOL_L |
405 S3C64XX_SPI_CPHA_B);
406
407 if (sdd->cur_mode & SPI_CPOL)
408 val |= S3C64XX_SPI_CPOL_L;
409
410 if (sdd->cur_mode & SPI_CPHA)
411 val |= S3C64XX_SPI_CPHA_B;
412
413 writel(val, regs + S3C64XX_SPI_CH_CFG);
414
415 /* Set Channel & DMA Mode */
416 val = readl(regs + S3C64XX_SPI_MODE_CFG);
417 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
418 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
419
420 switch (sdd->cur_bpw) {
421 case 32:
422 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
423 break;
424 case 16:
425 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
426 break;
427 default:
428 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
429 break;
430 }
431 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
432
433 writel(val, regs + S3C64XX_SPI_MODE_CFG);
434
435 /* Configure Clock */
436 val = readl(regs + S3C64XX_SPI_CLK_CFG);
437 val &= ~S3C64XX_SPI_PSR_MASK;
438 val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
439 & S3C64XX_SPI_PSR_MASK);
440 writel(val, regs + S3C64XX_SPI_CLK_CFG);
441
442 /* Enable Clock */
443 val = readl(regs + S3C64XX_SPI_CLK_CFG);
444 val |= S3C64XX_SPI_ENCLK_ENABLE;
445 writel(val, regs + S3C64XX_SPI_CLK_CFG);
446}
447
448void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
449 int size, enum s3c2410_dma_buffresult res)
450{
451 struct s3c64xx_spi_driver_data *sdd = buf_id;
452 unsigned long flags;
453
454 spin_lock_irqsave(&sdd->lock, flags);
455
456 if (res == S3C2410_RES_OK)
457 sdd->state &= ~RXBUSY;
458 else
459 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
460
461 /* If the other direction is also done, signal completion */
462 if (!(sdd->state & TXBUSY))
463 complete(&sdd->xfer_completion);
464
465 spin_unlock_irqrestore(&sdd->lock, flags);
466}
467
468void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
469 int size, enum s3c2410_dma_buffresult res)
470{
471 struct s3c64xx_spi_driver_data *sdd = buf_id;
472 unsigned long flags;
473
474 spin_lock_irqsave(&sdd->lock, flags);
475
476 if (res == S3C2410_RES_OK)
477 sdd->state &= ~TXBUSY;
478 else
479 dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d\n", size);
480
481 /* If the other direction is also done, signal completion */
482 if (!(sdd->state & RXBUSY))
483 complete(&sdd->xfer_completion);
484
485 spin_unlock_irqrestore(&sdd->lock, flags);
486}
487
488#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
489
490static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
491 struct spi_message *msg)
492{
493 struct device *dev = &sdd->pdev->dev;
494 struct spi_transfer *xfer;
495
496 if (msg->is_dma_mapped)
497 return 0;
498
499 /* First mark all xfer unmapped */
500 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
501 xfer->rx_dma = XFER_DMAADDR_INVALID;
502 xfer->tx_dma = XFER_DMAADDR_INVALID;
503 }
504
505 /* Map until end or first fail */
506 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
507
508 if (xfer->tx_buf != NULL) {
509 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
510 xfer->len, DMA_TO_DEVICE);
511 if (dma_mapping_error(dev, xfer->tx_dma)) {
512 dev_err(dev, "dma_map_single Tx failed\n");
513 xfer->tx_dma = XFER_DMAADDR_INVALID;
514 return -ENOMEM;
515 }
516 }
517
518 if (xfer->rx_buf != NULL) {
519 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
520 xfer->len, DMA_FROM_DEVICE);
521 if (dma_mapping_error(dev, xfer->rx_dma)) {
522 dev_err(dev, "dma_map_single Rx failed\n");
523 dma_unmap_single(dev, xfer->tx_dma,
524 xfer->len, DMA_TO_DEVICE);
525 xfer->tx_dma = XFER_DMAADDR_INVALID;
526 xfer->rx_dma = XFER_DMAADDR_INVALID;
527 return -ENOMEM;
528 }
529 }
530 }
531
532 return 0;
533}
534
535static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
536 struct spi_message *msg)
537{
538 struct device *dev = &sdd->pdev->dev;
539 struct spi_transfer *xfer;
540
541 if (msg->is_dma_mapped)
542 return;
543
544 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
545
546 if (xfer->rx_buf != NULL
547 && xfer->rx_dma != XFER_DMAADDR_INVALID)
548 dma_unmap_single(dev, xfer->rx_dma,
549 xfer->len, DMA_FROM_DEVICE);
550
551 if (xfer->tx_buf != NULL
552 && xfer->tx_dma != XFER_DMAADDR_INVALID)
553 dma_unmap_single(dev, xfer->tx_dma,
554 xfer->len, DMA_TO_DEVICE);
555 }
556}
557
558static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
559 struct spi_message *msg)
560{
561 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
562 struct spi_device *spi = msg->spi;
563 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
564 struct spi_transfer *xfer;
565 int status = 0, cs_toggle = 0;
566 u32 speed;
567 u8 bpw;
568
569 /* If the master's (controller) state differs from what the slave needs */
570 if (sdd->cur_speed != spi->max_speed_hz
571 || sdd->cur_mode != spi->mode
572 || sdd->cur_bpw != spi->bits_per_word) {
573 sdd->cur_bpw = spi->bits_per_word;
574 sdd->cur_speed = spi->max_speed_hz;
575 sdd->cur_mode = spi->mode;
576 s3c64xx_spi_config(sdd);
577 }
578
579 /* Map all the transfers if needed */
580 if (s3c64xx_spi_map_mssg(sdd, msg)) {
581 dev_err(&spi->dev,
582 "Xfer: Unable to map message buffers!\n");
583 status = -ENOMEM;
584 goto out;
585 }
586
587 /* Configure feedback delay */
588 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
589
590 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
591
592 unsigned long flags;
593 int use_dma;
594
595 INIT_COMPLETION(sdd->xfer_completion);
596
597 /* Only BPW and Speed may change across transfers */
598 bpw = xfer->bits_per_word ? : spi->bits_per_word;
599 speed = xfer->speed_hz ? : spi->max_speed_hz;
600
601 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
602 sdd->cur_bpw = bpw;
603 sdd->cur_speed = speed;
604 s3c64xx_spi_config(sdd);
605 }
606
607 /* Polling method for xfers not bigger than FIFO capacity */
608 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
609 use_dma = 0;
610 else
611 use_dma = 1;
612
613 spin_lock_irqsave(&sdd->lock, flags);
614
615 /* Mark as pending only the work that is actually to be done */
616 sdd->state &= ~RXBUSY;
617 sdd->state &= ~TXBUSY;
618
619 enable_datapath(sdd, spi, xfer, use_dma);
620
621 /* Slave Select */
622 enable_cs(sdd, spi);
623
624 /* Start the signals */
625 S3C64XX_SPI_ACT(sdd);
626
627 spin_unlock_irqrestore(&sdd->lock, flags);
628
629 status = wait_for_xfer(sdd, xfer, use_dma);
630
631 /* Quiesce the signals */
632 S3C64XX_SPI_DEACT(sdd);
633
634 if (status) {
635 dev_err(&spi->dev, "I/O Error: "
636 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
637 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
638 (sdd->state & RXBUSY) ? 'f' : 'p',
639 (sdd->state & TXBUSY) ? 'f' : 'p',
640 xfer->len);
641
642 if (use_dma) {
643 if (xfer->tx_buf != NULL
644 && (sdd->state & TXBUSY))
645 s3c2410_dma_ctrl(sdd->tx_dmach,
646 S3C2410_DMAOP_FLUSH);
647 if (xfer->rx_buf != NULL
648 && (sdd->state & RXBUSY))
649 s3c2410_dma_ctrl(sdd->rx_dmach,
650 S3C2410_DMAOP_FLUSH);
651 }
652
653 goto out;
654 }
655
656 if (xfer->delay_usecs)
657 udelay(xfer->delay_usecs);
658
659 if (xfer->cs_change) {
 660			/* Hint that the next message is likely to be
 661						for the same device */
662 if (list_is_last(&xfer->transfer_list,
663 &msg->transfers))
664 cs_toggle = 1;
665 else
666 disable_cs(sdd, spi);
667 }
668
669 msg->actual_length += xfer->len;
670
671 flush_fifo(sdd);
672 }
673
674out:
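	/*
	 * On success, if cs_change was set on the final transfer, keep the
	 * device selected and remember it in tgl_spi so the next message
	 * for the same device can skip a chip-select toggle.
	 */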
675 if (!cs_toggle || status)
676 disable_cs(sdd, spi);
677 else
678 sdd->tgl_spi = spi;
679
680 s3c64xx_spi_unmap_mssg(sdd, msg);
681
682 msg->status = status;
683
684 if (msg->complete)
685 msg->complete(msg->context);
686}
687
688static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
689{
690 if (s3c2410_dma_request(sdd->rx_dmach,
691 &s3c64xx_spi_dma_client, NULL) < 0) {
692 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
693 return 0;
694 }
695 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
696 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
697 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
698
699 if (s3c2410_dma_request(sdd->tx_dmach,
700 &s3c64xx_spi_dma_client, NULL) < 0) {
701 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
702 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
703 return 0;
704 }
705 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
706 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
707 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
708
709 return 1;
710}
711
712static void s3c64xx_spi_work(struct work_struct *work)
713{
714 struct s3c64xx_spi_driver_data *sdd = container_of(work,
715 struct s3c64xx_spi_driver_data, work);
716 unsigned long flags;
717
718 /* Acquire DMA channels */
719 while (!acquire_dma(sdd))
720 msleep(10);
721
722 spin_lock_irqsave(&sdd->lock, flags);
723
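	/* Drain the message queue unless a suspend has been requested */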
724 while (!list_empty(&sdd->queue)
725 && !(sdd->state & SUSPND)) {
726
727 struct spi_message *msg;
728
729 msg = container_of(sdd->queue.next, struct spi_message, queue);
730
731 list_del_init(&msg->queue);
732
733 /* Set Xfer busy flag */
734 sdd->state |= SPIBUSY;
735
736 spin_unlock_irqrestore(&sdd->lock, flags);
737
738 handle_msg(sdd, msg);
739
740 spin_lock_irqsave(&sdd->lock, flags);
741
742 sdd->state &= ~SPIBUSY;
743 }
744
745 spin_unlock_irqrestore(&sdd->lock, flags);
746
747 /* Free DMA channels */
748 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
749 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
750}
751
752static int s3c64xx_spi_transfer(struct spi_device *spi,
753 struct spi_message *msg)
754{
755 struct s3c64xx_spi_driver_data *sdd;
756 unsigned long flags;
757
758 sdd = spi_master_get_devdata(spi->master);
759
760 spin_lock_irqsave(&sdd->lock, flags);
761
762 if (sdd->state & SUSPND) {
763 spin_unlock_irqrestore(&sdd->lock, flags);
764 return -ESHUTDOWN;
765 }
766
767 msg->status = -EINPROGRESS;
768 msg->actual_length = 0;
769
770 list_add_tail(&msg->queue, &sdd->queue);
771
772 queue_work(sdd->workqueue, &sdd->work);
773
774 spin_unlock_irqrestore(&sdd->lock, flags);
775
776 return 0;
777}
778
779/*
780 * Here we only check the validity of requested configuration
781 * and save the configuration in a local data-structure.
782 * The controller is actually configured only just before we
783 * get a message to transfer.
784 */
785static int s3c64xx_spi_setup(struct spi_device *spi)
786{
787 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
788 struct s3c64xx_spi_driver_data *sdd;
789 struct s3c64xx_spi_cntrlr_info *sci;
790 struct spi_message *msg;
791 u32 psr, speed;
792 unsigned long flags;
793 int err = 0;
794
795 if (cs == NULL || cs->set_level == NULL) {
796 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
797 return -ENODEV;
798 }
799
800 sdd = spi_master_get_devdata(spi->master);
801 sci = sdd->cntrlr_info;
802
803 spin_lock_irqsave(&sdd->lock, flags);
804
805 list_for_each_entry(msg, &sdd->queue, queue) {
 806		/* Is a message already queued for this device? */
807 if (msg->spi == spi) {
808 dev_err(&spi->dev,
 809				"setup: attempt while message in queue!\n");
810 spin_unlock_irqrestore(&sdd->lock, flags);
811 return -EBUSY;
812 }
813 }
814
815 if (sdd->state & SUSPND) {
816 spin_unlock_irqrestore(&sdd->lock, flags);
817 dev_err(&spi->dev,
818 "setup: SPI-%d not active!\n", spi->master->bus_num);
819 return -ESHUTDOWN;
820 }
821
822 spin_unlock_irqrestore(&sdd->lock, flags);
823
824 if (spi->bits_per_word != 8
825 && spi->bits_per_word != 16
826 && spi->bits_per_word != 32) {
 827		dev_err(&spi->dev, "setup: %d bits/word not supported!\n",
828 spi->bits_per_word);
829 err = -EINVAL;
830 goto setup_exit;
831 }
832
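	/*
	 * The bit clock is derived as src_clk / 2 / (PSR + 1), so PSR == 0
	 * gives the fastest rate this controller can offer. PSR is then
	 * picked so the programmed rate never exceeds the requested
	 * max_speed_hz.
	 */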
833 /* Check if we can provide the requested rate */
834 speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
835
836 if (spi->max_speed_hz > speed)
837 spi->max_speed_hz = speed;
838
839 psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
840 psr &= S3C64XX_SPI_PSR_MASK;
841 if (psr == S3C64XX_SPI_PSR_MASK)
842 psr--;
843
844 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
845 if (spi->max_speed_hz < speed) {
846 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
847 psr++;
848 } else {
849 err = -EINVAL;
850 goto setup_exit;
851 }
852 }
853
854 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
855 if (spi->max_speed_hz >= speed)
856 spi->max_speed_hz = speed;
857 else
858 err = -EINVAL;
859
860setup_exit:
861
862 /* setup() returns with device de-selected */
863 disable_cs(sdd, spi);
864
865 return err;
866}
867
868static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
869{
870 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
871 void __iomem *regs = sdd->regs;
872 unsigned int val;
873
874 sdd->cur_speed = 0;
875
876 S3C64XX_SPI_DEACT(sdd);
877
 878	/* Disable Interrupts - we use polling when not in DMA mode */
879 writel(0, regs + S3C64XX_SPI_INT_EN);
880
881 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
882 regs + S3C64XX_SPI_CLK_CFG);
883 writel(0, regs + S3C64XX_SPI_MODE_CFG);
884 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
885
886 /* Clear any irq pending bits */
887 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
888 regs + S3C64XX_SPI_PENDING_CLR);
889
890 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
891
892 val = readl(regs + S3C64XX_SPI_MODE_CFG);
893 val &= ~S3C64XX_SPI_MODE_4BURST;
894 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
895 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
896 writel(val, regs + S3C64XX_SPI_MODE_CFG);
897
898 flush_fifo(sdd);
899}
900
901static int __init s3c64xx_spi_probe(struct platform_device *pdev)
902{
903 struct resource *mem_res, *dmatx_res, *dmarx_res;
904 struct s3c64xx_spi_driver_data *sdd;
905 struct s3c64xx_spi_cntrlr_info *sci;
906 struct spi_master *master;
907 int ret;
908
909 if (pdev->id < 0) {
910 dev_err(&pdev->dev,
911 "Invalid platform device id-%d\n", pdev->id);
912 return -ENODEV;
913 }
914
915 if (pdev->dev.platform_data == NULL) {
916 dev_err(&pdev->dev, "platform_data missing!\n");
917 return -ENODEV;
918 }
919
 920	/* Check for availability of necessary resources */
921
922 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
923 if (dmatx_res == NULL) {
924 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
925 return -ENXIO;
926 }
927
928 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
929 if (dmarx_res == NULL) {
930 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
931 return -ENXIO;
932 }
933
934 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
935 if (mem_res == NULL) {
936 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
937 return -ENXIO;
938 }
939
940 master = spi_alloc_master(&pdev->dev,
941 sizeof(struct s3c64xx_spi_driver_data));
942 if (master == NULL) {
943 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
944 return -ENOMEM;
945 }
946
947 sci = pdev->dev.platform_data;
948
949 platform_set_drvdata(pdev, master);
950
951 sdd = spi_master_get_devdata(master);
952 sdd->master = master;
953 sdd->cntrlr_info = sci;
954 sdd->pdev = pdev;
955 sdd->sfr_start = mem_res->start;
956 sdd->tx_dmach = dmatx_res->start;
957 sdd->rx_dmach = dmarx_res->start;
958
959 sdd->cur_bpw = 8;
960
961 master->bus_num = pdev->id;
962 master->setup = s3c64xx_spi_setup;
963 master->transfer = s3c64xx_spi_transfer;
964 master->num_chipselect = sci->num_cs;
965 master->dma_alignment = 8;
966 /* the spi->mode bits understood by this driver: */
967 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
968
969 if (request_mem_region(mem_res->start,
970 resource_size(mem_res), pdev->name) == NULL) {
971 dev_err(&pdev->dev, "Req mem region failed\n");
972 ret = -ENXIO;
973 goto err0;
974 }
975
976 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
977 if (sdd->regs == NULL) {
978 dev_err(&pdev->dev, "Unable to remap IO\n");
979 ret = -ENXIO;
980 goto err1;
981 }
982
983 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
984 dev_err(&pdev->dev, "Unable to config gpio\n");
985 ret = -EBUSY;
986 goto err2;
987 }
988
989 /* Setup clocks */
990 sdd->clk = clk_get(&pdev->dev, "spi");
991 if (IS_ERR(sdd->clk)) {
992 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
993 ret = PTR_ERR(sdd->clk);
994 goto err3;
995 }
996
997 if (clk_enable(sdd->clk)) {
998 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
999 ret = -EBUSY;
1000 goto err4;
1001 }
1002
1003 if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
1004 sci->src_clk = sdd->clk;
1005 else
1006 sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
1007 if (IS_ERR(sci->src_clk)) {
1008 dev_err(&pdev->dev,
1009 "Unable to acquire clock '%s'\n", sci->src_clk_name);
1010 ret = PTR_ERR(sci->src_clk);
1011 goto err5;
1012 }
1013
1014 if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
1015 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
1016 sci->src_clk_name);
1017 ret = -EBUSY;
1018 goto err6;
1019 }
1020
1021 sdd->workqueue = create_singlethread_workqueue(
1022 dev_name(master->dev.parent));
1023 if (sdd->workqueue == NULL) {
1024 dev_err(&pdev->dev, "Unable to create workqueue\n");
1025 ret = -ENOMEM;
1026 goto err7;
1027 }
1028
1029	/* Setup Default Mode */
1030 s3c64xx_spi_hwinit(sdd, pdev->id);
1031
1032 spin_lock_init(&sdd->lock);
1033 init_completion(&sdd->xfer_completion);
1034 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1035 INIT_LIST_HEAD(&sdd->queue);
1036
1037 if (spi_register_master(master)) {
1038 dev_err(&pdev->dev, "cannot register SPI master\n");
1039 ret = -EBUSY;
1040 goto err8;
1041 }
1042
1043	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
1044					"with %d Slaves attached\n",
1045					pdev->id, master->num_chipselect);
1046	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]"
1047					"\tDMA=[Rx-%d, Tx-%d]\n",
1048					mem_res->start, mem_res->end,
1049					sdd->rx_dmach, sdd->tx_dmach);
1050
1051 return 0;
1052
1053err8:
1054 destroy_workqueue(sdd->workqueue);
1055err7:
1056 if (sci->src_clk != sdd->clk)
1057 clk_disable(sci->src_clk);
1058err6:
1059 if (sci->src_clk != sdd->clk)
1060 clk_put(sci->src_clk);
1061err5:
1062 clk_disable(sdd->clk);
1063err4:
1064 clk_put(sdd->clk);
1065err3:
1066err2:
1067 iounmap((void *) sdd->regs);
1068err1:
1069 release_mem_region(mem_res->start, resource_size(mem_res));
1070err0:
1071 platform_set_drvdata(pdev, NULL);
1072 spi_master_put(master);
1073
1074 return ret;
1075}
1076
1077static int s3c64xx_spi_remove(struct platform_device *pdev)
1078{
1079 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1080 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1081 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1082 struct resource *mem_res;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&sdd->lock, flags);
1086 sdd->state |= SUSPND;
1087 spin_unlock_irqrestore(&sdd->lock, flags);
1088
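	/* Wait for any message currently being processed to complete */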
1089 while (sdd->state & SPIBUSY)
1090 msleep(10);
1091
1092 spi_unregister_master(master);
1093
1094 destroy_workqueue(sdd->workqueue);
1095
1096 if (sci->src_clk != sdd->clk)
1097 clk_disable(sci->src_clk);
1098
1099 if (sci->src_clk != sdd->clk)
1100 clk_put(sci->src_clk);
1101
1102 clk_disable(sdd->clk);
1103 clk_put(sdd->clk);
1104
1105 iounmap((void *) sdd->regs);
1106
1107 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108 release_mem_region(mem_res->start, resource_size(mem_res));
1109
1110 platform_set_drvdata(pdev, NULL);
1111 spi_master_put(master);
1112
1113 return 0;
1114}
1115
1116#ifdef CONFIG_PM
1117static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1118{
1119 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1120 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1121 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1122 struct s3c64xx_spi_csinfo *cs;
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&sdd->lock, flags);
1126 sdd->state |= SUSPND;
1127 spin_unlock_irqrestore(&sdd->lock, flags);
1128
1129 while (sdd->state & SPIBUSY)
1130 msleep(10);
1131
1132 /* Disable the clock */
1133 if (sci->src_clk != sdd->clk)
1134 clk_disable(sci->src_clk);
1135
1136 clk_disable(sdd->clk);
1137
1138 sdd->cur_speed = 0; /* Output Clock is stopped */
1139
1140 return 0;
1141}
1142
1143static int s3c64xx_spi_resume(struct platform_device *pdev)
1144{
1145 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1146 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1147 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1148 unsigned long flags;
1149
1150 sci->cfg_gpio(pdev);
1151
1152 /* Enable the clock */
1153 if (sci->src_clk != sdd->clk)
1154 clk_enable(sci->src_clk);
1155
1156 clk_enable(sdd->clk);
1157
1158 s3c64xx_spi_hwinit(sdd, pdev->id);
1159
1160 spin_lock_irqsave(&sdd->lock, flags);
1161 sdd->state &= ~SUSPND;
1162 spin_unlock_irqrestore(&sdd->lock, flags);
1163
1164 return 0;
1165}
1166#else
1167#define s3c64xx_spi_suspend NULL
1168#define s3c64xx_spi_resume NULL
1169#endif /* CONFIG_PM */
1170
1171static struct platform_driver s3c64xx_spi_driver = {
1172 .driver = {
1173 .name = "s3c64xx-spi",
1174 .owner = THIS_MODULE,
1175 },
1176 .remove = s3c64xx_spi_remove,
1177 .suspend = s3c64xx_spi_suspend,
1178 .resume = s3c64xx_spi_resume,
1179};
1180MODULE_ALIAS("platform:s3c64xx-spi");
1181
1182static int __init s3c64xx_spi_init(void)
1183{
1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1185}
1186module_init(s3c64xx_spi_init);
1187
1188static void __exit s3c64xx_spi_exit(void)
1189{
1190 platform_driver_unregister(&s3c64xx_spi_driver);
1191}
1192module_exit(s3c64xx_spi_exit);
1193
1194MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1195MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1196MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index 7d36720eb982..a65c12ffa733 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
148 ret = -ENOENT; 148 ret = -ENOENT;
149 goto err1; 149 goto err1;
150 } 150 }
151 sp->membase = ioremap(r->start, r->end - r->start + 1); 151 sp->membase = ioremap(r->start, resource_size(r));
152 if (!sp->membase) { 152 if (!sp->membase) {
153 ret = -ENXIO; 153 ret = -ENXIO;
154 goto err1; 154 goto err1;
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 19f75627c3de..dfa024b633e1 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
375 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 375 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
376 if (!res) 376 if (!res)
377 goto exit_busy; 377 goto exit_busy;
378 if (!devm_request_mem_region(&dev->dev, 378 if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
379 res->start, res->end - res->start + 1,
380 "spi_txx9")) 379 "spi_txx9"))
381 goto exit_busy; 380 goto exit_busy;
382 c->membase = devm_ioremap(&dev->dev, 381 c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
383 res->start, res->end - res->start + 1);
384 if (!c->membase) 382 if (!c->membase)
385 goto exit_busy; 383 goto exit_busy;
386 384
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9c446e6003d5..ea1bec3c9a13 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -53,7 +53,7 @@
53#define SPIDEV_MAJOR 153 /* assigned */ 53#define SPIDEV_MAJOR 153 /* assigned */
54#define N_SPI_MINORS 32 /* ... up to 256 */ 54#define N_SPI_MINORS 32 /* ... up to 256 */
55 55
56static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; 56static DECLARE_BITMAP(minors, N_SPI_MINORS);
57 57
58 58
59/* Bit masks for spi_device.mode management. Note that incorrect 59/* Bit masks for spi_device.mode management. Note that incorrect
@@ -558,7 +558,7 @@ static struct class *spidev_class;
558 558
559/*-------------------------------------------------------------------------*/ 559/*-------------------------------------------------------------------------*/
560 560
561static int spidev_probe(struct spi_device *spi) 561static int __devinit spidev_probe(struct spi_device *spi)
562{ 562{
563 struct spidev_data *spidev; 563 struct spidev_data *spidev;
564 int status; 564 int status;
@@ -607,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
607 return status; 607 return status;
608} 608}
609 609
610static int spidev_remove(struct spi_device *spi) 610static int __devexit spidev_remove(struct spi_device *spi)
611{ 611{
612 struct spidev_data *spidev = spi_get_drvdata(spi); 612 struct spidev_data *spidev = spi_get_drvdata(spi);
613 613
@@ -629,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
629 return 0; 629 return 0;
630} 630}
631 631
632static struct spi_driver spidev_spi = { 632static struct spi_driver spidev_spi_driver = {
633 .driver = { 633 .driver = {
634 .name = "spidev", 634 .name = "spidev",
635 .owner = THIS_MODULE, 635 .owner = THIS_MODULE,
@@ -661,14 +661,14 @@ static int __init spidev_init(void)
661 661
662 spidev_class = class_create(THIS_MODULE, "spidev"); 662 spidev_class = class_create(THIS_MODULE, "spidev");
663 if (IS_ERR(spidev_class)) { 663 if (IS_ERR(spidev_class)) {
664 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 664 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
665 return PTR_ERR(spidev_class); 665 return PTR_ERR(spidev_class);
666 } 666 }
667 667
668 status = spi_register_driver(&spidev_spi); 668 status = spi_register_driver(&spidev_spi_driver);
669 if (status < 0) { 669 if (status < 0) {
670 class_destroy(spidev_class); 670 class_destroy(spidev_class);
671 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 671 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
672 } 672 }
673 return status; 673 return status;
674} 674}
@@ -676,9 +676,9 @@ module_init(spidev_init);
676 676
677static void __exit spidev_exit(void) 677static void __exit spidev_exit(void)
678{ 678{
679 spi_unregister_driver(&spidev_spi); 679 spi_unregister_driver(&spidev_spi_driver);
680 class_destroy(spidev_class); 680 class_destroy(spidev_class);
681 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 681 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
682} 682}
683module_exit(spidev_exit); 683module_exit(spidev_exit);
684 684
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index f0b86f02cd80..fd677f008365 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -29,7 +29,6 @@
29 * driver requests - some may support multiple options */ 29 * driver requests - some may support multiple options */
30 30
31 31
32#include <linux/autoconf.h>
33#include "iio.h" 32#include "iio.h"
34#include "ring_generic.h" 33#include "ring_generic.h"
35 34
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 536e2382de54..638ad6b35891 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,7 +1,8 @@
1config OCTEON_ETHERNET 1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support" 2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON 3 depends on CPU_CAVIUM_OCTEON
4 select MII 4 select PHYLIB
5 select MDIO_OCTEON
5 help 6 help
6 This driver supports the builtin ethernet ports on Cavium 7 This driver supports the builtin ethernet ports on Cavium
7 Networks' products in the Octeon family. This driver supports the 8 Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 31a58e508924..05a5cc0f43ed 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -26,7 +26,8 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/ethtool.h> 28#include <linux/ethtool.h>
29#include <linux/mii.h> 29#include <linux/phy.h>
30
30#include <net/dst.h> 31#include <net/dst.h>
31 32
32#include <asm/octeon/octeon.h> 33#include <asm/octeon/octeon.h>
@@ -34,86 +35,12 @@
34#include "ethernet-defines.h" 35#include "ethernet-defines.h"
35#include "octeon-ethernet.h" 36#include "octeon-ethernet.h"
36#include "ethernet-mdio.h" 37#include "ethernet-mdio.h"
38#include "ethernet-util.h"
37 39
38#include "cvmx-helper-board.h" 40#include "cvmx-helper-board.h"
39 41
40#include "cvmx-smix-defs.h" 42#include "cvmx-smix-defs.h"
41 43
42DECLARE_MUTEX(mdio_sem);
43
44/**
45 * Perform an MII read. Called by the generic MII routines
46 *
47 * @dev: Device to perform read for
48 * @phy_id: The MII phy id
49 * @location: Register location to read
50 * Returns Result from the read or zero on failure
51 */
52static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
53{
54 union cvmx_smix_cmd smi_cmd;
55 union cvmx_smix_rd_dat smi_rd;
56
57 smi_cmd.u64 = 0;
58 smi_cmd.s.phy_op = 1;
59 smi_cmd.s.phy_adr = phy_id;
60 smi_cmd.s.reg_adr = location;
61 cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
62
63 do {
64 if (!in_interrupt())
65 yield();
66 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
67 } while (smi_rd.s.pending);
68
69 if (smi_rd.s.val)
70 return smi_rd.s.dat;
71 else
72 return 0;
73}
74
75static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
76 int location)
77{
78 return 0xffff;
79}
80
81/**
82 * Perform an MII write. Called by the generic MII routines
83 *
84 * @dev: Device to perform write for
85 * @phy_id: The MII phy id
86 * @location: Register location to write
87 * @val: Value to write
88 */
89static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
90 int val)
91{
92 union cvmx_smix_cmd smi_cmd;
93 union cvmx_smix_wr_dat smi_wr;
94
95 smi_wr.u64 = 0;
96 smi_wr.s.dat = val;
97 cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);
98
99 smi_cmd.u64 = 0;
100 smi_cmd.s.phy_op = 0;
101 smi_cmd.s.phy_adr = phy_id;
102 smi_cmd.s.reg_adr = location;
103 cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
104
105 do {
106 if (!in_interrupt())
107 yield();
108 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
109 } while (smi_wr.s.pending);
110}
111
112static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
113 int location, int val)
114{
115}
116
117static void cvm_oct_get_drvinfo(struct net_device *dev, 44static void cvm_oct_get_drvinfo(struct net_device *dev,
118 struct ethtool_drvinfo *info) 45 struct ethtool_drvinfo *info)
119{ 46{
@@ -125,49 +52,37 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
125static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 52static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
126{ 53{
127 struct octeon_ethernet *priv = netdev_priv(dev); 54 struct octeon_ethernet *priv = netdev_priv(dev);
128 int ret;
129 55
130 down(&mdio_sem); 56 if (priv->phydev)
131 ret = mii_ethtool_gset(&priv->mii_info, cmd); 57 return phy_ethtool_gset(priv->phydev, cmd);
132 up(&mdio_sem);
133 58
134 return ret; 59 return -EINVAL;
135} 60}
136 61
137static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 62static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
138{ 63{
139 struct octeon_ethernet *priv = netdev_priv(dev); 64 struct octeon_ethernet *priv = netdev_priv(dev);
140 int ret;
141 65
142 down(&mdio_sem); 66 if (!capable(CAP_NET_ADMIN))
143 ret = mii_ethtool_sset(&priv->mii_info, cmd); 67 return -EPERM;
144 up(&mdio_sem); 68
69 if (priv->phydev)
70 return phy_ethtool_sset(priv->phydev, cmd);
145 71
146 return ret; 72 return -EINVAL;
147} 73}
148 74
149static int cvm_oct_nway_reset(struct net_device *dev) 75static int cvm_oct_nway_reset(struct net_device *dev)
150{ 76{
151 struct octeon_ethernet *priv = netdev_priv(dev); 77 struct octeon_ethernet *priv = netdev_priv(dev);
152 int ret;
153 78
154 down(&mdio_sem); 79 if (!capable(CAP_NET_ADMIN))
155 ret = mii_nway_restart(&priv->mii_info); 80 return -EPERM;
156 up(&mdio_sem);
157 81
158 return ret; 82 if (priv->phydev)
159} 83 return phy_start_aneg(priv->phydev);
160 84
161static u32 cvm_oct_get_link(struct net_device *dev) 85 return -EINVAL;
162{
163 struct octeon_ethernet *priv = netdev_priv(dev);
164 u32 ret;
165
166 down(&mdio_sem);
167 ret = mii_link_ok(&priv->mii_info);
168 up(&mdio_sem);
169
170 return ret;
171} 86}
172 87
173const struct ethtool_ops cvm_oct_ethtool_ops = { 88const struct ethtool_ops cvm_oct_ethtool_ops = {
@@ -175,7 +90,7 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
175 .get_settings = cvm_oct_get_settings, 90 .get_settings = cvm_oct_get_settings,
176 .set_settings = cvm_oct_set_settings, 91 .set_settings = cvm_oct_set_settings,
177 .nway_reset = cvm_oct_nway_reset, 92 .nway_reset = cvm_oct_nway_reset,
178 .get_link = cvm_oct_get_link, 93 .get_link = ethtool_op_get_link,
179 .get_sg = ethtool_op_get_sg, 94 .get_sg = ethtool_op_get_sg,
180 .get_tx_csum = ethtool_op_get_tx_csum, 95 .get_tx_csum = ethtool_op_get_tx_csum,
181}; 96};
@@ -191,41 +106,78 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
191int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
192{ 107{
193 struct octeon_ethernet *priv = netdev_priv(dev); 108 struct octeon_ethernet *priv = netdev_priv(dev);
194 struct mii_ioctl_data *data = if_mii(rq);
195 unsigned int duplex_chg;
196 int ret;
197 109
198 down(&mdio_sem); 110 if (!netif_running(dev))
199 ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg); 111 return -EINVAL;
200 up(&mdio_sem); 112
113 if (!priv->phydev)
114 return -EINVAL;
115
116 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
117}
201 118
202 return ret; 119static void cvm_oct_adjust_link(struct net_device *dev)
120{
121 struct octeon_ethernet *priv = netdev_priv(dev);
122 cvmx_helper_link_info_t link_info;
123
124 if (priv->last_link != priv->phydev->link) {
125 priv->last_link = priv->phydev->link;
126 link_info.u64 = 0;
127 link_info.s.link_up = priv->last_link ? 1 : 0;
128 link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
129 link_info.s.speed = priv->phydev->speed;
130 cvmx_helper_link_set( priv->port, link_info);
131 if (priv->last_link) {
132 netif_carrier_on(dev);
133 if (priv->queue != -1)
134 DEBUGPRINT("%s: %u Mbps %s duplex, "
135 "port %2d, queue %2d\n",
136 dev->name, priv->phydev->speed,
137 priv->phydev->duplex ?
138 "Full" : "Half",
139 priv->port, priv->queue);
140 else
141 DEBUGPRINT("%s: %u Mbps %s duplex, "
142 "port %2d, POW\n",
143 dev->name, priv->phydev->speed,
144 priv->phydev->duplex ?
145 "Full" : "Half",
146 priv->port);
147 } else {
148 netif_carrier_off(dev);
149 DEBUGPRINT("%s: Link down\n", dev->name);
150 }
151 }
203} 152}
204 153
154
205/** 155/**
206 * Setup the MDIO device structures 156 * Setup the PHY
207 * 157 *
208 * @dev: Device to setup 158 * @dev: Device to setup
209 * 159 *
210 * Returns Zero on success, negative on failure 160 * Returns Zero on success, negative on failure
211 */ 161 */
212int cvm_oct_mdio_setup_device(struct net_device *dev) 162int cvm_oct_phy_setup_device(struct net_device *dev)
213{ 163{
214 struct octeon_ethernet *priv = netdev_priv(dev); 164 struct octeon_ethernet *priv = netdev_priv(dev);
215 int phy_id = cvmx_helper_board_get_mii_address(priv->port); 165
216 if (phy_id != -1) { 166 int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
217 priv->mii_info.dev = dev; 167 if (phy_addr != -1) {
218 priv->mii_info.phy_id = phy_id; 168 char phy_id[20];
219 priv->mii_info.phy_id_mask = 0xff; 169
220 priv->mii_info.supports_gmii = 1; 170 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
221 priv->mii_info.reg_num_mask = 0x1f; 171
222 priv->mii_info.mdio_read = cvm_oct_mdio_read; 172 priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
223 priv->mii_info.mdio_write = cvm_oct_mdio_write; 173 PHY_INTERFACE_MODE_GMII);
224 } else { 174
225 /* Supply dummy MDIO routines so the kernel won't crash 175 if (IS_ERR(priv->phydev)) {
226 if the user tries to read them */ 176 priv->phydev = NULL;
227 priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read; 177 return -1;
228 priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write; 178 }
179 priv->last_link = 0;
180 phy_start_aneg(priv->phydev);
229 } 181 }
230 return 0; 182 return 0;
231} 183}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index b3328aeec2df..55d0614a7cd9 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -43,4 +43,4 @@
43 43
44extern const struct ethtool_ops cvm_oct_ethtool_ops; 44extern const struct ethtool_ops cvm_oct_ethtool_ops;
45int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 45int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
46int cvm_oct_mdio_setup_device(struct net_device *dev); 46int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
index 8fa88fc419b7..16308d484d3b 100644
--- a/drivers/staging/octeon/ethernet-proc.c
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -25,7 +25,6 @@
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/mii.h>
29#include <linux/seq_file.h> 28#include <linux/seq_file.h>
30#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
31#include <net/dst.h> 30#include <net/dst.h>
@@ -38,112 +37,6 @@
38#include "cvmx-helper.h" 37#include "cvmx-helper.h"
39#include "cvmx-pip.h" 38#include "cvmx-pip.h"
40 39
41static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
42 int phy_id, int offset)
43{
44 struct octeon_ethernet *priv = netdev_priv(dev);
45
46 priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
47 return ((uint64_t) priv->mii_info.
48 mdio_read(dev, phy_id,
49 0x1e) << 16) | (uint64_t) priv->mii_info.
50 mdio_read(dev, phy_id, 0x1f);
51}
52
53static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
54{
55 static const int ports[] = { 0, 1, 2, 3, 9, -1 };
56 struct net_device *dev = cvm_oct_device[0];
57 int index = 0;
58
59 while (ports[index] != -1) {
60
61 /* Latch port */
62 struct octeon_ethernet *priv = netdev_priv(dev);
63
64 priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
65 0xdc00 | ports[index]);
66 seq_printf(m, "\nSwitch Port %d\n", ports[index]);
67 seq_printf(m, "InGoodOctets: %12llu\t"
68 "OutOctets: %12llu\t"
69 "64 Octets: %12llu\n",
70 cvm_oct_stats_read_switch(dev, 0x1b,
71 0x00) |
72 (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
73 cvm_oct_stats_read_switch(dev, 0x1b,
74 0x0E) |
75 (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
76 cvm_oct_stats_read_switch(dev, 0x1b, 0x08));
77
78 seq_printf(m, "InBadOctets: %12llu\t"
79 "OutUnicast: %12llu\t"
80 "65-127 Octets: %12llu\n",
81 cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
82 cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
83 cvm_oct_stats_read_switch(dev, 0x1b, 0x09));
84
85 seq_printf(m, "InUnicast: %12llu\t"
86 "OutBroadcasts: %12llu\t"
87 "128-255 Octets: %12llu\n",
88 cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
89 cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
90 cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));
91
92 seq_printf(m, "InBroadcasts: %12llu\t"
93 "OutMulticasts: %12llu\t"
94 "256-511 Octets: %12llu\n",
95 cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
96 cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
97 cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));
98
99 seq_printf(m, "InMulticasts: %12llu\t"
100 "OutPause: %12llu\t"
101 "512-1023 Octets:%12llu\n",
102 cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
103 cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
104 cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));
105
106 seq_printf(m, "InPause: %12llu\t"
107 "Excessive: %12llu\t"
108 "1024-Max Octets:%12llu\n",
109 cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
110 cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
111 cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));
112
113 seq_printf(m, "InUndersize: %12llu\t"
114 "Collisions: %12llu\n",
115 cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
116 cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));
117
118 seq_printf(m, "InFragments: %12llu\t"
119 "Deferred: %12llu\n",
120 cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
121 cvm_oct_stats_read_switch(dev, 0x1b, 0x05));
122
123 seq_printf(m, "InOversize: %12llu\t"
124 "Single: %12llu\n",
125 cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
126 cvm_oct_stats_read_switch(dev, 0x1b, 0x14));
127
128 seq_printf(m, "InJabber: %12llu\t"
129 "Multiple: %12llu\n",
130 cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
131 cvm_oct_stats_read_switch(dev, 0x1b, 0x17));
132
133 seq_printf(m, "In RxErr: %12llu\t"
134 "OutFCSErr: %12llu\n",
135 cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
136 cvm_oct_stats_read_switch(dev, 0x1b, 0x03));
137
138 seq_printf(m, "InFCSErr: %12llu\t"
139 "Late: %12llu\n",
140 cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
141 cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
142 index++;
143 }
144 return 0;
145}
146
147/** 40/**
148 * User is reading /proc/octeon_ethernet_stats 41 * User is reading /proc/octeon_ethernet_stats
149 * 42 *
@@ -215,11 +108,6 @@ static int cvm_oct_stats_show(struct seq_file *m, void *v)
215 } 108 }
216 } 109 }
217 110
218 if (cvm_oct_device[0]) {
219 priv = netdev_priv(cvm_oct_device[0]);
220 if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
221 cvm_oct_stats_switch_show(m, v);
222 }
223 return 0; 111 return 0;
224} 112}
225 113
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index fbaa465d2fac..3820f1ec11d1 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -147,32 +147,36 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
147 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), 147 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
148 gmxx_rxx_int_reg.u64); 148 gmxx_rxx_int_reg.u64);
149 } 149 }
150 150 if (priv->phydev == NULL) {
151 link_info = cvmx_helper_link_autoconf(priv->port); 151 link_info = cvmx_helper_link_autoconf(priv->port);
152 priv->link_info = link_info.u64; 152 priv->link_info = link_info.u64;
153 }
153 spin_unlock_irqrestore(&global_register_lock, flags); 154 spin_unlock_irqrestore(&global_register_lock, flags);
154 155
155 /* Tell Linux */ 156 if (priv->phydev == NULL) {
156 if (link_info.s.link_up) { 157 /* Tell core. */
157 158 if (link_info.s.link_up) {
158 if (!netif_carrier_ok(dev)) 159 if (!netif_carrier_ok(dev))
159 netif_carrier_on(dev); 160 netif_carrier_on(dev);
160 if (priv->queue != -1) 161 if (priv->queue != -1)
161 DEBUGPRINT 162 DEBUGPRINT("%s: %u Mbps %s duplex, "
162 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", 163 "port %2d, queue %2d\n",
163 dev->name, link_info.s.speed, 164 dev->name, link_info.s.speed,
164 (link_info.s.full_duplex) ? "Full" : "Half", 165 (link_info.s.full_duplex) ?
165 priv->port, priv->queue); 166 "Full" : "Half",
166 else 167 priv->port, priv->queue);
167 DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", 168 else
168 dev->name, link_info.s.speed, 169 DEBUGPRINT("%s: %u Mbps %s duplex, "
169 (link_info.s.full_duplex) ? "Full" : "Half", 170 "port %2d, POW\n",
170 priv->port); 171 dev->name, link_info.s.speed,
171 } else { 172 (link_info.s.full_duplex) ?
172 173 "Full" : "Half",
173 if (netif_carrier_ok(dev)) 174 priv->port);
174 netif_carrier_off(dev); 175 } else {
175 DEBUGPRINT("%s: Link down\n", dev->name); 176 if (netif_carrier_ok(dev))
177 netif_carrier_off(dev);
178 DEBUGPRINT("%s: Link down\n", dev->name);
179 }
176 } 180 }
177} 181}
178 182
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 2b54996bd85d..6061d01eca2d 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -113,7 +113,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
113 struct octeon_ethernet *priv = netdev_priv(dev); 113 struct octeon_ethernet *priv = netdev_priv(dev);
114 cvm_oct_common_init(dev); 114 cvm_oct_common_init(dev);
115 dev->netdev_ops->ndo_stop(dev); 115 dev->netdev_ops->ndo_stop(dev);
116 if (!octeon_is_simulation()) 116 if (!octeon_is_simulation() && priv->phydev == NULL)
117 priv->poll = cvm_oct_sgmii_poll; 117 priv->poll = cvm_oct_sgmii_poll;
118 118
119 /* FIXME: Need autoneg logic */ 119 /* FIXME: Need autoneg logic */
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 0c2e7cc40f35..ee3dc41b2c53 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -112,7 +112,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
112 struct octeon_ethernet *priv = netdev_priv(dev); 112 struct octeon_ethernet *priv = netdev_priv(dev);
113 cvm_oct_common_init(dev); 113 cvm_oct_common_init(dev);
114 dev->netdev_ops->ndo_stop(dev); 114 dev->netdev_ops->ndo_stop(dev);
115 if (!octeon_is_simulation()) 115 if (!octeon_is_simulation() && priv->phydev == NULL)
116 priv->poll = cvm_oct_xaui_poll; 116 priv->poll = cvm_oct_xaui_poll;
117 117
118 return 0; 118 return 0;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 492c5029992d..4cfd4b136b32 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -30,7 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/mii.h> 33#include <linux/phy.h>
34 34
35#include <net/dst.h> 35#include <net/dst.h>
36 36
@@ -132,8 +132,6 @@ static struct timer_list cvm_oct_poll_timer;
132 */ 132 */
133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
134 134
135extern struct semaphore mdio_sem;
136
137/** 135/**
138 * Periodic timer tick for slow management operations 136 * Periodic timer tick for slow management operations
139 * 137 *
@@ -160,13 +158,8 @@ static void cvm_do_timer(unsigned long arg)
160 goto out; 158 goto out;
161 159
162 priv = netdev_priv(cvm_oct_device[port]); 160 priv = netdev_priv(cvm_oct_device[port]);
163 if (priv->poll) { 161 if (priv->poll)
164 /* skip polling if we don't get the lock */ 162 priv->poll(cvm_oct_device[port]);
165 if (!down_trylock(&mdio_sem)) {
166 priv->poll(cvm_oct_device[port]);
167 up(&mdio_sem);
168 }
169 }
170 163
171 queues_per_port = cvmx_pko_get_num_queues(port); 164 queues_per_port = cvmx_pko_get_num_queues(port);
172 /* Drain any pending packets in the free list */ 165 /* Drain any pending packets in the free list */
@@ -524,7 +517,7 @@ int cvm_oct_common_init(struct net_device *dev)
524 dev->features |= NETIF_F_LLTX; 517 dev->features |= NETIF_F_LLTX;
525 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); 518 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
526 519
527 cvm_oct_mdio_setup_device(dev); 520 cvm_oct_phy_setup_device(dev);
528 dev->netdev_ops->ndo_set_mac_address(dev, &sa); 521 dev->netdev_ops->ndo_set_mac_address(dev, &sa);
529 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); 522 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
530 523
@@ -540,7 +533,10 @@ int cvm_oct_common_init(struct net_device *dev)
540 533
541void cvm_oct_common_uninit(struct net_device *dev) 534void cvm_oct_common_uninit(struct net_device *dev)
542{ 535{
543 /* Currently nothing to do */ 536 struct octeon_ethernet *priv = netdev_priv(dev);
537
538 if (priv->phydev)
539 phy_disconnect(priv->phydev);
544} 540}
545 541
546static const struct net_device_ops cvm_oct_npi_netdev_ops = { 542static const struct net_device_ops cvm_oct_npi_netdev_ops = {
@@ -627,6 +623,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
627#endif 623#endif
628}; 624};
629 625
626extern void octeon_mdiobus_force_mod_depencency(void);
627
630/** 628/**
631 * Module/ driver initialization. Creates the linux network 629 * Module/ driver initialization. Creates the linux network
632 * devices. 630 * devices.
@@ -640,6 +638,7 @@ static int __init cvm_oct_init_module(void)
640 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE; 638 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
641 int qos; 639 int qos;
642 640
641 octeon_mdiobus_force_mod_depencency();
643 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION); 642 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
644 643
645 if (OCTEON_IS_MODEL(OCTEON_CN52XX)) 644 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 3aef9878fc0a..402a15b9bb0e 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -50,9 +50,9 @@ struct octeon_ethernet {
50 /* List of outstanding tx buffers per queue */ 50 /* List of outstanding tx buffers per queue */
51 struct sk_buff_head tx_free_list[16]; 51 struct sk_buff_head tx_free_list[16];
52 /* Device statistics */ 52 /* Device statistics */
53 struct net_device_stats stats 53 struct net_device_stats stats;
54; /* Generic MII info structure */ 54 struct phy_device *phydev;
55 struct mii_if_info mii_info; 55 unsigned int last_link;
56 /* Last negotiated link state */ 56 /* Last negotiated link state */
57 uint64_t link_info; 57 uint64_t link_info;
58 /* Called periodically to check link status */ 58 /* Called periodically to check link status */
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 4ce399b6d237..f98a52448eae 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -55,7 +55,7 @@
55#include <linux/list.h> 55#include <linux/list.h>
56#include <linux/notifier.h> 56#include <linux/notifier.h>
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/utsrelease.h> 58#include <generated/utsrelease.h>
59 59
60#include <linux/io.h> 60#include <linux/io.h>
61#include <asm/uaccess.h> 61#include <asm/uaccess.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 12f1ad2fd0e8..74d07f4e8b7d 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -37,7 +37,7 @@
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/gpio.h> 39#include <linux/gpio.h>
40#include <mach/usb.h> 40#include <plat/usb.h>
41 41
42/* 42/*
43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES 43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 2051c9dc813b..b7687c55fe16 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2245,9 +2245,6 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
2245 if (regno > 255) 2245 if (regno > 255)
2246 return 1; 2246 return 1;
2247 2247
2248 if (regno > 255)
2249 return 1;
2250
2251 switch (external_card_type) { 2248 switch (external_card_type) {
2252 case IS_VGA: 2249 case IS_VGA:
2253 OUTB(0x3c8, regno); 2250 OUTB(0x3c8, regno);
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4c10edecfb66..86d95c228adb 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -85,7 +85,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
85 return error ? data->current_brightness : reg_val; 85 return error ? data->current_brightness : reg_val;
86} 86}
87 87
88static struct backlight_ops adp5520_bl_ops = { 88static const struct backlight_ops adp5520_bl_ops = {
89 .update_status = adp5520_bl_update_status, 89 .update_status = adp5520_bl_update_status,
90 .get_brightness = adp5520_bl_get_brightness, 90 .get_brightness = adp5520_bl_get_brightness,
91}; 91};
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 2c3bdfc620b7..d769b0bab21a 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
61 return 1; 61 return 1;
62} 62}
63 63
64static struct backlight_ops adx_backlight_ops = { 64static const struct backlight_ops adx_backlight_ops = {
65 .options = 0, 65 .options = 0,
66 .update_status = adx_backlight_update_status, 66 .update_status = adx_backlight_update_status,
67 .get_brightness = adx_backlight_get_brightness, 67 .get_brightness = adx_backlight_get_brightness,
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 2cf7ba52f67c..f625ffc69ad3 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
113 return pwm_channel_enable(&pwmbl->pwmc); 113 return pwm_channel_enable(&pwmbl->pwmc);
114} 114}
115 115
116static struct backlight_ops atmel_pwm_bl_ops = { 116static const struct backlight_ops atmel_pwm_bl_ops = {
117 .get_brightness = atmel_pwm_bl_get_intensity, 117 .get_brightness = atmel_pwm_bl_get_intensity,
118 .update_status = atmel_pwm_bl_set_intensity, 118 .update_status = atmel_pwm_bl_set_intensity,
119}; 119};
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 6615ac7fa60a..18829cf68b1b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
269 * ERR_PTR() or a pointer to the newly allocated device. 269 * ERR_PTR() or a pointer to the newly allocated device.
270 */ 270 */
271struct backlight_device *backlight_device_register(const char *name, 271struct backlight_device *backlight_device_register(const char *name,
272 struct device *parent, void *devdata, struct backlight_ops *ops) 272 struct device *parent, void *devdata, const struct backlight_ops *ops)
273{ 273{
274 struct backlight_device *new_bd; 274 struct backlight_device *new_bd;
275 int rc; 275 int rc;
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 96774949cd30..b4bcf8043797 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
451} 451}
452EXPORT_SYMBOL(corgi_lcd_limit_intensity); 452EXPORT_SYMBOL(corgi_lcd_limit_intensity);
453 453
454static struct backlight_ops corgi_bl_ops = { 454static const struct backlight_ops corgi_bl_ops = {
455 .get_brightness = corgi_bl_get_intensity, 455 .get_brightness = corgi_bl_get_intensity,
456 .update_status = corgi_bl_update_status, 456 .update_status = corgi_bl_update_status,
457}; 457};
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index b9fe62b475c6..da86db4374a0 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
108 return intensity; 108 return intensity;
109} 109}
110 110
111static struct backlight_ops cr_backlight_ops = { 111static const struct backlight_ops cr_backlight_ops = {
112 .get_brightness = cr_backlight_get_intensity, 112 .get_brightness = cr_backlight_get_intensity,
113 .update_status = cr_backlight_set_intensity, 113 .update_status = cr_backlight_set_intensity,
114}; 114};
@@ -201,7 +201,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
201 if (IS_ERR(ldp)) { 201 if (IS_ERR(ldp)) {
202 backlight_device_unregister(bdp); 202 backlight_device_unregister(bdp);
203 pci_dev_put(lpc_dev); 203 pci_dev_put(lpc_dev);
204 return PTR_ERR(bdp); 204 return PTR_ERR(ldp);
205 } 205 }
206 206
207 pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR, 207 pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR,
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index f2d76dae1eb3..74cdc640173d 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -95,7 +95,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
95 return data->current_brightness; 95 return data->current_brightness;
96} 96}
97 97
98static struct backlight_ops da903x_backlight_ops = { 98static const struct backlight_ops da903x_backlight_ops = {
99 .update_status = da903x_backlight_update_status, 99 .update_status = da903x_backlight_update_status,
100 .get_brightness = da903x_backlight_get_brightness, 100 .get_brightness = da903x_backlight_get_brightness,
101}; 101};
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 6d27f62fdcd0..e6d348e63596 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
70} 70}
71EXPORT_SYMBOL(corgibl_limit_intensity); 71EXPORT_SYMBOL(corgibl_limit_intensity);
72 72
73static struct backlight_ops genericbl_ops = { 73static const struct backlight_ops genericbl_ops = {
74 .options = BL_CORE_SUSPENDRESUME, 74 .options = BL_CORE_SUSPENDRESUME,
75 .get_brightness = genericbl_get_intensity, 75 .get_brightness = genericbl_get_intensity,
76 .update_status = genericbl_send_intensity, 76 .update_status = genericbl_send_intensity,
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 7fb4eefff80d..f7cc528d5be7 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
98 return current_intensity; 98 return current_intensity;
99} 99}
100 100
101static struct backlight_ops hp680bl_ops = { 101static const struct backlight_ops hp680bl_ops = {
102 .get_brightness = hp680bl_get_intensity, 102 .get_brightness = hp680bl_get_intensity,
103 .update_status = hp680bl_set_intensity, 103 .update_status = hp680bl_set_intensity,
104}; 104};
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 7aed2565c1bd..db9071fc5665 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -93,7 +93,7 @@ out:
93 return ret; 93 return ret;
94} 94}
95 95
96static struct backlight_ops jornada_bl_ops = { 96static const struct backlight_ops jornada_bl_ops = {
97 .get_brightness = jornada_bl_get_brightness, 97 .get_brightness = jornada_bl_get_brightness,
98 .update_status = jornada_bl_update_status, 98 .update_status = jornada_bl_update_status,
99 .options = BL_CORE_SUSPENDRESUME, 99 .options = BL_CORE_SUSPENDRESUME,
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index a38fda1742dd..939e7b830cf3 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
134 return kb3886bl_intensity; 134 return kb3886bl_intensity;
135} 135}
136 136
137static struct backlight_ops kb3886bl_ops = { 137static const struct backlight_ops kb3886bl_ops = {
138 .get_brightness = kb3886bl_get_intensity, 138 .get_brightness = kb3886bl_get_intensity,
139 .update_status = kb3886bl_send_intensity, 139 .update_status = kb3886bl_send_intensity,
140}; 140};
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 6b488b8a7eee..00a9591b0003 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
141 return current_intensity; 141 return current_intensity;
142} 142}
143 143
144static struct backlight_ops locomobl_data = { 144static const struct backlight_ops locomobl_data = {
145 .get_brightness = locomolcd_get_intensity, 145 .get_brightness = locomolcd_get_intensity,
146 .update_status = locomolcd_set_intensity, 146 .update_status = locomolcd_set_intensity,
147}; 147};
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9edb8d7c295f..2e78b0784bdc 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -33,7 +33,7 @@ struct dmi_match_data {
33 unsigned long iostart; 33 unsigned long iostart;
34 unsigned long iolen; 34 unsigned long iolen;
35 /* Backlight operations structure. */ 35 /* Backlight operations structure. */
36 struct backlight_ops backlight_ops; 36 const struct backlight_ops backlight_ops;
37}; 37};
38 38
39/* Module parameters. */ 39/* Module parameters. */
@@ -220,6 +220,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
220 }, 220 },
221 { 221 {
222 .callback = mbp_dmi_match, 222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 5,3",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
227 },
228 .driver_data = (void *)&nvidia_chipset_data,
229 },
230 {
231 .callback = mbp_dmi_match,
232 .ident = "MacBookPro 5,4",
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
235 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
236 },
237 .driver_data = (void *)&nvidia_chipset_data,
238 },
239 {
240 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 5,5", 241 .ident = "MacBookPro 5,5",
224 .matches = { 242 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 243 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 8693e5fcd2eb..409ca9643528 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
125 return bl->current_intensity; 125 return bl->current_intensity;
126} 126}
127 127
128static struct backlight_ops omapbl_ops = { 128static const struct backlight_ops omapbl_ops = {
129 .get_brightness = omapbl_get_intensity, 129 .get_brightness = omapbl_get_intensity,
130 .update_status = omapbl_update_status, 130 .update_status = omapbl_update_status,
131}; 131};
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 9edaf24fd82d..075786e05034 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
54 return intensity - HW_LEVEL_MIN; 54 return intensity - HW_LEVEL_MIN;
55} 55}
56 56
57static struct backlight_ops progearbl_ops = { 57static const struct backlight_ops progearbl_ops = {
58 .get_brightness = progearbl_get_intensity, 58 .get_brightness = progearbl_get_intensity,
59 .update_status = progearbl_set_intensity, 59 .update_status = progearbl_set_intensity,
60}; 60};
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 887166267443..9d2ec2a1cce8 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -22,8 +22,10 @@
22 22
23struct pwm_bl_data { 23struct pwm_bl_data {
24 struct pwm_device *pwm; 24 struct pwm_device *pwm;
25 struct device *dev;
25 unsigned int period; 26 unsigned int period;
26 int (*notify)(int brightness); 27 int (*notify)(struct device *,
28 int brightness);
27}; 29};
28 30
29static int pwm_backlight_update_status(struct backlight_device *bl) 31static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -39,7 +41,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
39 brightness = 0; 41 brightness = 0;
40 42
41 if (pb->notify) 43 if (pb->notify)
42 brightness = pb->notify(brightness); 44 brightness = pb->notify(pb->dev, brightness);
43 45
44 if (brightness == 0) { 46 if (brightness == 0) {
45 pwm_config(pb->pwm, 0, pb->period); 47 pwm_config(pb->pwm, 0, pb->period);
@@ -56,7 +58,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
56 return bl->props.brightness; 58 return bl->props.brightness;
57} 59}
58 60
59static struct backlight_ops pwm_backlight_ops = { 61static const struct backlight_ops pwm_backlight_ops = {
60 .update_status = pwm_backlight_update_status, 62 .update_status = pwm_backlight_update_status,
61 .get_brightness = pwm_backlight_get_brightness, 63 .get_brightness = pwm_backlight_get_brightness,
62}; 64};
@@ -88,6 +90,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
88 90
89 pb->period = data->pwm_period_ns; 91 pb->period = data->pwm_period_ns;
90 pb->notify = data->notify; 92 pb->notify = data->notify;
93 pb->dev = &pdev->dev;
91 94
92 pb->pwm = pwm_request(data->pwm_id, "backlight"); 95 pb->pwm = pwm_request(data->pwm_id, "backlight");
93 if (IS_ERR(pb->pwm)) { 96 if (IS_ERR(pb->pwm)) {
@@ -146,7 +149,7 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
146 struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev); 149 struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
147 150
148 if (pb->notify) 151 if (pb->notify)
149 pb->notify(0); 152 pb->notify(pb->dev, 0);
150 pwm_config(pb->pwm, 0, pb->period); 153 pwm_config(pb->pwm, 0, pb->period);
151 pwm_disable(pb->pwm); 154 pwm_disable(pb->pwm);
152 return 0; 155 return 0;
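pwm_bl's notify() hook now receives the backlight's parent device as well as the requested brightness, and the driver caches &pdev->dev so it can pass it along; board code can then tell instances apart or use the dev_* helpers. A hedged sketch of a board-file callback using the new signature; the platform-data field names are assumed from the pwm_backlight platform header of this era:

	static int board_bl_notify(struct device *dev, int brightness)
	{
		dev_dbg(dev, "backlight request: %d\n", brightness);
		return brightness;			/* may clamp or veto the request */
	}

	static struct platform_pwm_backlight_data board_bl_data = {
		.pwm_id		= 0,
		.max_brightness	= 255,
		.dft_brightness	= 128,
		.pwm_period_ns	= 78770,
		.notify		= board_bl_notify,
	};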
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 43edbada12d1..e14ce4d469f5 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
72 return props->brightness; 72 return props->brightness;
73} 73}
74 74
75static struct backlight_ops bl_ops = { 75static const struct backlight_ops bl_ops = {
76 .get_brightness = tosa_bl_get_brightness, 76 .get_brightness = tosa_bl_get_brightness,
77 .update_status = tosa_bl_update_status, 77 .update_status = tosa_bl_update_status,
78}; 78};
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 467bdb7efb23..e32add37a203 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
112 return data->current_brightness; 112 return data->current_brightness;
113} 113}
114 114
115static struct backlight_ops wm831x_backlight_ops = { 115static const struct backlight_ops wm831x_backlight_ops = {
116 .options = BL_CORE_SUSPENDRESUME, 116 .options = BL_CORE_SUSPENDRESUME,
117 .update_status = wm831x_backlight_update_status, 117 .update_status = wm831x_backlight_update_status,
118 .get_brightness = wm831x_backlight_get_brightness, 118 .get_brightness = wm831x_backlight_get_brightness,
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 5bb7f6f14601..0f5952cae85e 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -24,7 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/i2c/twl4030.h> 27#include <linux/i2c/twl.h>
28 28
29#include <mach/gpio.h> 29#include <mach/gpio.h>
30#include <plat/mux.h> 30#include <plat/mux.h>
@@ -59,7 +59,7 @@
59#define TWL4030_VPLL2_DEV_GRP 0x33 59#define TWL4030_VPLL2_DEV_GRP 0x33
60#define TWL4030_VPLL2_DEDICATED 0x36 60#define TWL4030_VPLL2_DEDICATED 0x36
61 61
62#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v) 62#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
63 63
64 64
65static int ldp_panel_init(struct lcd_panel *panel, 65static int ldp_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index 006c2fe7360e..7e7a65c08452 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -24,7 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/i2c/twl4030.h> 27#include <linux/i2c/twl.h>
28 28
29#include <plat/mux.h> 29#include <plat/mux.h>
30#include <asm/mach-types.h> 30#include <asm/mach-types.h>
@@ -61,9 +61,9 @@ static int omap2evm_panel_init(struct lcd_panel *panel,
61 gpio_direction_output(LCD_PANEL_LR, 1); 61 gpio_direction_output(LCD_PANEL_LR, 1);
62 gpio_direction_output(LCD_PANEL_UD, 1); 62 gpio_direction_output(LCD_PANEL_UD, 1);
63 63
64 twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN); 64 twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
65 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON); 65 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
66 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF); 66 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
67 bklight_level = 100; 67 bklight_level = 100;
68 68
69 return 0; 69 return 0;
@@ -101,7 +101,7 @@ static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
101 u8 c; 101 u8 c;
102 if ((level >= 0) && (level <= 100)) { 102 if ((level >= 0) && (level <= 100)) {
103 c = (125 * (100 - level)) / 100 + 2; 103 c = (125 * (100 - level)) / 100 + 2;
104 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF); 104 twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
105 bklight_level = level; 105 bklight_level = level;
106 } 106 }
107 return 0; 107 return 0;
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index fc503d8f3c24..ca75cc2a87a5 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -23,7 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/i2c/twl4030.h> 26#include <linux/i2c/twl.h>
27 27
28#include <plat/mux.h> 28#include <plat/mux.h>
29#include <plat/mux.h> 29#include <plat/mux.h>
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index ae2edc4081a8..06840da0b094 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -23,7 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/i2c/twl4030.h> 26#include <linux/i2c/twl.h>
27 27
28#include <plat/mux.h> 28#include <plat/mux.h>
29#include <asm/mach-types.h> 29#include <asm/mach-types.h>
@@ -63,9 +63,9 @@ static int omap3evm_panel_init(struct lcd_panel *panel,
63 gpio_direction_output(LCD_PANEL_LR, 1); 63 gpio_direction_output(LCD_PANEL_LR, 1);
64 gpio_direction_output(LCD_PANEL_UD, 1); 64 gpio_direction_output(LCD_PANEL_UD, 1);
65 65
66 twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN); 66 twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
67 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON); 67 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
68 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF); 68 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
69 bklight_level = 100; 69 bklight_level = 100;
70 70
71 return 0; 71 return 0;
@@ -102,7 +102,7 @@ static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
102 u8 c; 102 u8 c;
103 if ((level >= 0) && (level <= 100)) { 103 if ((level >= 0) && (level <= 100)) {
104 c = (125 * (100 - level)) / 100 + 2; 104 c = (125 * (100 - level)) / 100 + 2;
105 twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF); 105 twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
106 bklight_level = level; 106 bklight_level = level;
107 } 107 }
108 return 0; 108 return 0;
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index 56ee192e9ee2..564933ffac6e 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -21,7 +21,7 @@
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/i2c/twl4030.h> 24#include <linux/i2c/twl.h>
25 25
26#include <mach/gpio.h> 26#include <mach/gpio.h>
27#include <plat/mux.h> 27#include <plat/mux.h>
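The lcd_ldp, lcd_omap2evm, lcd_omap3beagle, lcd_omap3evm and lcd_overo hunks above are a mechanical rename that follows the consolidation of the TWL chip support: the header moves from <linux/i2c/twl4030.h> to <linux/i2c/twl.h> and the I2C accessors drop the chip-specific prefix. The mapping implied by these diffs:

	#include <linux/i2c/twl.h>		/* was <linux/i2c/twl4030.h> */

	/* was: twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON); */
	twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);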
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 10d8c4b4baeb..d8df17a7d5fc 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -680,7 +680,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
680 if (!viafb_gamma_table) 680 if (!viafb_gamma_table)
681 return -ENOMEM; 681 return -ENOMEM;
682 if (copy_from_user(viafb_gamma_table, argp, 682 if (copy_from_user(viafb_gamma_table, argp,
683 sizeof(viafb_gamma_table))) { 683 256 * sizeof(u32))) {
684 kfree(viafb_gamma_table); 684 kfree(viafb_gamma_table);
685 return -EFAULT; 685 return -EFAULT;
686 } 686 }
@@ -694,7 +694,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
694 return -ENOMEM; 694 return -ENOMEM;
695 viafb_get_gamma_table(viafb_gamma_table); 695 viafb_get_gamma_table(viafb_gamma_table);
696 if (copy_to_user(argp, viafb_gamma_table, 696 if (copy_to_user(argp, viafb_gamma_table,
697 sizeof(viafb_gamma_table))) { 697 256 * sizeof(u32))) {
698 kfree(viafb_gamma_table); 698 kfree(viafb_gamma_table);
699 return -EFAULT; 699 return -EFAULT;
700 } 700 }
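viafb_gamma_table is a kmalloc'd u32 pointer sized for 256 entries, so sizeof(viafb_gamma_table) evaluates to the size of the pointer (4 or 8 bytes) rather than the 1 KiB table, and only a fraction of the gamma data was being copied. The fix passes the real length, 256 * sizeof(u32). A sketch of the pitfall with hypothetical names:

	u32 *table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* sizeof(table) is sizeof(u32 *) -- 4 or 8 -- not the 1024-byte buffer */
	if (copy_from_user(table, argp, 256 * sizeof(u32))) {	/* pass the real length */
		kfree(table);
		return -EFAULT;
	}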
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d958b76430a2..088f32f29a6e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -368,7 +368,7 @@ config ALIM7101_WDT
368 368
369config GEODE_WDT 369config GEODE_WDT
370 tristate "AMD Geode CS5535/CS5536 Watchdog" 370 tristate "AMD Geode CS5535/CS5536 Watchdog"
371 depends on MGEODE_LX 371 depends on CS5535_MFGPT
372 help 372 help
373 This driver enables a watchdog capability built into the 373 This driver enables a watchdog capability built into the
374 CS5535/CS5536 companion chips for the AMD Geode GX and LX 374 CS5535/CS5536 companion chips for the AMD Geode GX and LX
@@ -815,16 +815,6 @@ config PNX833X_WDT
815 timer has expired and no process has written to /dev/watchdog during 815 timer has expired and no process has written to /dev/watchdog during
816 that time. 816 that time.
817 817
818config WDT_RM9K_GPI
819 tristate "RM9000/GPI hardware watchdog"
820 depends on CPU_RM9000
821 help
822 Watchdog implementation using the GPI hardware found on
823 PMC-Sierra RM9xxx CPUs.
824
825 To compile this driver as a module, choose M here: the
826 module will be called rm9k_wdt.
827
828config SIBYTE_WDOG 818config SIBYTE_WDOG
829 tristate "Sibyte SoC hardware watchdog" 819 tristate "Sibyte SoC hardware watchdog"
830 depends on CPU_SB1 820 depends on CPU_SB1
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 89c045dc468e..475c61100069 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -109,7 +109,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
109obj-$(CONFIG_INDYDOG) += indydog.o 109obj-$(CONFIG_INDYDOG) += indydog.o
110obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o 110obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
111obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o 111obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
112obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
113obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o 112obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
114obj-$(CONFIG_AR7_WDT) += ar7_wdt.o 113obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
115obj-$(CONFIG_TXX9_WDT) += txx9wdt.o 114obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 9acf0015a1e7..38252ff828ca 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -1,6 +1,7 @@
1/* Watchdog timer for the Geode GX/LX with the CS5535/CS5536 companion chip 1/* Watchdog timer for machines with the CS5535/CS5536 companion chip
2 * 2 *
3 * Copyright (C) 2006-2007, Advanced Micro Devices, Inc. 3 * Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
4 * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
4 * 5 *
5 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -19,7 +20,7 @@
19#include <linux/reboot.h> 20#include <linux/reboot.h>
20#include <linux/uaccess.h> 21#include <linux/uaccess.h>
21 22
22#include <asm/geode.h> 23#include <linux/cs5535.h>
23 24
24#define GEODEWDT_HZ 500 25#define GEODEWDT_HZ 500
25#define GEODEWDT_SCALE 6 26#define GEODEWDT_SCALE 6
@@ -46,25 +47,25 @@ MODULE_PARM_DESC(nowayout,
46 47
47static struct platform_device *geodewdt_platform_device; 48static struct platform_device *geodewdt_platform_device;
48static unsigned long wdt_flags; 49static unsigned long wdt_flags;
49static int wdt_timer; 50static struct cs5535_mfgpt_timer *wdt_timer;
50static int safe_close; 51static int safe_close;
51 52
52static void geodewdt_ping(void) 53static void geodewdt_ping(void)
53{ 54{
54 /* Stop the counter */ 55 /* Stop the counter */
55 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); 56 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
56 57
57 /* Reset the counter */ 58 /* Reset the counter */
58 geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); 59 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
59 60
60 /* Enable the counter */ 61 /* Enable the counter */
61 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); 62 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
62} 63}
63 64
64static void geodewdt_disable(void) 65static void geodewdt_disable(void)
65{ 66{
66 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); 67 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
67 geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); 68 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
68} 69}
69 70
70static int geodewdt_set_heartbeat(int val) 71static int geodewdt_set_heartbeat(int val)
@@ -72,10 +73,10 @@ static int geodewdt_set_heartbeat(int val)
72 if (val < 1 || val > GEODEWDT_MAX_SECONDS) 73 if (val < 1 || val > GEODEWDT_MAX_SECONDS)
73 return -EINVAL; 74 return -EINVAL;
74 75
75 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); 76 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
76 geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ); 77 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
77 geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); 78 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
78 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); 79 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
79 80
80 timeout = val; 81 timeout = val;
81 return 0; 82 return 0;
@@ -215,28 +216,25 @@ static struct miscdevice geodewdt_miscdev = {
215 216
216static int __devinit geodewdt_probe(struct platform_device *dev) 217static int __devinit geodewdt_probe(struct platform_device *dev)
217{ 218{
218 int ret, timer; 219 int ret;
219
220 timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
221 220
222 if (timer == -1) { 221 wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
222 if (!wdt_timer) {
223 printk(KERN_ERR "geodewdt: No timers were available\n"); 223 printk(KERN_ERR "geodewdt: No timers were available\n");
224 return -ENODEV; 224 return -ENODEV;
225 } 225 }
226 226
227 wdt_timer = timer;
228
229 /* Set up the timer */ 227 /* Set up the timer */
230 228
231 geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 229 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
232 GEODEWDT_SCALE | (3 << 8)); 230 GEODEWDT_SCALE | (3 << 8));
233 231
234 /* Set up comparator 2 to reset when the event fires */ 232 /* Set up comparator 2 to reset when the event fires */
235 geode_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1); 233 cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
236 234
237 /* Set up the initial timeout */ 235 /* Set up the initial timeout */
238 236
239 geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, 237 cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
240 timeout * GEODEWDT_HZ); 238 timeout * GEODEWDT_HZ);
241 239
242 ret = misc_register(&geodewdt_miscdev); 240 ret = misc_register(&geodewdt_miscdev);
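geodewdt is converted from the Geode-specific MFGPT helpers to the shared cs5535_mfgpt API: the timer handle becomes an opaque struct cs5535_mfgpt_timer * (allocation failure is signalled by NULL rather than -1), and each geode_mfgpt_* call becomes the cs5535_mfgpt_* equivalent taking that handle. The allocation pattern the probe path now follows, condensed from the hunks above:

	struct cs5535_mfgpt_timer *t;

	t = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!t)
		return -ENODEV;			/* NULL, not -1, means no free timer */

	cs5535_mfgpt_write(t, MFGPT_REG_SETUP, GEODEWDT_SCALE | (3 << 8));
	cs5535_mfgpt_toggle_event(t, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);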
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
deleted file mode 100644
index bb66958b9433..000000000000
--- a/drivers/watchdog/rm9k_wdt.c
+++ /dev/null
@@ -1,419 +0,0 @@
1/*
2 * Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx
3 * chips.
4 *
5 * Copyright (C) 2004 by Basler Vision Technologies AG
6 * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/platform_device.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/interrupt.h>
27#include <linux/fs.h>
28#include <linux/reboot.h>
29#include <linux/notifier.h>
30#include <linux/miscdevice.h>
31#include <linux/watchdog.h>
32#include <linux/io.h>
33#include <linux/uaccess.h>
34#include <asm/atomic.h>
35#include <asm/processor.h>
36#include <asm/system.h>
37#include <asm/rm9k-ocd.h>
38
39#include <rm9k_wdt.h>
40
41
42#define CLOCK 125000000
43#define MAX_TIMEOUT_SECONDS 32
44#define CPCCR 0x0080
45#define CPGIG1SR 0x0044
46#define CPGIG1ER 0x0054
47
48
49/* Function prototypes */
50static irqreturn_t wdt_gpi_irqhdl(int, void *);
51static void wdt_gpi_start(void);
52static void wdt_gpi_stop(void);
53static void wdt_gpi_set_timeout(unsigned int);
54static int wdt_gpi_open(struct inode *, struct file *);
55static int wdt_gpi_release(struct inode *, struct file *);
56static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t,
57 loff_t *);
58static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
59static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
60static const struct resource *wdt_gpi_get_resource(struct platform_device *,
61 const char *, unsigned int);
62static int __init wdt_gpi_probe(struct platform_device *);
63static int __exit wdt_gpi_remove(struct platform_device *);
64
65
66static const char wdt_gpi_name[] = "wdt_gpi";
67static atomic_t opencnt;
68static int expect_close;
69static int locked;
70
71
72/* These are set from device resources */
73static void __iomem *wd_regs;
74static unsigned int wd_irq, wd_ctr;
75
76
77/* Module arguments */
78static int timeout = MAX_TIMEOUT_SECONDS;
79module_param(timeout, int, 0444);
80MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
81
82static unsigned long resetaddr = 0xbffdc200;
83module_param(resetaddr, ulong, 0444);
84MODULE_PARM_DESC(resetaddr, "Address to write to to force a reset");
85
86static unsigned long flagaddr = 0xbffdc104;
87module_param(flagaddr, ulong, 0444);
88MODULE_PARM_DESC(flagaddr, "Address to write to boot flags to");
89
90static int powercycle;
91module_param(powercycle, bool, 0444);
92MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires");
93
94static int nowayout = WATCHDOG_NOWAYOUT;
95module_param(nowayout, bool, 0444);
96MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
97
98
99/* Kernel interfaces */
100static const struct file_operations fops = {
101 .owner = THIS_MODULE,
102 .open = wdt_gpi_open,
103 .release = wdt_gpi_release,
104 .write = wdt_gpi_write,
105 .unlocked_ioctl = wdt_gpi_ioctl,
106};
107
108static struct miscdevice miscdev = {
109 .minor = WATCHDOG_MINOR,
110 .name = wdt_gpi_name,
111 .fops = &fops,
112};
113
114static struct notifier_block wdt_gpi_shutdown = {
115 .notifier_call = wdt_gpi_notify,
116};
117
118
119/* Interrupt handler */
120static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
121{
122 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
123 return IRQ_NONE;
124 __raw_writel(0x1, wd_regs + 0x0008);
125
126
127 printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
128 wdt_gpi_name);
129
130 *(volatile char *) flagaddr |= 0x01;
131 *(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
132 iob();
133 while (1)
134 cpu_relax();
135}
136
137
138/* Watchdog functions */
139static void wdt_gpi_start(void)
140{
141 u32 reg;
142
143 lock_titan_regs();
144 reg = titan_readl(CPGIG1ER);
145 titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER);
146 iob();
147 unlock_titan_regs();
148}
149
150static void wdt_gpi_stop(void)
151{
152 u32 reg;
153
154 lock_titan_regs();
155 reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
156 titan_writel(reg, CPCCR);
157 reg = titan_readl(CPGIG1ER);
158 titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER);
159 iob();
160 unlock_titan_regs();
161}
162
163static void wdt_gpi_set_timeout(unsigned int to)
164{
165 u32 reg;
166 const u32 wdval = (to * CLOCK) & ~0x0000000f;
167
168 lock_titan_regs();
169 reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
170 titan_writel(reg, CPCCR);
171 wmb();
172 __raw_writel(wdval, wd_regs + 0x0000);
173 wmb();
174 titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
175 wmb();
176 titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
177 iob();
178 unlock_titan_regs();
179}
180
181
182/* /dev/watchdog operations */
183static int wdt_gpi_open(struct inode *inode, struct file *file)
184{
185 int res;
186
187 if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
188 return -EBUSY;
189
190 expect_close = 0;
191 if (locked) {
192 module_put(THIS_MODULE);
193 free_irq(wd_irq, &miscdev);
194 locked = 0;
195 }
196
197 res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED,
198 wdt_gpi_name, &miscdev);
199 if (unlikely(res))
200 return res;
201
202 wdt_gpi_set_timeout(timeout);
203 wdt_gpi_start();
204
205 printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
206 wdt_gpi_name, timeout);
207 return nonseekable_open(inode, file);
208}
209
210static int wdt_gpi_release(struct inode *inode, struct file *file)
211{
212 if (nowayout) {
213 printk(KERN_INFO "%s: no way out - watchdog left running\n",
214 wdt_gpi_name);
215 __module_get(THIS_MODULE);
216 locked = 1;
217 } else {
218 if (expect_close) {
219 wdt_gpi_stop();
220 free_irq(wd_irq, &miscdev);
221 printk(KERN_INFO "%s: watchdog stopped\n",
222 wdt_gpi_name);
223 } else {
224 printk(KERN_CRIT "%s: unexpected close() -"
225 " watchdog left running\n",
226 wdt_gpi_name);
227 wdt_gpi_set_timeout(timeout);
228 __module_get(THIS_MODULE);
229 locked = 1;
230 }
231 }
232
233 atomic_inc(&opencnt);
234 return 0;
235}
236
237static ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s,
238 loff_t *o)
239{
240 char val;
241
242 wdt_gpi_set_timeout(timeout);
243 expect_close = (s > 0) && !get_user(val, d) && (val == 'V');
244 return s ? 1 : 0;
245}
246
247static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
248{
249 long res = -ENOTTY;
250 const long size = _IOC_SIZE(cmd);
251 int stat;
252 void __user *argp = (void __user *)arg;
253 static struct watchdog_info wdinfo = {
254 .identity = "RM9xxx/GPI watchdog",
255 .firmware_version = 0,
256 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
257 };
258
259 if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE))
260 return -ENOTTY;
261
262 if ((_IOC_DIR(cmd) & _IOC_READ)
263 && !access_ok(VERIFY_WRITE, arg, size))
264 return -EFAULT;
265
266 if ((_IOC_DIR(cmd) & _IOC_WRITE)
267 && !access_ok(VERIFY_READ, arg, size))
268 return -EFAULT;
269
270 expect_close = 0;
271
272 switch (cmd) {
273 case WDIOC_GETSUPPORT:
274 wdinfo.options = nowayout ?
275 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
276 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
277 WDIOF_MAGICCLOSE;
278 res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size;
279 break;
280
281 case WDIOC_GETSTATUS:
282 break;
283
284 case WDIOC_GETBOOTSTATUS:
285 stat = (*(volatile char *) flagaddr & 0x01)
286 ? WDIOF_CARDRESET : 0;
287 res = __copy_to_user(argp, &stat, size) ?
288 -EFAULT : size;
289 break;
290
291 case WDIOC_SETOPTIONS:
292 break;
293
294 case WDIOC_KEEPALIVE:
295 wdt_gpi_set_timeout(timeout);
296 res = size;
297 break;
298
299 case WDIOC_SETTIMEOUT:
300 {
301 int val;
302 if (unlikely(__copy_from_user(&val, argp, size))) {
303 res = -EFAULT;
304 break;
305 }
306
307 if (val > MAX_TIMEOUT_SECONDS)
308 val = MAX_TIMEOUT_SECONDS;
309 timeout = val;
310 wdt_gpi_set_timeout(val);
311 res = size;
312 printk(KERN_INFO "%s: timeout set to %u seconds\n",
313 wdt_gpi_name, timeout);
314 }
315 break;
316
317 case WDIOC_GETTIMEOUT:
318 res = __copy_to_user(argp, &timeout, size) ?
319 -EFAULT : size;
320 break;
321 }
322
323 return res;
324}
325
326
327/* Shutdown notifier */
328static int wdt_gpi_notify(struct notifier_block *this, unsigned long code,
329 void *unused)
330{
331 if (code == SYS_DOWN || code == SYS_HALT)
332 wdt_gpi_stop();
333
334 return NOTIFY_DONE;
335}
336
337
338/* Init & exit procedures */
339static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
340 const char *name, unsigned int type)
341{
342 char buf[80];
343 if (snprintf(buf, sizeof(buf), "%s_0", name) >= sizeof(buf))
344 return NULL;
345 return platform_get_resource_byname(pdv, type, buf);
346}
347
348/* No hotplugging on the platform bus - use __devinit */
349static int __devinit wdt_gpi_probe(struct platform_device *pdv)
350{
351 int res;
352 const struct resource
353 * const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS,
354 IORESOURCE_MEM),
355 * const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ,
356 IORESOURCE_IRQ),
357 * const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER,
358 0);
359
360 if (unlikely(!rr || !ri || !rc))
361 return -ENXIO;
362
363 wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start);
364 if (unlikely(!wd_regs))
365 return -ENOMEM;
366 wd_irq = ri->start;
367 wd_ctr = rc->start;
368 res = misc_register(&miscdev);
369 if (res)
370 iounmap(wd_regs);
371 else
372 register_reboot_notifier(&wdt_gpi_shutdown);
373 return res;
374}
375
376static int __devexit wdt_gpi_remove(struct platform_device *dev)
377{
378 int res;
379
380 unregister_reboot_notifier(&wdt_gpi_shutdown);
381 res = misc_deregister(&miscdev);
382 iounmap(wd_regs);
383 wd_regs = NULL;
384 return res;
385}
386
387
388/* Device driver init & exit */
389static struct platform_driver wdt_gpi_driver = {
390 .driver = {
391 .name = wdt_gpi_name,
392 .owner = THIS_MODULE,
393 },
394 .probe = wdt_gpi_probe,
395 .remove = __devexit_p(wdt_gpi_remove),
396};
397
398static int __init wdt_gpi_init_module(void)
399{
400 atomic_set(&opencnt, 1);
401 if (timeout > MAX_TIMEOUT_SECONDS)
402 timeout = MAX_TIMEOUT_SECONDS;
403 return platform_driver_register(&wdt_gpi_driver);
404}
405
406static void __exit wdt_gpi_cleanup_module(void)
407{
408 platform_driver_unregister(&wdt_gpi_driver);
409}
410
411module_init(wdt_gpi_init_module);
412module_exit(wdt_gpi_cleanup_module);
413
414MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
415MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices");
416MODULE_VERSION("0.1");
417MODULE_LICENSE("GPL");
418MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
419