aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/spi/spi-bus.txt76
-rw-r--r--Documentation/spi/spi-summary27
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-slave-system-control.c154
-rw-r--r--drivers/spi/spi-slave-time.c129
-rw-r--r--drivers/spi/spi.c1216
-rw-r--r--include/linux/spi/spi.h217
-rw-r--r--include/trace/events/spi.h26
9 files changed, 1201 insertions, 674 deletions
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 4b1d6e74c744..1f6e86f787ef 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -1,17 +1,23 @@
1SPI (Serial Peripheral Interface) busses 1SPI (Serial Peripheral Interface) busses
2 2
3SPI busses can be described with a node for the SPI master device 3SPI busses can be described with a node for the SPI controller device
4and a set of child nodes for each SPI slave on the bus. For this 4and a set of child nodes for each SPI slave on the bus. The system's SPI
5discussion, it is assumed that the system's SPI controller is in 5controller may be described for use in SPI master mode or in SPI slave mode,
6SPI master mode. This binding does not describe SPI controllers 6but not for both at the same time.
7in slave mode.
8 7
9The SPI master node requires the following properties: 8The SPI controller node requires the following properties:
9- compatible - Name of SPI bus controller following generic names
10 recommended practice.
11
12In master mode, the SPI controller node requires the following additional
13properties:
10- #address-cells - number of cells required to define a chip select 14- #address-cells - number of cells required to define a chip select
11 address on the SPI bus. 15 address on the SPI bus.
12- #size-cells - should be zero. 16- #size-cells - should be zero.
13- compatible - name of SPI bus controller following generic names 17
14 recommended practice. 18In slave mode, the SPI controller node requires one additional property:
19- spi-slave - Empty property.
20
15No other properties are required in the SPI bus node. It is assumed 21No other properties are required in the SPI bus node. It is assumed
16that a driver for an SPI bus device will understand that it is an SPI bus. 22that a driver for an SPI bus device will understand that it is an SPI bus.
17However, the binding does not attempt to define the specific method for 23However, the binding does not attempt to define the specific method for
@@ -21,7 +27,7 @@ assumption that board specific platform code will be used to manage
21chip selects. Individual drivers can define additional properties to 27chip selects. Individual drivers can define additional properties to
22support describing the chip select layout. 28support describing the chip select layout.
23 29
24Optional properties: 30Optional properties (master mode only):
25- cs-gpios - gpios chip select. 31- cs-gpios - gpios chip select.
26- num-cs - total number of chipselects. 32- num-cs - total number of chipselects.
27 33
@@ -41,28 +47,36 @@ cs1 : native
41cs2 : &gpio1 1 0 47cs2 : &gpio1 1 0
42cs3 : &gpio1 2 0 48cs3 : &gpio1 2 0
43 49
44SPI slave nodes must be children of the SPI master node and can 50
45contain the following properties. 51SPI slave nodes must be children of the SPI controller node.
46- reg - (required) chip select address of device. 52
47- compatible - (required) name of SPI device following generic names 53In master mode, one or more slave nodes (up to the number of chip selects) can
48 recommended practice. 54be present. Required properties are:
49- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz. 55- compatible - Name of SPI device following generic names recommended
50- spi-cpol - (optional) Empty property indicating device requires 56 practice.
51 inverse clock polarity (CPOL) mode. 57- reg - Chip select address of device.
52- spi-cpha - (optional) Empty property indicating device requires 58- spi-max-frequency - Maximum SPI clocking speed of device in Hz.
53 shifted clock phase (CPHA) mode. 59
54- spi-cs-high - (optional) Empty property indicating device requires 60In slave mode, the (single) slave node is optional.
55 chip select active high. 61If present, it must be called "slave". Required properties are:
56- spi-3wire - (optional) Empty property indicating device requires 62- compatible - Name of SPI device following generic names recommended
57 3-wire mode. 63 practice.
58- spi-lsb-first - (optional) Empty property indicating device requires 64
59 LSB first mode. 65All slave nodes can contain the following optional properties:
60- spi-tx-bus-width - (optional) The bus width (number of data wires) that is 66- spi-cpol - Empty property indicating device requires inverse clock
61 used for MOSI. Defaults to 1 if not present. 67 polarity (CPOL) mode.
62- spi-rx-bus-width - (optional) The bus width (number of data wires) that is 68- spi-cpha - Empty property indicating device requires shifted clock
63 used for MISO. Defaults to 1 if not present. 69 phase (CPHA) mode.
64- spi-rx-delay-us - (optional) Microsecond delay after a read transfer. 70- spi-cs-high - Empty property indicating device requires chip select
65- spi-tx-delay-us - (optional) Microsecond delay after a write transfer. 71 active high.
72- spi-3wire - Empty property indicating device requires 3-wire mode.
73- spi-lsb-first - Empty property indicating device requires LSB first mode.
74- spi-tx-bus-width - The bus width (number of data wires) that is used for MOSI.
75 Defaults to 1 if not present.
76- spi-rx-bus-width - The bus width (number of data wires) that is used for MISO.
77 Defaults to 1 if not present.
78- spi-rx-delay-us - Microsecond delay after a read transfer.
79- spi-tx-delay-us - Microsecond delay after a write transfer.
66 80
67Some SPI controllers and devices support Dual and Quad SPI transfer mode. 81Some SPI controllers and devices support Dual and Quad SPI transfer mode.
68It allows data in the SPI system to be transferred using 2 wires (DUAL) or 4 82It allows data in the SPI system to be transferred using 2 wires (DUAL) or 4
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index d1824b399b2d..1721c1b570c3 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -62,8 +62,8 @@ chips described as using "three wire" signaling: SCK, data, nCSx.
62(That data line is sometimes called MOMI or SISO.) 62(That data line is sometimes called MOMI or SISO.)
63 63
64Microcontrollers often support both master and slave sides of the SPI 64Microcontrollers often support both master and slave sides of the SPI
65protocol. This document (and Linux) currently only supports the master 65protocol. This document (and Linux) supports both the master and slave
66side of SPI interactions. 66sides of SPI interactions.
67 67
68 68
69Who uses it? On what kinds of systems? 69Who uses it? On what kinds of systems?
@@ -154,9 +154,8 @@ control audio interfaces, present touchscreen sensors as input interfaces,
154or monitor temperature and voltage levels during industrial processing. 154or monitor temperature and voltage levels during industrial processing.
155And those might all be sharing the same controller driver. 155And those might all be sharing the same controller driver.
156 156
157A "struct spi_device" encapsulates the master-side interface between 157A "struct spi_device" encapsulates the controller-side interface between
158those two types of driver. At this writing, Linux has no slave side 158those two types of drivers.
159programming interface.
160 159
161There is a minimal core of SPI programming interfaces, focussing on 160There is a minimal core of SPI programming interfaces, focussing on
162using the driver model to connect controller and protocol drivers using 161using the driver model to connect controller and protocol drivers using
@@ -177,10 +176,24 @@ shows up in sysfs in several locations:
177 /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices 176 /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices
178 177
179 /sys/class/spi_master/spiB ... symlink (or actual device node) to 178 /sys/class/spi_master/spiB ... symlink (or actual device node) to
180 a logical node which could hold class related state for the 179 a logical node which could hold class related state for the SPI
181 controller managing bus "B". All spiB.* devices share one 180 master controller managing bus "B". All spiB.* devices share one
182 physical SPI bus segment, with SCLK, MOSI, and MISO. 181 physical SPI bus segment, with SCLK, MOSI, and MISO.
183 182
183 /sys/devices/.../CTLR/slave ... virtual file for (un)registering the
184 slave device for an SPI slave controller.
185 Writing the driver name of an SPI slave handler to this file
186 registers the slave device; writing "(null)" unregisters the slave
187 device.
188 Reading from this file shows the name of the slave device ("(null)"
189 if not registered).
190
191 /sys/class/spi_slave/spiB ... symlink (or actual device node) to
192 a logical node which could hold class related state for the SPI
193 slave controller on bus "B". When registered, a single spiB.*
194 device is present here, possible sharing the physical SPI bus
195 segment with other SPI slave devices.
196
184Note that the actual location of the controller's class state depends 197Note that the actual location of the controller's class state depends
185on whether you enabled CONFIG_SYSFS_DEPRECATED or not. At this time, 198on whether you enabled CONFIG_SYSFS_DEPRECATED or not. At this time,
186the only class-specific state is the bus number ("B" in "spiB"), so 199the only class-specific state is the bus number ("B" in "spiB"), so
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 097883362036..82cd818aa062 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -785,6 +785,30 @@ config SPI_TLE62X0
785 785
786endif # SPI_MASTER 786endif # SPI_MASTER
787 787
788# (slave support would go here) 788#
789# SLAVE side ... listening to other SPI masters
790#
791
792config SPI_SLAVE
793 bool "SPI slave protocol handlers"
794 help
795 If your system has a slave-capable SPI controller, you can enable
796 slave protocol handlers.
797
798if SPI_SLAVE
799
800config SPI_SLAVE_TIME
801 tristate "SPI slave handler reporting boot up time"
802 help
803 SPI slave handler responding with the time of reception of the last
804 SPI message.
805
806config SPI_SLAVE_SYSTEM_CONTROL
807 tristate "SPI slave handler controlling system state"
808 help
809 SPI slave handler to allow remote control of system reboot, power
810 off, halt, and suspend.
811
812endif # SPI_SLAVE
789 813
790endif # SPI 814endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index b375a7a89216..1d7923e8c63b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -105,3 +105,7 @@ obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
105obj-$(CONFIG_SPI_XLP) += spi-xlp.o 105obj-$(CONFIG_SPI_XLP) += spi-xlp.o
106obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o 106obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
107obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o 107obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
108
109# SPI slave protocol handlers
110obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
111obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
new file mode 100644
index 000000000000..c0257e937995
--- /dev/null
+++ b/drivers/spi/spi-slave-system-control.c
@@ -0,0 +1,154 @@
1/*
2 * SPI slave handler controlling system state
3 *
4 * This SPI slave handler allows remote control of system reboot, power off,
5 * halt, and suspend.
6 *
7 * Copyright (C) 2016-2017 Glider bvba
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
14 * system):
15 *
16 * # reboot='\x7c\x50'
17 * # poweroff='\x71\x3f'
18 * # halt='\x38\x76'
19 * # suspend='\x1b\x1b'
20 * # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
21 */
22
23#include <linux/completion.h>
24#include <linux/module.h>
25#include <linux/reboot.h>
26#include <linux/suspend.h>
27#include <linux/spi/spi.h>
28
29/*
30 * The numbers are chosen to display something human-readable on two 7-segment
31 * displays connected to two 74HC595 shift registers
32 */
33#define CMD_REBOOT 0x7c50 /* rb */
34#define CMD_POWEROFF 0x713f /* OF */
35#define CMD_HALT 0x3876 /* HL */
36#define CMD_SUSPEND 0x1b1b /* ZZ */
37
/* Per-device state: one permanently outstanding 2-byte receive transfer */
struct spi_slave_system_control_priv {
	struct spi_device *spi;		/* the slave-side SPI device */
	struct completion finished;	/* signalled when the handler terminates */
	struct spi_transfer xfer;	/* single RX transfer into @cmd */
	struct spi_message msg;		/* message wrapping @xfer */
	__be16 cmd;			/* last command word, big-endian on the wire */
};

/* Forward declaration: the completion callback and submit() call each other */
static
int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
48
49static void spi_slave_system_control_complete(void *arg)
50{
51 struct spi_slave_system_control_priv *priv = arg;
52 u16 cmd;
53 int ret;
54
55 if (priv->msg.status)
56 goto terminate;
57
58 cmd = be16_to_cpu(priv->cmd);
59 switch (cmd) {
60 case CMD_REBOOT:
61 dev_info(&priv->spi->dev, "Rebooting system...\n");
62 kernel_restart(NULL);
63
64 case CMD_POWEROFF:
65 dev_info(&priv->spi->dev, "Powering off system...\n");
66 kernel_power_off();
67 break;
68
69 case CMD_HALT:
70 dev_info(&priv->spi->dev, "Halting system...\n");
71 kernel_halt();
72 break;
73
74 case CMD_SUSPEND:
75 dev_info(&priv->spi->dev, "Suspending system...\n");
76 pm_suspend(PM_SUSPEND_MEM);
77 break;
78
79 default:
80 dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
81 break;
82 }
83
84 ret = spi_slave_system_control_submit(priv);
85 if (ret)
86 goto terminate;
87
88 return;
89
90terminate:
91 dev_info(&priv->spi->dev, "Terminating\n");
92 complete(&priv->finished);
93}
94
95static
96int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
97{
98 int ret;
99
100 spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
101
102 priv->msg.complete = spi_slave_system_control_complete;
103 priv->msg.context = priv;
104
105 ret = spi_async(priv->spi, &priv->msg);
106 if (ret)
107 dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
108
109 return ret;
110}
111
112static int spi_slave_system_control_probe(struct spi_device *spi)
113{
114 struct spi_slave_system_control_priv *priv;
115 int ret;
116
117 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
118 if (!priv)
119 return -ENOMEM;
120
121 priv->spi = spi;
122 init_completion(&priv->finished);
123 priv->xfer.rx_buf = &priv->cmd;
124 priv->xfer.len = sizeof(priv->cmd);
125
126 ret = spi_slave_system_control_submit(priv);
127 if (ret)
128 return ret;
129
130 spi_set_drvdata(spi, priv);
131 return 0;
132}
133
/*
 * Unbind: abort the outstanding transfer, then wait until the completion
 * callback has signalled termination before devm resources are released.
 * The order is essential: the abort is what drives the callback into its
 * terminate path, which fires priv->finished.
 */
static int spi_slave_system_control_remove(struct spi_device *spi)
{
	struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);

	spi_slave_abort(spi);
	wait_for_completion(&priv->finished);
	return 0;
}
142
/*
 * Matched by driver name only; the slave device is instantiated by
 * writing this name to the controller's sysfs "slave" attribute
 * (see the spi-summary documentation added by this change).
 */
static struct spi_driver spi_slave_system_control_driver = {
	.driver = {
		.name = "spi-slave-system-control",
	},
	.probe = spi_slave_system_control_probe,
	.remove = spi_slave_system_control_remove,
};
module_spi_driver(spi_slave_system_control_driver);

MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
MODULE_DESCRIPTION("SPI slave handler controlling system state");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
new file mode 100644
index 000000000000..f2e07a392d68
--- /dev/null
+++ b/drivers/spi/spi-slave-time.c
@@ -0,0 +1,129 @@
1/*
2 * SPI slave handler reporting uptime at reception of previous SPI message
3 *
4 * This SPI slave handler sends the time of reception of the last SPI message
5 * as two 32-bit unsigned integers in binary format and in network byte order,
6 * representing the number of seconds and fractional seconds (in microseconds)
7 * since boot up.
8 *
9 * Copyright (C) 2016-2017 Glider bvba
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 *
15 * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
16 * system):
17 *
18 * # spidev_test -D /dev/spidev2.0 -p dummy-8B
19 * spi mode: 0x0
20 * bits per word: 8
21 * max speed: 500000 Hz (500 KHz)
22 * RX | 00 00 04 6D 00 09 5B BB ...
23 * ^^^^^ ^^^^^^^^
24 * seconds microseconds
25 */
26
27#include <linux/completion.h>
28#include <linux/module.h>
29#include <linux/sched/clock.h>
30#include <linux/spi/spi.h>
31
32
/* Per-device state: one permanently outstanding 8-byte transmit transfer */
struct spi_slave_time_priv {
	struct spi_device *spi;		/* the slave-side SPI device */
	struct completion finished;	/* signalled when the handler terminates */
	struct spi_transfer xfer;	/* single TX transfer of @buf */
	struct spi_message msg;		/* message wrapping @xfer */
	__be32 buf[2];			/* [seconds, microseconds] in network byte order */
};

/* Forward declaration: the completion callback and submit() call each other */
static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
42
43static void spi_slave_time_complete(void *arg)
44{
45 struct spi_slave_time_priv *priv = arg;
46 int ret;
47
48 ret = priv->msg.status;
49 if (ret)
50 goto terminate;
51
52 ret = spi_slave_time_submit(priv);
53 if (ret)
54 goto terminate;
55
56 return;
57
58terminate:
59 dev_info(&priv->spi->dev, "Terminating\n");
60 complete(&priv->finished);
61}
62
63static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
64{
65 u32 rem_us;
66 int ret;
67 u64 ts;
68
69 ts = local_clock();
70 rem_us = do_div(ts, 1000000000) / 1000;
71
72 priv->buf[0] = cpu_to_be32(ts);
73 priv->buf[1] = cpu_to_be32(rem_us);
74
75 spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
76
77 priv->msg.complete = spi_slave_time_complete;
78 priv->msg.context = priv;
79
80 ret = spi_async(priv->spi, &priv->msg);
81 if (ret)
82 dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
83
84 return ret;
85}
86
87static int spi_slave_time_probe(struct spi_device *spi)
88{
89 struct spi_slave_time_priv *priv;
90 int ret;
91
92 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
93 if (!priv)
94 return -ENOMEM;
95
96 priv->spi = spi;
97 init_completion(&priv->finished);
98 priv->xfer.tx_buf = priv->buf;
99 priv->xfer.len = sizeof(priv->buf);
100
101 ret = spi_slave_time_submit(priv);
102 if (ret)
103 return ret;
104
105 spi_set_drvdata(spi, priv);
106 return 0;
107}
108
/*
 * Unbind: abort the outstanding transfer, then wait until the completion
 * callback has signalled termination before devm resources are released.
 * The order is essential: the abort is what drives the callback into its
 * terminate path, which fires priv->finished.
 */
static int spi_slave_time_remove(struct spi_device *spi)
{
	struct spi_slave_time_priv *priv = spi_get_drvdata(spi);

	spi_slave_abort(spi);
	wait_for_completion(&priv->finished);
	return 0;
}
117
/*
 * Matched by driver name only; the slave device is instantiated by
 * writing this name to the controller's sysfs "slave" attribute
 * (see the spi-summary documentation added by this change).
 */
static struct spi_driver spi_slave_time_driver = {
	.driver = {
		.name = "spi-slave-time",
	},
	.probe = spi_slave_time_probe,
	.remove = spi_slave_time_remove,
};
module_spi_driver(spi_slave_time_driver);

MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6f87fec409b5..4fcbb0aa71d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -48,11 +48,11 @@ static void spidev_release(struct device *dev)
48{ 48{
49 struct spi_device *spi = to_spi_device(dev); 49 struct spi_device *spi = to_spi_device(dev);
50 50
51 /* spi masters may cleanup for released devices */ 51 /* spi controllers may cleanup for released devices */
52 if (spi->master->cleanup) 52 if (spi->controller->cleanup)
53 spi->master->cleanup(spi); 53 spi->controller->cleanup(spi);
54 54
55 spi_master_put(spi->master); 55 spi_controller_put(spi->controller);
56 kfree(spi); 56 kfree(spi);
57} 57}
58 58
@@ -71,17 +71,17 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
71static DEVICE_ATTR_RO(modalias); 71static DEVICE_ATTR_RO(modalias);
72 72
73#define SPI_STATISTICS_ATTRS(field, file) \ 73#define SPI_STATISTICS_ATTRS(field, file) \
74static ssize_t spi_master_##field##_show(struct device *dev, \ 74static ssize_t spi_controller_##field##_show(struct device *dev, \
75 struct device_attribute *attr, \ 75 struct device_attribute *attr, \
76 char *buf) \ 76 char *buf) \
77{ \ 77{ \
78 struct spi_master *master = container_of(dev, \ 78 struct spi_controller *ctlr = container_of(dev, \
79 struct spi_master, dev); \ 79 struct spi_controller, dev); \
80 return spi_statistics_##field##_show(&master->statistics, buf); \ 80 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
81} \ 81} \
82static struct device_attribute dev_attr_spi_master_##field = { \ 82static struct device_attribute dev_attr_spi_controller_##field = { \
83 .attr = { .name = file, .mode = 0444 }, \ 83 .attr = { .name = file, .mode = 0444 }, \
84 .show = spi_master_##field##_show, \ 84 .show = spi_controller_##field##_show, \
85}; \ 85}; \
86static ssize_t spi_device_##field##_show(struct device *dev, \ 86static ssize_t spi_device_##field##_show(struct device *dev, \
87 struct device_attribute *attr, \ 87 struct device_attribute *attr, \
@@ -201,51 +201,51 @@ static const struct attribute_group *spi_dev_groups[] = {
201 NULL, 201 NULL,
202}; 202};
203 203
204static struct attribute *spi_master_statistics_attrs[] = { 204static struct attribute *spi_controller_statistics_attrs[] = {
205 &dev_attr_spi_master_messages.attr, 205 &dev_attr_spi_controller_messages.attr,
206 &dev_attr_spi_master_transfers.attr, 206 &dev_attr_spi_controller_transfers.attr,
207 &dev_attr_spi_master_errors.attr, 207 &dev_attr_spi_controller_errors.attr,
208 &dev_attr_spi_master_timedout.attr, 208 &dev_attr_spi_controller_timedout.attr,
209 &dev_attr_spi_master_spi_sync.attr, 209 &dev_attr_spi_controller_spi_sync.attr,
210 &dev_attr_spi_master_spi_sync_immediate.attr, 210 &dev_attr_spi_controller_spi_sync_immediate.attr,
211 &dev_attr_spi_master_spi_async.attr, 211 &dev_attr_spi_controller_spi_async.attr,
212 &dev_attr_spi_master_bytes.attr, 212 &dev_attr_spi_controller_bytes.attr,
213 &dev_attr_spi_master_bytes_rx.attr, 213 &dev_attr_spi_controller_bytes_rx.attr,
214 &dev_attr_spi_master_bytes_tx.attr, 214 &dev_attr_spi_controller_bytes_tx.attr,
215 &dev_attr_spi_master_transfer_bytes_histo0.attr, 215 &dev_attr_spi_controller_transfer_bytes_histo0.attr,
216 &dev_attr_spi_master_transfer_bytes_histo1.attr, 216 &dev_attr_spi_controller_transfer_bytes_histo1.attr,
217 &dev_attr_spi_master_transfer_bytes_histo2.attr, 217 &dev_attr_spi_controller_transfer_bytes_histo2.attr,
218 &dev_attr_spi_master_transfer_bytes_histo3.attr, 218 &dev_attr_spi_controller_transfer_bytes_histo3.attr,
219 &dev_attr_spi_master_transfer_bytes_histo4.attr, 219 &dev_attr_spi_controller_transfer_bytes_histo4.attr,
220 &dev_attr_spi_master_transfer_bytes_histo5.attr, 220 &dev_attr_spi_controller_transfer_bytes_histo5.attr,
221 &dev_attr_spi_master_transfer_bytes_histo6.attr, 221 &dev_attr_spi_controller_transfer_bytes_histo6.attr,
222 &dev_attr_spi_master_transfer_bytes_histo7.attr, 222 &dev_attr_spi_controller_transfer_bytes_histo7.attr,
223 &dev_attr_spi_master_transfer_bytes_histo8.attr, 223 &dev_attr_spi_controller_transfer_bytes_histo8.attr,
224 &dev_attr_spi_master_transfer_bytes_histo9.attr, 224 &dev_attr_spi_controller_transfer_bytes_histo9.attr,
225 &dev_attr_spi_master_transfer_bytes_histo10.attr, 225 &dev_attr_spi_controller_transfer_bytes_histo10.attr,
226 &dev_attr_spi_master_transfer_bytes_histo11.attr, 226 &dev_attr_spi_controller_transfer_bytes_histo11.attr,
227 &dev_attr_spi_master_transfer_bytes_histo12.attr, 227 &dev_attr_spi_controller_transfer_bytes_histo12.attr,
228 &dev_attr_spi_master_transfer_bytes_histo13.attr, 228 &dev_attr_spi_controller_transfer_bytes_histo13.attr,
229 &dev_attr_spi_master_transfer_bytes_histo14.attr, 229 &dev_attr_spi_controller_transfer_bytes_histo14.attr,
230 &dev_attr_spi_master_transfer_bytes_histo15.attr, 230 &dev_attr_spi_controller_transfer_bytes_histo15.attr,
231 &dev_attr_spi_master_transfer_bytes_histo16.attr, 231 &dev_attr_spi_controller_transfer_bytes_histo16.attr,
232 &dev_attr_spi_master_transfers_split_maxsize.attr, 232 &dev_attr_spi_controller_transfers_split_maxsize.attr,
233 NULL, 233 NULL,
234}; 234};
235 235
236static const struct attribute_group spi_master_statistics_group = { 236static const struct attribute_group spi_controller_statistics_group = {
237 .name = "statistics", 237 .name = "statistics",
238 .attrs = spi_master_statistics_attrs, 238 .attrs = spi_controller_statistics_attrs,
239}; 239};
240 240
241static const struct attribute_group *spi_master_groups[] = { 241static const struct attribute_group *spi_master_groups[] = {
242 &spi_master_statistics_group, 242 &spi_controller_statistics_group,
243 NULL, 243 NULL,
244}; 244};
245 245
246void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 246void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
247 struct spi_transfer *xfer, 247 struct spi_transfer *xfer,
248 struct spi_master *master) 248 struct spi_controller *ctlr)
249{ 249{
250 unsigned long flags; 250 unsigned long flags;
251 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 251 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -260,10 +260,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
260 260
261 stats->bytes += xfer->len; 261 stats->bytes += xfer->len;
262 if ((xfer->tx_buf) && 262 if ((xfer->tx_buf) &&
263 (xfer->tx_buf != master->dummy_tx)) 263 (xfer->tx_buf != ctlr->dummy_tx))
264 stats->bytes_tx += xfer->len; 264 stats->bytes_tx += xfer->len;
265 if ((xfer->rx_buf) && 265 if ((xfer->rx_buf) &&
266 (xfer->rx_buf != master->dummy_rx)) 266 (xfer->rx_buf != ctlr->dummy_rx))
267 stats->bytes_rx += xfer->len; 267 stats->bytes_rx += xfer->len;
268 268
269 spin_unlock_irqrestore(&stats->lock, flags); 269 spin_unlock_irqrestore(&stats->lock, flags);
@@ -405,7 +405,7 @@ EXPORT_SYMBOL_GPL(__spi_register_driver);
405/*-------------------------------------------------------------------------*/ 405/*-------------------------------------------------------------------------*/
406 406
407/* SPI devices should normally not be created by SPI device drivers; that 407/* SPI devices should normally not be created by SPI device drivers; that
408 * would make them board-specific. Similarly with SPI master drivers. 408 * would make them board-specific. Similarly with SPI controller drivers.
409 * Device registration normally goes into like arch/.../mach.../board-YYY.c 409 * Device registration normally goes into like arch/.../mach.../board-YYY.c
410 * with other readonly (flashable) information about mainboard devices. 410 * with other readonly (flashable) information about mainboard devices.
411 */ 411 */
@@ -416,17 +416,17 @@ struct boardinfo {
416}; 416};
417 417
418static LIST_HEAD(board_list); 418static LIST_HEAD(board_list);
419static LIST_HEAD(spi_master_list); 419static LIST_HEAD(spi_controller_list);
420 420
421/* 421/*
422 * Used to protect add/del operation for board_info list and 422 * Used to protect add/del operation for board_info list and
423 * spi_master list, and their matching process 423 * spi_controller list, and their matching process
424 */ 424 */
425static DEFINE_MUTEX(board_lock); 425static DEFINE_MUTEX(board_lock);
426 426
427/** 427/**
428 * spi_alloc_device - Allocate a new SPI device 428 * spi_alloc_device - Allocate a new SPI device
429 * @master: Controller to which device is connected 429 * @ctlr: Controller to which device is connected
430 * Context: can sleep 430 * Context: can sleep
431 * 431 *
432 * Allows a driver to allocate and initialize a spi_device without 432 * Allows a driver to allocate and initialize a spi_device without
@@ -435,27 +435,27 @@ static DEFINE_MUTEX(board_lock);
435 * spi_add_device() on it. 435 * spi_add_device() on it.
436 * 436 *
437 * Caller is responsible to call spi_add_device() on the returned 437 * Caller is responsible to call spi_add_device() on the returned
438 * spi_device structure to add it to the SPI master. If the caller 438 * spi_device structure to add it to the SPI controller. If the caller
439 * needs to discard the spi_device without adding it, then it should 439 * needs to discard the spi_device without adding it, then it should
440 * call spi_dev_put() on it. 440 * call spi_dev_put() on it.
441 * 441 *
442 * Return: a pointer to the new device, or NULL. 442 * Return: a pointer to the new device, or NULL.
443 */ 443 */
444struct spi_device *spi_alloc_device(struct spi_master *master) 444struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
445{ 445{
446 struct spi_device *spi; 446 struct spi_device *spi;
447 447
448 if (!spi_master_get(master)) 448 if (!spi_controller_get(ctlr))
449 return NULL; 449 return NULL;
450 450
451 spi = kzalloc(sizeof(*spi), GFP_KERNEL); 451 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
452 if (!spi) { 452 if (!spi) {
453 spi_master_put(master); 453 spi_controller_put(ctlr);
454 return NULL; 454 return NULL;
455 } 455 }
456 456
457 spi->master = master; 457 spi->master = spi->controller = ctlr;
458 spi->dev.parent = &master->dev; 458 spi->dev.parent = &ctlr->dev;
459 spi->dev.bus = &spi_bus_type; 459 spi->dev.bus = &spi_bus_type;
460 spi->dev.release = spidev_release; 460 spi->dev.release = spidev_release;
461 spi->cs_gpio = -ENOENT; 461 spi->cs_gpio = -ENOENT;
@@ -476,7 +476,7 @@ static void spi_dev_set_name(struct spi_device *spi)
476 return; 476 return;
477 } 477 }
478 478
479 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 479 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
480 spi->chip_select); 480 spi->chip_select);
481} 481}
482 482
@@ -485,7 +485,7 @@ static int spi_dev_check(struct device *dev, void *data)
485 struct spi_device *spi = to_spi_device(dev); 485 struct spi_device *spi = to_spi_device(dev);
486 struct spi_device *new_spi = data; 486 struct spi_device *new_spi = data;
487 487
488 if (spi->master == new_spi->master && 488 if (spi->controller == new_spi->controller &&
489 spi->chip_select == new_spi->chip_select) 489 spi->chip_select == new_spi->chip_select)
490 return -EBUSY; 490 return -EBUSY;
491 return 0; 491 return 0;
@@ -503,15 +503,14 @@ static int spi_dev_check(struct device *dev, void *data)
503int spi_add_device(struct spi_device *spi) 503int spi_add_device(struct spi_device *spi)
504{ 504{
505 static DEFINE_MUTEX(spi_add_lock); 505 static DEFINE_MUTEX(spi_add_lock);
506 struct spi_master *master = spi->master; 506 struct spi_controller *ctlr = spi->controller;
507 struct device *dev = master->dev.parent; 507 struct device *dev = ctlr->dev.parent;
508 int status; 508 int status;
509 509
510 /* Chipselects are numbered 0..max; validate. */ 510 /* Chipselects are numbered 0..max; validate. */
511 if (spi->chip_select >= master->num_chipselect) { 511 if (spi->chip_select >= ctlr->num_chipselect) {
512 dev_err(dev, "cs%d >= max %d\n", 512 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
513 spi->chip_select, 513 ctlr->num_chipselect);
514 master->num_chipselect);
515 return -EINVAL; 514 return -EINVAL;
516 } 515 }
517 516
@@ -531,8 +530,8 @@ int spi_add_device(struct spi_device *spi)
531 goto done; 530 goto done;
532 } 531 }
533 532
534 if (master->cs_gpios) 533 if (ctlr->cs_gpios)
535 spi->cs_gpio = master->cs_gpios[spi->chip_select]; 534 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
536 535
537 /* Drivers may modify this initial i/o setup, but will 536 /* Drivers may modify this initial i/o setup, but will
538 * normally rely on the device being setup. Devices 537 * normally rely on the device being setup. Devices
@@ -561,7 +560,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
561 560
562/** 561/**
563 * spi_new_device - instantiate one new SPI device 562 * spi_new_device - instantiate one new SPI device
564 * @master: Controller to which device is connected 563 * @ctlr: Controller to which device is connected
565 * @chip: Describes the SPI device 564 * @chip: Describes the SPI device
566 * Context: can sleep 565 * Context: can sleep
567 * 566 *
@@ -573,7 +572,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
573 * 572 *
574 * Return: the new device, or NULL. 573 * Return: the new device, or NULL.
575 */ 574 */
576struct spi_device *spi_new_device(struct spi_master *master, 575struct spi_device *spi_new_device(struct spi_controller *ctlr,
577 struct spi_board_info *chip) 576 struct spi_board_info *chip)
578{ 577{
579 struct spi_device *proxy; 578 struct spi_device *proxy;
@@ -586,7 +585,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
586 * suggests syslogged diagnostics are best here (ugh). 585 * suggests syslogged diagnostics are best here (ugh).
587 */ 586 */
588 587
589 proxy = spi_alloc_device(master); 588 proxy = spi_alloc_device(ctlr);
590 if (!proxy) 589 if (!proxy)
591 return NULL; 590 return NULL;
592 591
@@ -604,7 +603,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
604 if (chip->properties) { 603 if (chip->properties) {
605 status = device_add_properties(&proxy->dev, chip->properties); 604 status = device_add_properties(&proxy->dev, chip->properties);
606 if (status) { 605 if (status) {
607 dev_err(&master->dev, 606 dev_err(&ctlr->dev,
608 "failed to add properties to '%s': %d\n", 607 "failed to add properties to '%s': %d\n",
609 chip->modalias, status); 608 chip->modalias, status);
610 goto err_dev_put; 609 goto err_dev_put;
@@ -631,7 +630,7 @@ EXPORT_SYMBOL_GPL(spi_new_device);
631 * @spi: spi_device to unregister 630 * @spi: spi_device to unregister
632 * 631 *
633 * Start making the passed SPI device vanish. Normally this would be handled 632 * Start making the passed SPI device vanish. Normally this would be handled
634 * by spi_unregister_master(). 633 * by spi_unregister_controller().
635 */ 634 */
636void spi_unregister_device(struct spi_device *spi) 635void spi_unregister_device(struct spi_device *spi)
637{ 636{
@@ -648,17 +647,17 @@ void spi_unregister_device(struct spi_device *spi)
648} 647}
649EXPORT_SYMBOL_GPL(spi_unregister_device); 648EXPORT_SYMBOL_GPL(spi_unregister_device);
650 649
651static void spi_match_master_to_boardinfo(struct spi_master *master, 650static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
652 struct spi_board_info *bi) 651 struct spi_board_info *bi)
653{ 652{
654 struct spi_device *dev; 653 struct spi_device *dev;
655 654
656 if (master->bus_num != bi->bus_num) 655 if (ctlr->bus_num != bi->bus_num)
657 return; 656 return;
658 657
659 dev = spi_new_device(master, bi); 658 dev = spi_new_device(ctlr, bi);
660 if (!dev) 659 if (!dev)
661 dev_err(master->dev.parent, "can't create new device for %s\n", 660 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
662 bi->modalias); 661 bi->modalias);
663} 662}
664 663
@@ -697,7 +696,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
697 return -ENOMEM; 696 return -ENOMEM;
698 697
699 for (i = 0; i < n; i++, bi++, info++) { 698 for (i = 0; i < n; i++, bi++, info++) {
700 struct spi_master *master; 699 struct spi_controller *ctlr;
701 700
702 memcpy(&bi->board_info, info, sizeof(*info)); 701 memcpy(&bi->board_info, info, sizeof(*info));
703 if (info->properties) { 702 if (info->properties) {
@@ -709,8 +708,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
709 708
710 mutex_lock(&board_lock); 709 mutex_lock(&board_lock);
711 list_add_tail(&bi->list, &board_list); 710 list_add_tail(&bi->list, &board_list);
712 list_for_each_entry(master, &spi_master_list, list) 711 list_for_each_entry(ctlr, &spi_controller_list, list)
713 spi_match_master_to_boardinfo(master, &bi->board_info); 712 spi_match_controller_to_boardinfo(ctlr,
713 &bi->board_info);
714 mutex_unlock(&board_lock); 714 mutex_unlock(&board_lock);
715 } 715 }
716 716
@@ -727,16 +727,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
727 if (gpio_is_valid(spi->cs_gpio)) { 727 if (gpio_is_valid(spi->cs_gpio)) {
728 gpio_set_value(spi->cs_gpio, !enable); 728 gpio_set_value(spi->cs_gpio, !enable);
729 /* Some SPI masters need both GPIO CS & slave_select */ 729 /* Some SPI masters need both GPIO CS & slave_select */
730 if ((spi->master->flags & SPI_MASTER_GPIO_SS) && 730 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
731 spi->master->set_cs) 731 spi->controller->set_cs)
732 spi->master->set_cs(spi, !enable); 732 spi->controller->set_cs(spi, !enable);
733 } else if (spi->master->set_cs) { 733 } else if (spi->controller->set_cs) {
734 spi->master->set_cs(spi, !enable); 734 spi->controller->set_cs(spi, !enable);
735 } 735 }
736} 736}
737 737
738#ifdef CONFIG_HAS_DMA 738#ifdef CONFIG_HAS_DMA
739static int spi_map_buf(struct spi_master *master, struct device *dev, 739static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
740 struct sg_table *sgt, void *buf, size_t len, 740 struct sg_table *sgt, void *buf, size_t len,
741 enum dma_data_direction dir) 741 enum dma_data_direction dir)
742{ 742{
@@ -761,7 +761,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
761 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 761 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
762 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 762 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
763 } else if (virt_addr_valid(buf)) { 763 } else if (virt_addr_valid(buf)) {
764 desc_len = min_t(int, max_seg_size, master->max_dma_len); 764 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
765 sgs = DIV_ROUND_UP(len, desc_len); 765 sgs = DIV_ROUND_UP(len, desc_len);
766 } else { 766 } else {
767 return -EINVAL; 767 return -EINVAL;
@@ -811,7 +811,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
811 return 0; 811 return 0;
812} 812}
813 813
814static void spi_unmap_buf(struct spi_master *master, struct device *dev, 814static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
815 struct sg_table *sgt, enum dma_data_direction dir) 815 struct sg_table *sgt, enum dma_data_direction dir)
816{ 816{
817 if (sgt->orig_nents) { 817 if (sgt->orig_nents) {
@@ -820,31 +820,31 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
820 } 820 }
821} 821}
822 822
823static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 823static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
824{ 824{
825 struct device *tx_dev, *rx_dev; 825 struct device *tx_dev, *rx_dev;
826 struct spi_transfer *xfer; 826 struct spi_transfer *xfer;
827 int ret; 827 int ret;
828 828
829 if (!master->can_dma) 829 if (!ctlr->can_dma)
830 return 0; 830 return 0;
831 831
832 if (master->dma_tx) 832 if (ctlr->dma_tx)
833 tx_dev = master->dma_tx->device->dev; 833 tx_dev = ctlr->dma_tx->device->dev;
834 else 834 else
835 tx_dev = master->dev.parent; 835 tx_dev = ctlr->dev.parent;
836 836
837 if (master->dma_rx) 837 if (ctlr->dma_rx)
838 rx_dev = master->dma_rx->device->dev; 838 rx_dev = ctlr->dma_rx->device->dev;
839 else 839 else
840 rx_dev = master->dev.parent; 840 rx_dev = ctlr->dev.parent;
841 841
842 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 842 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
843 if (!master->can_dma(master, msg->spi, xfer)) 843 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
844 continue; 844 continue;
845 845
846 if (xfer->tx_buf != NULL) { 846 if (xfer->tx_buf != NULL) {
847 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 847 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
848 (void *)xfer->tx_buf, xfer->len, 848 (void *)xfer->tx_buf, xfer->len,
849 DMA_TO_DEVICE); 849 DMA_TO_DEVICE);
850 if (ret != 0) 850 if (ret != 0)
@@ -852,79 +852,78 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
852 } 852 }
853 853
854 if (xfer->rx_buf != NULL) { 854 if (xfer->rx_buf != NULL) {
855 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 855 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
856 xfer->rx_buf, xfer->len, 856 xfer->rx_buf, xfer->len,
857 DMA_FROM_DEVICE); 857 DMA_FROM_DEVICE);
858 if (ret != 0) { 858 if (ret != 0) {
859 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 859 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
860 DMA_TO_DEVICE); 860 DMA_TO_DEVICE);
861 return ret; 861 return ret;
862 } 862 }
863 } 863 }
864 } 864 }
865 865
866 master->cur_msg_mapped = true; 866 ctlr->cur_msg_mapped = true;
867 867
868 return 0; 868 return 0;
869} 869}
870 870
871static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 871static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
872{ 872{
873 struct spi_transfer *xfer; 873 struct spi_transfer *xfer;
874 struct device *tx_dev, *rx_dev; 874 struct device *tx_dev, *rx_dev;
875 875
876 if (!master->cur_msg_mapped || !master->can_dma) 876 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
877 return 0; 877 return 0;
878 878
879 if (master->dma_tx) 879 if (ctlr->dma_tx)
880 tx_dev = master->dma_tx->device->dev; 880 tx_dev = ctlr->dma_tx->device->dev;
881 else 881 else
882 tx_dev = master->dev.parent; 882 tx_dev = ctlr->dev.parent;
883 883
884 if (master->dma_rx) 884 if (ctlr->dma_rx)
885 rx_dev = master->dma_rx->device->dev; 885 rx_dev = ctlr->dma_rx->device->dev;
886 else 886 else
887 rx_dev = master->dev.parent; 887 rx_dev = ctlr->dev.parent;
888 888
889 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 889 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
890 if (!master->can_dma(master, msg->spi, xfer)) 890 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
891 continue; 891 continue;
892 892
893 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 893 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
894 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 894 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
895 } 895 }
896 896
897 return 0; 897 return 0;
898} 898}
899#else /* !CONFIG_HAS_DMA */ 899#else /* !CONFIG_HAS_DMA */
900static inline int spi_map_buf(struct spi_master *master, 900static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
901 struct device *dev, struct sg_table *sgt, 901 struct sg_table *sgt, void *buf, size_t len,
902 void *buf, size_t len,
903 enum dma_data_direction dir) 902 enum dma_data_direction dir)
904{ 903{
905 return -EINVAL; 904 return -EINVAL;
906} 905}
907 906
908static inline void spi_unmap_buf(struct spi_master *master, 907static inline void spi_unmap_buf(struct spi_controller *ctlr,
909 struct device *dev, struct sg_table *sgt, 908 struct device *dev, struct sg_table *sgt,
910 enum dma_data_direction dir) 909 enum dma_data_direction dir)
911{ 910{
912} 911}
913 912
914static inline int __spi_map_msg(struct spi_master *master, 913static inline int __spi_map_msg(struct spi_controller *ctlr,
915 struct spi_message *msg) 914 struct spi_message *msg)
916{ 915{
917 return 0; 916 return 0;
918} 917}
919 918
920static inline int __spi_unmap_msg(struct spi_master *master, 919static inline int __spi_unmap_msg(struct spi_controller *ctlr,
921 struct spi_message *msg) 920 struct spi_message *msg)
922{ 921{
923 return 0; 922 return 0;
924} 923}
925#endif /* !CONFIG_HAS_DMA */ 924#endif /* !CONFIG_HAS_DMA */
926 925
927static inline int spi_unmap_msg(struct spi_master *master, 926static inline int spi_unmap_msg(struct spi_controller *ctlr,
928 struct spi_message *msg) 927 struct spi_message *msg)
929{ 928{
930 struct spi_transfer *xfer; 929 struct spi_transfer *xfer;
@@ -934,63 +933,63 @@ static inline int spi_unmap_msg(struct spi_master *master,
934 * Restore the original value of tx_buf or rx_buf if they are 933 * Restore the original value of tx_buf or rx_buf if they are
935 * NULL. 934 * NULL.
936 */ 935 */
937 if (xfer->tx_buf == master->dummy_tx) 936 if (xfer->tx_buf == ctlr->dummy_tx)
938 xfer->tx_buf = NULL; 937 xfer->tx_buf = NULL;
939 if (xfer->rx_buf == master->dummy_rx) 938 if (xfer->rx_buf == ctlr->dummy_rx)
940 xfer->rx_buf = NULL; 939 xfer->rx_buf = NULL;
941 } 940 }
942 941
943 return __spi_unmap_msg(master, msg); 942 return __spi_unmap_msg(ctlr, msg);
944} 943}
945 944
946static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 945static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
947{ 946{
948 struct spi_transfer *xfer; 947 struct spi_transfer *xfer;
949 void *tmp; 948 void *tmp;
950 unsigned int max_tx, max_rx; 949 unsigned int max_tx, max_rx;
951 950
952 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 951 if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
953 max_tx = 0; 952 max_tx = 0;
954 max_rx = 0; 953 max_rx = 0;
955 954
956 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 955 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
957 if ((master->flags & SPI_MASTER_MUST_TX) && 956 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
958 !xfer->tx_buf) 957 !xfer->tx_buf)
959 max_tx = max(xfer->len, max_tx); 958 max_tx = max(xfer->len, max_tx);
960 if ((master->flags & SPI_MASTER_MUST_RX) && 959 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
961 !xfer->rx_buf) 960 !xfer->rx_buf)
962 max_rx = max(xfer->len, max_rx); 961 max_rx = max(xfer->len, max_rx);
963 } 962 }
964 963
965 if (max_tx) { 964 if (max_tx) {
966 tmp = krealloc(master->dummy_tx, max_tx, 965 tmp = krealloc(ctlr->dummy_tx, max_tx,
967 GFP_KERNEL | GFP_DMA); 966 GFP_KERNEL | GFP_DMA);
968 if (!tmp) 967 if (!tmp)
969 return -ENOMEM; 968 return -ENOMEM;
970 master->dummy_tx = tmp; 969 ctlr->dummy_tx = tmp;
971 memset(tmp, 0, max_tx); 970 memset(tmp, 0, max_tx);
972 } 971 }
973 972
974 if (max_rx) { 973 if (max_rx) {
975 tmp = krealloc(master->dummy_rx, max_rx, 974 tmp = krealloc(ctlr->dummy_rx, max_rx,
976 GFP_KERNEL | GFP_DMA); 975 GFP_KERNEL | GFP_DMA);
977 if (!tmp) 976 if (!tmp)
978 return -ENOMEM; 977 return -ENOMEM;
979 master->dummy_rx = tmp; 978 ctlr->dummy_rx = tmp;
980 } 979 }
981 980
982 if (max_tx || max_rx) { 981 if (max_tx || max_rx) {
983 list_for_each_entry(xfer, &msg->transfers, 982 list_for_each_entry(xfer, &msg->transfers,
984 transfer_list) { 983 transfer_list) {
985 if (!xfer->tx_buf) 984 if (!xfer->tx_buf)
986 xfer->tx_buf = master->dummy_tx; 985 xfer->tx_buf = ctlr->dummy_tx;
987 if (!xfer->rx_buf) 986 if (!xfer->rx_buf)
988 xfer->rx_buf = master->dummy_rx; 987 xfer->rx_buf = ctlr->dummy_rx;
989 } 988 }
990 } 989 }
991 } 990 }
992 991
993 return __spi_map_msg(master, msg); 992 return __spi_map_msg(ctlr, msg);
994} 993}
995 994
996/* 995/*
@@ -1000,14 +999,14 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
1000 * drivers which implement a transfer_one() operation. It provides 999 * drivers which implement a transfer_one() operation. It provides
1001 * standard handling of delays and chip select management. 1000 * standard handling of delays and chip select management.
1002 */ 1001 */
1003static int spi_transfer_one_message(struct spi_master *master, 1002static int spi_transfer_one_message(struct spi_controller *ctlr,
1004 struct spi_message *msg) 1003 struct spi_message *msg)
1005{ 1004{
1006 struct spi_transfer *xfer; 1005 struct spi_transfer *xfer;
1007 bool keep_cs = false; 1006 bool keep_cs = false;
1008 int ret = 0; 1007 int ret = 0;
1009 unsigned long long ms = 1; 1008 unsigned long long ms = 1;
1010 struct spi_statistics *statm = &master->statistics; 1009 struct spi_statistics *statm = &ctlr->statistics;
1011 struct spi_statistics *stats = &msg->spi->statistics; 1010 struct spi_statistics *stats = &msg->spi->statistics;
1012 1011
1013 spi_set_cs(msg->spi, true); 1012 spi_set_cs(msg->spi, true);
@@ -1018,13 +1017,13 @@ static int spi_transfer_one_message(struct spi_master *master,
1018 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1017 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1019 trace_spi_transfer_start(msg, xfer); 1018 trace_spi_transfer_start(msg, xfer);
1020 1019
1021 spi_statistics_add_transfer_stats(statm, xfer, master); 1020 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1022 spi_statistics_add_transfer_stats(stats, xfer, master); 1021 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1023 1022
1024 if (xfer->tx_buf || xfer->rx_buf) { 1023 if (xfer->tx_buf || xfer->rx_buf) {
1025 reinit_completion(&master->xfer_completion); 1024 reinit_completion(&ctlr->xfer_completion);
1026 1025
1027 ret = master->transfer_one(master, msg->spi, xfer); 1026 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1028 if (ret < 0) { 1027 if (ret < 0) {
1029 SPI_STATISTICS_INCREMENT_FIELD(statm, 1028 SPI_STATISTICS_INCREMENT_FIELD(statm,
1030 errors); 1029 errors);
@@ -1044,7 +1043,7 @@ static int spi_transfer_one_message(struct spi_master *master,
1044 if (ms > UINT_MAX) 1043 if (ms > UINT_MAX)
1045 ms = UINT_MAX; 1044 ms = UINT_MAX;
1046 1045
1047 ms = wait_for_completion_timeout(&master->xfer_completion, 1046 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1048 msecs_to_jiffies(ms)); 1047 msecs_to_jiffies(ms));
1049 } 1048 }
1050 1049
@@ -1099,33 +1098,33 @@ out:
1099 if (msg->status == -EINPROGRESS) 1098 if (msg->status == -EINPROGRESS)
1100 msg->status = ret; 1099 msg->status = ret;
1101 1100
1102 if (msg->status && master->handle_err) 1101 if (msg->status && ctlr->handle_err)
1103 master->handle_err(master, msg); 1102 ctlr->handle_err(ctlr, msg);
1104 1103
1105 spi_res_release(master, msg); 1104 spi_res_release(ctlr, msg);
1106 1105
1107 spi_finalize_current_message(master); 1106 spi_finalize_current_message(ctlr);
1108 1107
1109 return ret; 1108 return ret;
1110} 1109}
1111 1110
1112/** 1111/**
1113 * spi_finalize_current_transfer - report completion of a transfer 1112 * spi_finalize_current_transfer - report completion of a transfer
1114 * @master: the master reporting completion 1113 * @ctlr: the controller reporting completion
1115 * 1114 *
1116 * Called by SPI drivers using the core transfer_one_message() 1115 * Called by SPI drivers using the core transfer_one_message()
1117 * implementation to notify it that the current interrupt driven 1116 * implementation to notify it that the current interrupt driven
1118 * transfer has finished and the next one may be scheduled. 1117 * transfer has finished and the next one may be scheduled.
1119 */ 1118 */
1120void spi_finalize_current_transfer(struct spi_master *master) 1119void spi_finalize_current_transfer(struct spi_controller *ctlr)
1121{ 1120{
1122 complete(&master->xfer_completion); 1121 complete(&ctlr->xfer_completion);
1123} 1122}
1124EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1123EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1125 1124
1126/** 1125/**
1127 * __spi_pump_messages - function which processes spi message queue 1126 * __spi_pump_messages - function which processes spi message queue
1128 * @master: master to process queue for 1127 * @ctlr: controller to process queue for
1129 * @in_kthread: true if we are in the context of the message pump thread 1128 * @in_kthread: true if we are in the context of the message pump thread
1130 * 1129 *
1131 * This function checks if there is any spi message in the queue that 1130 * This function checks if there is any spi message in the queue that
@@ -1136,136 +1135,136 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1136 * inside spi_sync(); the queue extraction handling at the top of the 1135 * inside spi_sync(); the queue extraction handling at the top of the
1137 * function should deal with this safely. 1136 * function should deal with this safely.
1138 */ 1137 */
1139static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1138static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1140{ 1139{
1141 unsigned long flags; 1140 unsigned long flags;
1142 bool was_busy = false; 1141 bool was_busy = false;
1143 int ret; 1142 int ret;
1144 1143
1145 /* Lock queue */ 1144 /* Lock queue */
1146 spin_lock_irqsave(&master->queue_lock, flags); 1145 spin_lock_irqsave(&ctlr->queue_lock, flags);
1147 1146
1148 /* Make sure we are not already running a message */ 1147 /* Make sure we are not already running a message */
1149 if (master->cur_msg) { 1148 if (ctlr->cur_msg) {
1150 spin_unlock_irqrestore(&master->queue_lock, flags); 1149 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1151 return; 1150 return;
1152 } 1151 }
1153 1152
1154 /* If another context is idling the device then defer */ 1153 /* If another context is idling the device then defer */
1155 if (master->idling) { 1154 if (ctlr->idling) {
1156 kthread_queue_work(&master->kworker, &master->pump_messages); 1155 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1157 spin_unlock_irqrestore(&master->queue_lock, flags); 1156 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1158 return; 1157 return;
1159 } 1158 }
1160 1159
1161 /* Check if the queue is idle */ 1160 /* Check if the queue is idle */
1162 if (list_empty(&master->queue) || !master->running) { 1161 if (list_empty(&ctlr->queue) || !ctlr->running) {
1163 if (!master->busy) { 1162 if (!ctlr->busy) {
1164 spin_unlock_irqrestore(&master->queue_lock, flags); 1163 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1165 return; 1164 return;
1166 } 1165 }
1167 1166
1168 /* Only do teardown in the thread */ 1167 /* Only do teardown in the thread */
1169 if (!in_kthread) { 1168 if (!in_kthread) {
1170 kthread_queue_work(&master->kworker, 1169 kthread_queue_work(&ctlr->kworker,
1171 &master->pump_messages); 1170 &ctlr->pump_messages);
1172 spin_unlock_irqrestore(&master->queue_lock, flags); 1171 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1173 return; 1172 return;
1174 } 1173 }
1175 1174
1176 master->busy = false; 1175 ctlr->busy = false;
1177 master->idling = true; 1176 ctlr->idling = true;
1178 spin_unlock_irqrestore(&master->queue_lock, flags); 1177 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1179 1178
1180 kfree(master->dummy_rx); 1179 kfree(ctlr->dummy_rx);
1181 master->dummy_rx = NULL; 1180 ctlr->dummy_rx = NULL;
1182 kfree(master->dummy_tx); 1181 kfree(ctlr->dummy_tx);
1183 master->dummy_tx = NULL; 1182 ctlr->dummy_tx = NULL;
1184 if (master->unprepare_transfer_hardware && 1183 if (ctlr->unprepare_transfer_hardware &&
1185 master->unprepare_transfer_hardware(master)) 1184 ctlr->unprepare_transfer_hardware(ctlr))
1186 dev_err(&master->dev, 1185 dev_err(&ctlr->dev,
1187 "failed to unprepare transfer hardware\n"); 1186 "failed to unprepare transfer hardware\n");
1188 if (master->auto_runtime_pm) { 1187 if (ctlr->auto_runtime_pm) {
1189 pm_runtime_mark_last_busy(master->dev.parent); 1188 pm_runtime_mark_last_busy(ctlr->dev.parent);
1190 pm_runtime_put_autosuspend(master->dev.parent); 1189 pm_runtime_put_autosuspend(ctlr->dev.parent);
1191 } 1190 }
1192 trace_spi_master_idle(master); 1191 trace_spi_controller_idle(ctlr);
1193 1192
1194 spin_lock_irqsave(&master->queue_lock, flags); 1193 spin_lock_irqsave(&ctlr->queue_lock, flags);
1195 master->idling = false; 1194 ctlr->idling = false;
1196 spin_unlock_irqrestore(&master->queue_lock, flags); 1195 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1197 return; 1196 return;
1198 } 1197 }
1199 1198
1200 /* Extract head of queue */ 1199 /* Extract head of queue */
1201 master->cur_msg = 1200 ctlr->cur_msg =
1202 list_first_entry(&master->queue, struct spi_message, queue); 1201 list_first_entry(&ctlr->queue, struct spi_message, queue);
1203 1202
1204 list_del_init(&master->cur_msg->queue); 1203 list_del_init(&ctlr->cur_msg->queue);
1205 if (master->busy) 1204 if (ctlr->busy)
1206 was_busy = true; 1205 was_busy = true;
1207 else 1206 else
1208 master->busy = true; 1207 ctlr->busy = true;
1209 spin_unlock_irqrestore(&master->queue_lock, flags); 1208 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1210 1209
1211 mutex_lock(&master->io_mutex); 1210 mutex_lock(&ctlr->io_mutex);
1212 1211
1213 if (!was_busy && master->auto_runtime_pm) { 1212 if (!was_busy && ctlr->auto_runtime_pm) {
1214 ret = pm_runtime_get_sync(master->dev.parent); 1213 ret = pm_runtime_get_sync(ctlr->dev.parent);
1215 if (ret < 0) { 1214 if (ret < 0) {
1216 dev_err(&master->dev, "Failed to power device: %d\n", 1215 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1217 ret); 1216 ret);
1218 mutex_unlock(&master->io_mutex); 1217 mutex_unlock(&ctlr->io_mutex);
1219 return; 1218 return;
1220 } 1219 }
1221 } 1220 }
1222 1221
1223 if (!was_busy) 1222 if (!was_busy)
1224 trace_spi_master_busy(master); 1223 trace_spi_controller_busy(ctlr);
1225 1224
1226 if (!was_busy && master->prepare_transfer_hardware) { 1225 if (!was_busy && ctlr->prepare_transfer_hardware) {
1227 ret = master->prepare_transfer_hardware(master); 1226 ret = ctlr->prepare_transfer_hardware(ctlr);
1228 if (ret) { 1227 if (ret) {
1229 dev_err(&master->dev, 1228 dev_err(&ctlr->dev,
1230 "failed to prepare transfer hardware\n"); 1229 "failed to prepare transfer hardware\n");
1231 1230
1232 if (master->auto_runtime_pm) 1231 if (ctlr->auto_runtime_pm)
1233 pm_runtime_put(master->dev.parent); 1232 pm_runtime_put(ctlr->dev.parent);
1234 mutex_unlock(&master->io_mutex); 1233 mutex_unlock(&ctlr->io_mutex);
1235 return; 1234 return;
1236 } 1235 }
1237 } 1236 }
1238 1237
1239 trace_spi_message_start(master->cur_msg); 1238 trace_spi_message_start(ctlr->cur_msg);
1240 1239
1241 if (master->prepare_message) { 1240 if (ctlr->prepare_message) {
1242 ret = master->prepare_message(master, master->cur_msg); 1241 ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1243 if (ret) { 1242 if (ret) {
1244 dev_err(&master->dev, 1243 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1245 "failed to prepare message: %d\n", ret); 1244 ret);
1246 master->cur_msg->status = ret; 1245 ctlr->cur_msg->status = ret;
1247 spi_finalize_current_message(master); 1246 spi_finalize_current_message(ctlr);
1248 goto out; 1247 goto out;
1249 } 1248 }
1250 master->cur_msg_prepared = true; 1249 ctlr->cur_msg_prepared = true;
1251 } 1250 }
1252 1251
1253 ret = spi_map_msg(master, master->cur_msg); 1252 ret = spi_map_msg(ctlr, ctlr->cur_msg);
1254 if (ret) { 1253 if (ret) {
1255 master->cur_msg->status = ret; 1254 ctlr->cur_msg->status = ret;
1256 spi_finalize_current_message(master); 1255 spi_finalize_current_message(ctlr);
1257 goto out; 1256 goto out;
1258 } 1257 }
1259 1258
1260 ret = master->transfer_one_message(master, master->cur_msg); 1259 ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1261 if (ret) { 1260 if (ret) {
1262 dev_err(&master->dev, 1261 dev_err(&ctlr->dev,
1263 "failed to transfer one message from queue\n"); 1262 "failed to transfer one message from queue\n");
1264 goto out; 1263 goto out;
1265 } 1264 }
1266 1265
1267out: 1266out:
1268 mutex_unlock(&master->io_mutex); 1267 mutex_unlock(&ctlr->io_mutex);
1269 1268
1270 /* Prod the scheduler in case transfer_one() was busy waiting */ 1269 /* Prod the scheduler in case transfer_one() was busy waiting */
1271 if (!ret) 1270 if (!ret)
@@ -1274,44 +1273,43 @@ out:
1274 1273
1275/** 1274/**
1276 * spi_pump_messages - kthread work function which processes spi message queue 1275 * spi_pump_messages - kthread work function which processes spi message queue
1277 * @work: pointer to kthread work struct contained in the master struct 1276 * @work: pointer to kthread work struct contained in the controller struct
1278 */ 1277 */
1279static void spi_pump_messages(struct kthread_work *work) 1278static void spi_pump_messages(struct kthread_work *work)
1280{ 1279{
1281 struct spi_master *master = 1280 struct spi_controller *ctlr =
1282 container_of(work, struct spi_master, pump_messages); 1281 container_of(work, struct spi_controller, pump_messages);
1283 1282
1284 __spi_pump_messages(master, true); 1283 __spi_pump_messages(ctlr, true);
1285} 1284}
1286 1285
1287static int spi_init_queue(struct spi_master *master) 1286static int spi_init_queue(struct spi_controller *ctlr)
1288{ 1287{
1289 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1288 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1290 1289
1291 master->running = false; 1290 ctlr->running = false;
1292 master->busy = false; 1291 ctlr->busy = false;
1293 1292
1294 kthread_init_worker(&master->kworker); 1293 kthread_init_worker(&ctlr->kworker);
1295 master->kworker_task = kthread_run(kthread_worker_fn, 1294 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1296 &master->kworker, "%s", 1295 "%s", dev_name(&ctlr->dev));
1297 dev_name(&master->dev)); 1296 if (IS_ERR(ctlr->kworker_task)) {
1298 if (IS_ERR(master->kworker_task)) { 1297 dev_err(&ctlr->dev, "failed to create message pump task\n");
1299 dev_err(&master->dev, "failed to create message pump task\n"); 1298 return PTR_ERR(ctlr->kworker_task);
1300 return PTR_ERR(master->kworker_task);
1301 } 1299 }
1302 kthread_init_work(&master->pump_messages, spi_pump_messages); 1300 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1303 1301
1304 /* 1302 /*
1305 * Master config will indicate if this controller should run the 1303 * Controller config will indicate if this controller should run the
1306 * message pump with high (realtime) priority to reduce the transfer 1304 * message pump with high (realtime) priority to reduce the transfer
1307 * latency on the bus by minimising the delay between a transfer 1305 * latency on the bus by minimising the delay between a transfer
1308 * request and the scheduling of the message pump thread. Without this 1306 * request and the scheduling of the message pump thread. Without this
1309 * setting the message pump thread will remain at default priority. 1307 * setting the message pump thread will remain at default priority.
1310 */ 1308 */
1311 if (master->rt) { 1309 if (ctlr->rt) {
1312 dev_info(&master->dev, 1310 dev_info(&ctlr->dev,
1313 "will run message pump with realtime priority\n"); 1311 "will run message pump with realtime priority\n");
1314 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param); 1312 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1315 } 1313 }
1316 1314
1317 return 0; 1315 return 0;
@@ -1320,23 +1318,23 @@ static int spi_init_queue(struct spi_master *master)
1320/** 1318/**
1321 * spi_get_next_queued_message() - called by driver to check for queued 1319 * spi_get_next_queued_message() - called by driver to check for queued
1322 * messages 1320 * messages
1323 * @master: the master to check for queued messages 1321 * @ctlr: the controller to check for queued messages
1324 * 1322 *
1325 * If there are more messages in the queue, the next message is returned from 1323 * If there are more messages in the queue, the next message is returned from
1326 * this call. 1324 * this call.
1327 * 1325 *
1328 * Return: the next message in the queue, else NULL if the queue is empty. 1326 * Return: the next message in the queue, else NULL if the queue is empty.
1329 */ 1327 */
1330struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1328struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1331{ 1329{
1332 struct spi_message *next; 1330 struct spi_message *next;
1333 unsigned long flags; 1331 unsigned long flags;
1334 1332
1335 /* get a pointer to the next message, if any */ 1333 /* get a pointer to the next message, if any */
1336 spin_lock_irqsave(&master->queue_lock, flags); 1334 spin_lock_irqsave(&ctlr->queue_lock, flags);
1337 next = list_first_entry_or_null(&master->queue, struct spi_message, 1335 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1338 queue); 1336 queue);
1339 spin_unlock_irqrestore(&master->queue_lock, flags); 1337 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1340 1338
1341 return next; 1339 return next;
1342} 1340}
@@ -1344,36 +1342,36 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1344 1342
1345/** 1343/**
1346 * spi_finalize_current_message() - the current message is complete 1344 * spi_finalize_current_message() - the current message is complete
1347 * @master: the master to return the message to 1345 * @ctlr: the controller to return the message to
1348 * 1346 *
1349 * Called by the driver to notify the core that the message in the front of the 1347 * Called by the driver to notify the core that the message in the front of the
1350 * queue is complete and can be removed from the queue. 1348 * queue is complete and can be removed from the queue.
1351 */ 1349 */
1352void spi_finalize_current_message(struct spi_master *master) 1350void spi_finalize_current_message(struct spi_controller *ctlr)
1353{ 1351{
1354 struct spi_message *mesg; 1352 struct spi_message *mesg;
1355 unsigned long flags; 1353 unsigned long flags;
1356 int ret; 1354 int ret;
1357 1355
1358 spin_lock_irqsave(&master->queue_lock, flags); 1356 spin_lock_irqsave(&ctlr->queue_lock, flags);
1359 mesg = master->cur_msg; 1357 mesg = ctlr->cur_msg;
1360 spin_unlock_irqrestore(&master->queue_lock, flags); 1358 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1361 1359
1362 spi_unmap_msg(master, mesg); 1360 spi_unmap_msg(ctlr, mesg);
1363 1361
1364 if (master->cur_msg_prepared && master->unprepare_message) { 1362 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1365 ret = master->unprepare_message(master, mesg); 1363 ret = ctlr->unprepare_message(ctlr, mesg);
1366 if (ret) { 1364 if (ret) {
1367 dev_err(&master->dev, 1365 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1368 "failed to unprepare message: %d\n", ret); 1366 ret);
1369 } 1367 }
1370 } 1368 }
1371 1369
1372 spin_lock_irqsave(&master->queue_lock, flags); 1370 spin_lock_irqsave(&ctlr->queue_lock, flags);
1373 master->cur_msg = NULL; 1371 ctlr->cur_msg = NULL;
1374 master->cur_msg_prepared = false; 1372 ctlr->cur_msg_prepared = false;
1375 kthread_queue_work(&master->kworker, &master->pump_messages); 1373 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1376 spin_unlock_irqrestore(&master->queue_lock, flags); 1374 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1377 1375
1378 trace_spi_message_done(mesg); 1376 trace_spi_message_done(mesg);
1379 1377
@@ -1383,66 +1381,65 @@ void spi_finalize_current_message(struct spi_master *master)
1383} 1381}
1384EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1382EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1385 1383
1386static int spi_start_queue(struct spi_master *master) 1384static int spi_start_queue(struct spi_controller *ctlr)
1387{ 1385{
1388 unsigned long flags; 1386 unsigned long flags;
1389 1387
1390 spin_lock_irqsave(&master->queue_lock, flags); 1388 spin_lock_irqsave(&ctlr->queue_lock, flags);
1391 1389
1392 if (master->running || master->busy) { 1390 if (ctlr->running || ctlr->busy) {
1393 spin_unlock_irqrestore(&master->queue_lock, flags); 1391 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1394 return -EBUSY; 1392 return -EBUSY;
1395 } 1393 }
1396 1394
1397 master->running = true; 1395 ctlr->running = true;
1398 master->cur_msg = NULL; 1396 ctlr->cur_msg = NULL;
1399 spin_unlock_irqrestore(&master->queue_lock, flags); 1397 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1400 1398
1401 kthread_queue_work(&master->kworker, &master->pump_messages); 1399 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1402 1400
1403 return 0; 1401 return 0;
1404} 1402}
1405 1403
1406static int spi_stop_queue(struct spi_master *master) 1404static int spi_stop_queue(struct spi_controller *ctlr)
1407{ 1405{
1408 unsigned long flags; 1406 unsigned long flags;
1409 unsigned limit = 500; 1407 unsigned limit = 500;
1410 int ret = 0; 1408 int ret = 0;
1411 1409
1412 spin_lock_irqsave(&master->queue_lock, flags); 1410 spin_lock_irqsave(&ctlr->queue_lock, flags);
1413 1411
1414 /* 1412 /*
1415 * This is a bit lame, but is optimized for the common execution path. 1413 * This is a bit lame, but is optimized for the common execution path.
1416 * A wait_queue on the master->busy could be used, but then the common 1414 * A wait_queue on the ctlr->busy could be used, but then the common
1417 * execution path (pump_messages) would be required to call wake_up or 1415 * execution path (pump_messages) would be required to call wake_up or
1418 * friends on every SPI message. Do this instead. 1416 * friends on every SPI message. Do this instead.
1419 */ 1417 */
1420 while ((!list_empty(&master->queue) || master->busy) && limit--) { 1418 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1421 spin_unlock_irqrestore(&master->queue_lock, flags); 1419 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1422 usleep_range(10000, 11000); 1420 usleep_range(10000, 11000);
1423 spin_lock_irqsave(&master->queue_lock, flags); 1421 spin_lock_irqsave(&ctlr->queue_lock, flags);
1424 } 1422 }
1425 1423
1426 if (!list_empty(&master->queue) || master->busy) 1424 if (!list_empty(&ctlr->queue) || ctlr->busy)
1427 ret = -EBUSY; 1425 ret = -EBUSY;
1428 else 1426 else
1429 master->running = false; 1427 ctlr->running = false;
1430 1428
1431 spin_unlock_irqrestore(&master->queue_lock, flags); 1429 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1432 1430
1433 if (ret) { 1431 if (ret) {
1434 dev_warn(&master->dev, 1432 dev_warn(&ctlr->dev, "could not stop message queue\n");
1435 "could not stop message queue\n");
1436 return ret; 1433 return ret;
1437 } 1434 }
1438 return ret; 1435 return ret;
1439} 1436}
1440 1437
1441static int spi_destroy_queue(struct spi_master *master) 1438static int spi_destroy_queue(struct spi_controller *ctlr)
1442{ 1439{
1443 int ret; 1440 int ret;
1444 1441
1445 ret = spi_stop_queue(master); 1442 ret = spi_stop_queue(ctlr);
1446 1443
1447 /* 1444 /*
1448 * kthread_flush_worker will block until all work is done. 1445 * kthread_flush_worker will block until all work is done.
@@ -1451,12 +1448,12 @@ static int spi_destroy_queue(struct spi_master *master)
1451 * return anyway. 1448 * return anyway.
1452 */ 1449 */
1453 if (ret) { 1450 if (ret) {
1454 dev_err(&master->dev, "problem destroying queue\n"); 1451 dev_err(&ctlr->dev, "problem destroying queue\n");
1455 return ret; 1452 return ret;
1456 } 1453 }
1457 1454
1458 kthread_flush_worker(&master->kworker); 1455 kthread_flush_worker(&ctlr->kworker);
1459 kthread_stop(master->kworker_task); 1456 kthread_stop(ctlr->kworker_task);
1460 1457
1461 return 0; 1458 return 0;
1462} 1459}
@@ -1465,23 +1462,23 @@ static int __spi_queued_transfer(struct spi_device *spi,
1465 struct spi_message *msg, 1462 struct spi_message *msg,
1466 bool need_pump) 1463 bool need_pump)
1467{ 1464{
1468 struct spi_master *master = spi->master; 1465 struct spi_controller *ctlr = spi->controller;
1469 unsigned long flags; 1466 unsigned long flags;
1470 1467
1471 spin_lock_irqsave(&master->queue_lock, flags); 1468 spin_lock_irqsave(&ctlr->queue_lock, flags);
1472 1469
1473 if (!master->running) { 1470 if (!ctlr->running) {
1474 spin_unlock_irqrestore(&master->queue_lock, flags); 1471 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1475 return -ESHUTDOWN; 1472 return -ESHUTDOWN;
1476 } 1473 }
1477 msg->actual_length = 0; 1474 msg->actual_length = 0;
1478 msg->status = -EINPROGRESS; 1475 msg->status = -EINPROGRESS;
1479 1476
1480 list_add_tail(&msg->queue, &master->queue); 1477 list_add_tail(&msg->queue, &ctlr->queue);
1481 if (!master->busy && need_pump) 1478 if (!ctlr->busy && need_pump)
1482 kthread_queue_work(&master->kworker, &master->pump_messages); 1479 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1483 1480
1484 spin_unlock_irqrestore(&master->queue_lock, flags); 1481 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1485 return 0; 1482 return 0;
1486} 1483}
1487 1484
@@ -1497,31 +1494,31 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1497 return __spi_queued_transfer(spi, msg, true); 1494 return __spi_queued_transfer(spi, msg, true);
1498} 1495}
1499 1496
1500static int spi_master_initialize_queue(struct spi_master *master) 1497static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1501{ 1498{
1502 int ret; 1499 int ret;
1503 1500
1504 master->transfer = spi_queued_transfer; 1501 ctlr->transfer = spi_queued_transfer;
1505 if (!master->transfer_one_message) 1502 if (!ctlr->transfer_one_message)
1506 master->transfer_one_message = spi_transfer_one_message; 1503 ctlr->transfer_one_message = spi_transfer_one_message;
1507 1504
1508 /* Initialize and start queue */ 1505 /* Initialize and start queue */
1509 ret = spi_init_queue(master); 1506 ret = spi_init_queue(ctlr);
1510 if (ret) { 1507 if (ret) {
1511 dev_err(&master->dev, "problem initializing queue\n"); 1508 dev_err(&ctlr->dev, "problem initializing queue\n");
1512 goto err_init_queue; 1509 goto err_init_queue;
1513 } 1510 }
1514 master->queued = true; 1511 ctlr->queued = true;
1515 ret = spi_start_queue(master); 1512 ret = spi_start_queue(ctlr);
1516 if (ret) { 1513 if (ret) {
1517 dev_err(&master->dev, "problem starting queue\n"); 1514 dev_err(&ctlr->dev, "problem starting queue\n");
1518 goto err_start_queue; 1515 goto err_start_queue;
1519 } 1516 }
1520 1517
1521 return 0; 1518 return 0;
1522 1519
1523err_start_queue: 1520err_start_queue:
1524 spi_destroy_queue(master); 1521 spi_destroy_queue(ctlr);
1525err_init_queue: 1522err_init_queue:
1526 return ret; 1523 return ret;
1527} 1524}
@@ -1529,21 +1526,12 @@ err_init_queue:
1529/*-------------------------------------------------------------------------*/ 1526/*-------------------------------------------------------------------------*/
1530 1527
1531#if defined(CONFIG_OF) 1528#if defined(CONFIG_OF)
1532static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, 1529static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1533 struct device_node *nc) 1530 struct device_node *nc)
1534{ 1531{
1535 u32 value; 1532 u32 value;
1536 int rc; 1533 int rc;
1537 1534
1538 /* Device address */
1539 rc = of_property_read_u32(nc, "reg", &value);
1540 if (rc) {
1541 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1542 nc->full_name, rc);
1543 return rc;
1544 }
1545 spi->chip_select = value;
1546
1547 /* Mode (clock phase/polarity/etc.) */ 1535 /* Mode (clock phase/polarity/etc.) */
1548 if (of_find_property(nc, "spi-cpha", NULL)) 1536 if (of_find_property(nc, "spi-cpha", NULL))
1549 spi->mode |= SPI_CPHA; 1537 spi->mode |= SPI_CPHA;
@@ -1568,7 +1556,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
1568 spi->mode |= SPI_TX_QUAD; 1556 spi->mode |= SPI_TX_QUAD;
1569 break; 1557 break;
1570 default: 1558 default:
1571 dev_warn(&master->dev, 1559 dev_warn(&ctlr->dev,
1572 "spi-tx-bus-width %d not supported\n", 1560 "spi-tx-bus-width %d not supported\n",
1573 value); 1561 value);
1574 break; 1562 break;
@@ -1586,17 +1574,36 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
1586 spi->mode |= SPI_RX_QUAD; 1574 spi->mode |= SPI_RX_QUAD;
1587 break; 1575 break;
1588 default: 1576 default:
1589 dev_warn(&master->dev, 1577 dev_warn(&ctlr->dev,
1590 "spi-rx-bus-width %d not supported\n", 1578 "spi-rx-bus-width %d not supported\n",
1591 value); 1579 value);
1592 break; 1580 break;
1593 } 1581 }
1594 } 1582 }
1595 1583
1584 if (spi_controller_is_slave(ctlr)) {
1585 if (strcmp(nc->name, "slave")) {
1586 dev_err(&ctlr->dev, "%s is not called 'slave'\n",
1587 nc->full_name);
1588 return -EINVAL;
1589 }
1590 return 0;
1591 }
1592
1593 /* Device address */
1594 rc = of_property_read_u32(nc, "reg", &value);
1595 if (rc) {
1596 dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n",
1597 nc->full_name, rc);
1598 return rc;
1599 }
1600 spi->chip_select = value;
1601
1596 /* Device speed */ 1602 /* Device speed */
1597 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1603 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1598 if (rc) { 1604 if (rc) {
1599 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 1605 dev_err(&ctlr->dev,
1606 "%s has no valid 'spi-max-frequency' property (%d)\n",
1600 nc->full_name, rc); 1607 nc->full_name, rc);
1601 return rc; 1608 return rc;
1602 } 1609 }
@@ -1606,15 +1613,15 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
1606} 1613}
1607 1614
1608static struct spi_device * 1615static struct spi_device *
1609of_register_spi_device(struct spi_master *master, struct device_node *nc) 1616of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1610{ 1617{
1611 struct spi_device *spi; 1618 struct spi_device *spi;
1612 int rc; 1619 int rc;
1613 1620
1614 /* Alloc an spi_device */ 1621 /* Alloc an spi_device */
1615 spi = spi_alloc_device(master); 1622 spi = spi_alloc_device(ctlr);
1616 if (!spi) { 1623 if (!spi) {
1617 dev_err(&master->dev, "spi_device alloc error for %s\n", 1624 dev_err(&ctlr->dev, "spi_device alloc error for %s\n",
1618 nc->full_name); 1625 nc->full_name);
1619 rc = -ENOMEM; 1626 rc = -ENOMEM;
1620 goto err_out; 1627 goto err_out;
@@ -1624,12 +1631,12 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
1624 rc = of_modalias_node(nc, spi->modalias, 1631 rc = of_modalias_node(nc, spi->modalias,
1625 sizeof(spi->modalias)); 1632 sizeof(spi->modalias));
1626 if (rc < 0) { 1633 if (rc < 0) {
1627 dev_err(&master->dev, "cannot find modalias for %s\n", 1634 dev_err(&ctlr->dev, "cannot find modalias for %s\n",
1628 nc->full_name); 1635 nc->full_name);
1629 goto err_out; 1636 goto err_out;
1630 } 1637 }
1631 1638
1632 rc = of_spi_parse_dt(master, spi, nc); 1639 rc = of_spi_parse_dt(ctlr, spi, nc);
1633 if (rc) 1640 if (rc)
1634 goto err_out; 1641 goto err_out;
1635 1642
@@ -1640,7 +1647,7 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
1640 /* Register the new device */ 1647 /* Register the new device */
1641 rc = spi_add_device(spi); 1648 rc = spi_add_device(spi);
1642 if (rc) { 1649 if (rc) {
1643 dev_err(&master->dev, "spi_device register error %s\n", 1650 dev_err(&ctlr->dev, "spi_device register error %s\n",
1644 nc->full_name); 1651 nc->full_name);
1645 goto err_of_node_put; 1652 goto err_of_node_put;
1646 } 1653 }
@@ -1656,39 +1663,40 @@ err_out:
1656 1663
1657/** 1664/**
1658 * of_register_spi_devices() - Register child devices onto the SPI bus 1665 * of_register_spi_devices() - Register child devices onto the SPI bus
1659 * @master: Pointer to spi_master device 1666 * @ctlr: Pointer to spi_controller device
1660 * 1667 *
1661 * Registers an spi_device for each child node of master node which has a 'reg' 1668 * Registers an spi_device for each child node of controller node which
1662 * property. 1669 * represents a valid SPI slave.
1663 */ 1670 */
1664static void of_register_spi_devices(struct spi_master *master) 1671static void of_register_spi_devices(struct spi_controller *ctlr)
1665{ 1672{
1666 struct spi_device *spi; 1673 struct spi_device *spi;
1667 struct device_node *nc; 1674 struct device_node *nc;
1668 1675
1669 if (!master->dev.of_node) 1676 if (!ctlr->dev.of_node)
1670 return; 1677 return;
1671 1678
1672 for_each_available_child_of_node(master->dev.of_node, nc) { 1679 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1673 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1680 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1674 continue; 1681 continue;
1675 spi = of_register_spi_device(master, nc); 1682 spi = of_register_spi_device(ctlr, nc);
1676 if (IS_ERR(spi)) { 1683 if (IS_ERR(spi)) {
1677 dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1684 dev_warn(&ctlr->dev,
1678 nc->full_name); 1685 "Failed to create SPI device for %s\n",
1686 nc->full_name);
1679 of_node_clear_flag(nc, OF_POPULATED); 1687 of_node_clear_flag(nc, OF_POPULATED);
1680 } 1688 }
1681 } 1689 }
1682} 1690}
1683#else 1691#else
1684static void of_register_spi_devices(struct spi_master *master) { } 1692static void of_register_spi_devices(struct spi_controller *ctlr) { }
1685#endif 1693#endif
1686 1694
1687#ifdef CONFIG_ACPI 1695#ifdef CONFIG_ACPI
1688static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1696static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1689{ 1697{
1690 struct spi_device *spi = data; 1698 struct spi_device *spi = data;
1691 struct spi_master *master = spi->master; 1699 struct spi_controller *ctlr = spi->controller;
1692 1700
1693 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1701 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1694 struct acpi_resource_spi_serialbus *sb; 1702 struct acpi_resource_spi_serialbus *sb;
@@ -1702,8 +1710,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1702 * 0 .. max - 1 so we need to ask the driver to 1710 * 0 .. max - 1 so we need to ask the driver to
1703 * translate between the two schemes. 1711 * translate between the two schemes.
1704 */ 1712 */
1705 if (master->fw_translate_cs) { 1713 if (ctlr->fw_translate_cs) {
1706 int cs = master->fw_translate_cs(master, 1714 int cs = ctlr->fw_translate_cs(ctlr,
1707 sb->device_selection); 1715 sb->device_selection);
1708 if (cs < 0) 1716 if (cs < 0)
1709 return cs; 1717 return cs;
@@ -1732,7 +1740,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1732 return 1; 1740 return 1;
1733} 1741}
1734 1742
1735static acpi_status acpi_register_spi_device(struct spi_master *master, 1743static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1736 struct acpi_device *adev) 1744 struct acpi_device *adev)
1737{ 1745{
1738 struct list_head resource_list; 1746 struct list_head resource_list;
@@ -1743,9 +1751,9 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
1743 acpi_device_enumerated(adev)) 1751 acpi_device_enumerated(adev))
1744 return AE_OK; 1752 return AE_OK;
1745 1753
1746 spi = spi_alloc_device(master); 1754 spi = spi_alloc_device(ctlr);
1747 if (!spi) { 1755 if (!spi) {
1748 dev_err(&master->dev, "failed to allocate SPI device for %s\n", 1756 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1749 dev_name(&adev->dev)); 1757 dev_name(&adev->dev));
1750 return AE_NO_MEMORY; 1758 return AE_NO_MEMORY;
1751 } 1759 }
@@ -1774,7 +1782,7 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
1774 adev->power.flags.ignore_parent = true; 1782 adev->power.flags.ignore_parent = true;
1775 if (spi_add_device(spi)) { 1783 if (spi_add_device(spi)) {
1776 adev->power.flags.ignore_parent = false; 1784 adev->power.flags.ignore_parent = false;
1777 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1785 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1778 dev_name(&adev->dev)); 1786 dev_name(&adev->dev));
1779 spi_dev_put(spi); 1787 spi_dev_put(spi);
1780 } 1788 }
@@ -1785,104 +1793,211 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
1785static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1793static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1786 void *data, void **return_value) 1794 void *data, void **return_value)
1787{ 1795{
1788 struct spi_master *master = data; 1796 struct spi_controller *ctlr = data;
1789 struct acpi_device *adev; 1797 struct acpi_device *adev;
1790 1798
1791 if (acpi_bus_get_device(handle, &adev)) 1799 if (acpi_bus_get_device(handle, &adev))
1792 return AE_OK; 1800 return AE_OK;
1793 1801
1794 return acpi_register_spi_device(master, adev); 1802 return acpi_register_spi_device(ctlr, adev);
1795} 1803}
1796 1804
1797static void acpi_register_spi_devices(struct spi_master *master) 1805static void acpi_register_spi_devices(struct spi_controller *ctlr)
1798{ 1806{
1799 acpi_status status; 1807 acpi_status status;
1800 acpi_handle handle; 1808 acpi_handle handle;
1801 1809
1802 handle = ACPI_HANDLE(master->dev.parent); 1810 handle = ACPI_HANDLE(ctlr->dev.parent);
1803 if (!handle) 1811 if (!handle)
1804 return; 1812 return;
1805 1813
1806 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1814 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1807 acpi_spi_add_device, NULL, 1815 acpi_spi_add_device, NULL, ctlr, NULL);
1808 master, NULL);
1809 if (ACPI_FAILURE(status)) 1816 if (ACPI_FAILURE(status))
1810 dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 1817 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1811} 1818}
1812#else 1819#else
1813static inline void acpi_register_spi_devices(struct spi_master *master) {} 1820static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1814#endif /* CONFIG_ACPI */ 1821#endif /* CONFIG_ACPI */
1815 1822
1816static void spi_master_release(struct device *dev) 1823static void spi_controller_release(struct device *dev)
1817{ 1824{
1818 struct spi_master *master; 1825 struct spi_controller *ctlr;
1819 1826
1820 master = container_of(dev, struct spi_master, dev); 1827 ctlr = container_of(dev, struct spi_controller, dev);
1821 kfree(master); 1828 kfree(ctlr);
1822} 1829}
1823 1830
1824static struct class spi_master_class = { 1831static struct class spi_master_class = {
1825 .name = "spi_master", 1832 .name = "spi_master",
1826 .owner = THIS_MODULE, 1833 .owner = THIS_MODULE,
1827 .dev_release = spi_master_release, 1834 .dev_release = spi_controller_release,
1828 .dev_groups = spi_master_groups, 1835 .dev_groups = spi_master_groups,
1829}; 1836};
1830 1837
1838#ifdef CONFIG_SPI_SLAVE
1839/**
1840 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1841 * controller
1842 * @spi: device used for the current transfer
1843 */
1844int spi_slave_abort(struct spi_device *spi)
1845{
1846 struct spi_controller *ctlr = spi->controller;
1847
1848 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1849 return ctlr->slave_abort(ctlr);
1850
1851 return -ENOTSUPP;
1852}
1853EXPORT_SYMBOL_GPL(spi_slave_abort);
1854
1855static int match_true(struct device *dev, void *data)
1856{
1857 return 1;
1858}
1859
1860static ssize_t spi_slave_show(struct device *dev,
1861 struct device_attribute *attr, char *buf)
1862{
1863 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1864 dev);
1865 struct device *child;
1866
1867 child = device_find_child(&ctlr->dev, NULL, match_true);
1868 return sprintf(buf, "%s\n",
1869 child ? to_spi_device(child)->modalias : NULL);
1870}
1871
1872static ssize_t spi_slave_store(struct device *dev,
1873 struct device_attribute *attr, const char *buf,
1874 size_t count)
1875{
1876 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1877 dev);
1878 struct spi_device *spi;
1879 struct device *child;
1880 char name[32];
1881 int rc;
1882
1883 rc = sscanf(buf, "%31s", name);
1884 if (rc != 1 || !name[0])
1885 return -EINVAL;
1886
1887 child = device_find_child(&ctlr->dev, NULL, match_true);
1888 if (child) {
1889 /* Remove registered slave */
1890 device_unregister(child);
1891 put_device(child);
1892 }
1893
1894 if (strcmp(name, "(null)")) {
1895 /* Register new slave */
1896 spi = spi_alloc_device(ctlr);
1897 if (!spi)
1898 return -ENOMEM;
1899
1900 strlcpy(spi->modalias, name, sizeof(spi->modalias));
1901
1902 rc = spi_add_device(spi);
1903 if (rc) {
1904 spi_dev_put(spi);
1905 return rc;
1906 }
1907 }
1908
1909 return count;
1910}
1911
1912static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
1913
1914static struct attribute *spi_slave_attrs[] = {
1915 &dev_attr_slave.attr,
1916 NULL,
1917};
1918
1919static const struct attribute_group spi_slave_group = {
1920 .attrs = spi_slave_attrs,
1921};
1922
1923static const struct attribute_group *spi_slave_groups[] = {
1924 &spi_controller_statistics_group,
1925 &spi_slave_group,
1926 NULL,
1927};
1928
1929static struct class spi_slave_class = {
1930 .name = "spi_slave",
1931 .owner = THIS_MODULE,
1932 .dev_release = spi_controller_release,
1933 .dev_groups = spi_slave_groups,
1934};
1935#else
1936extern struct class spi_slave_class; /* dummy */
1937#endif
1831 1938
1832/** 1939/**
1833 * spi_alloc_master - allocate SPI master controller 1940 * __spi_alloc_controller - allocate an SPI master or slave controller
1834 * @dev: the controller, possibly using the platform_bus 1941 * @dev: the controller, possibly using the platform_bus
1835 * @size: how much zeroed driver-private data to allocate; the pointer to this 1942 * @size: how much zeroed driver-private data to allocate; the pointer to this
1836 * memory is in the driver_data field of the returned device, 1943 * memory is in the driver_data field of the returned device,
1837 * accessible with spi_master_get_devdata(). 1944 * accessible with spi_controller_get_devdata().
1945 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
1946 * slave (true) controller
1838 * Context: can sleep 1947 * Context: can sleep
1839 * 1948 *
1840 * This call is used only by SPI master controller drivers, which are the 1949 * This call is used only by SPI controller drivers, which are the
1841 * only ones directly touching chip registers. It's how they allocate 1950 * only ones directly touching chip registers. It's how they allocate
1842 * an spi_master structure, prior to calling spi_register_master(). 1951 * an spi_controller structure, prior to calling spi_register_controller().
1843 * 1952 *
1844 * This must be called from context that can sleep. 1953 * This must be called from context that can sleep.
1845 * 1954 *
1846 * The caller is responsible for assigning the bus number and initializing 1955 * The caller is responsible for assigning the bus number and initializing the
1847 * the master's methods before calling spi_register_master(); and (after errors 1956 * controller's methods before calling spi_register_controller(); and (after
1848 * adding the device) calling spi_master_put() to prevent a memory leak. 1957 * errors adding the device) calling spi_controller_put() to prevent a memory
1958 * leak.
1849 * 1959 *
1850 * Return: the SPI master structure on success, else NULL. 1960 * Return: the SPI controller structure on success, else NULL.
1851 */ 1961 */
1852struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1962struct spi_controller *__spi_alloc_controller(struct device *dev,
1963 unsigned int size, bool slave)
1853{ 1964{
1854 struct spi_master *master; 1965 struct spi_controller *ctlr;
1855 1966
1856 if (!dev) 1967 if (!dev)
1857 return NULL; 1968 return NULL;
1858 1969
1859 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1970 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
1860 if (!master) 1971 if (!ctlr)
1861 return NULL; 1972 return NULL;
1862 1973
1863 device_initialize(&master->dev); 1974 device_initialize(&ctlr->dev);
1864 master->bus_num = -1; 1975 ctlr->bus_num = -1;
1865 master->num_chipselect = 1; 1976 ctlr->num_chipselect = 1;
1866 master->dev.class = &spi_master_class; 1977 ctlr->slave = slave;
1867 master->dev.parent = dev; 1978 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
1868 pm_suspend_ignore_children(&master->dev, true); 1979 ctlr->dev.class = &spi_slave_class;
1869 spi_master_set_devdata(master, &master[1]); 1980 else
1981 ctlr->dev.class = &spi_master_class;
1982 ctlr->dev.parent = dev;
1983 pm_suspend_ignore_children(&ctlr->dev, true);
1984 spi_controller_set_devdata(ctlr, &ctlr[1]);
1870 1985
1871 return master; 1986 return ctlr;
1872} 1987}
1873EXPORT_SYMBOL_GPL(spi_alloc_master); 1988EXPORT_SYMBOL_GPL(__spi_alloc_controller);
1874 1989
1875#ifdef CONFIG_OF 1990#ifdef CONFIG_OF
1876static int of_spi_register_master(struct spi_master *master) 1991static int of_spi_register_master(struct spi_controller *ctlr)
1877{ 1992{
1878 int nb, i, *cs; 1993 int nb, i, *cs;
1879 struct device_node *np = master->dev.of_node; 1994 struct device_node *np = ctlr->dev.of_node;
1880 1995
1881 if (!np) 1996 if (!np)
1882 return 0; 1997 return 0;
1883 1998
1884 nb = of_gpio_named_count(np, "cs-gpios"); 1999 nb = of_gpio_named_count(np, "cs-gpios");
1885 master->num_chipselect = max_t(int, nb, master->num_chipselect); 2000 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
1886 2001
1887 /* Return error only for an incorrectly formed cs-gpios property */ 2002 /* Return error only for an incorrectly formed cs-gpios property */
1888 if (nb == 0 || nb == -ENOENT) 2003 if (nb == 0 || nb == -ENOENT)
@@ -1890,15 +2005,14 @@ static int of_spi_register_master(struct spi_master *master)
1890 else if (nb < 0) 2005 else if (nb < 0)
1891 return nb; 2006 return nb;
1892 2007
1893 cs = devm_kzalloc(&master->dev, 2008 cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
1894 sizeof(int) * master->num_chipselect,
1895 GFP_KERNEL); 2009 GFP_KERNEL);
1896 master->cs_gpios = cs; 2010 ctlr->cs_gpios = cs;
1897 2011
1898 if (!master->cs_gpios) 2012 if (!ctlr->cs_gpios)
1899 return -ENOMEM; 2013 return -ENOMEM;
1900 2014
1901 for (i = 0; i < master->num_chipselect; i++) 2015 for (i = 0; i < ctlr->num_chipselect; i++)
1902 cs[i] = -ENOENT; 2016 cs[i] = -ENOENT;
1903 2017
1904 for (i = 0; i < nb; i++) 2018 for (i = 0; i < nb; i++)
@@ -1907,20 +2021,21 @@ static int of_spi_register_master(struct spi_master *master)
1907 return 0; 2021 return 0;
1908} 2022}
1909#else 2023#else
1910static int of_spi_register_master(struct spi_master *master) 2024static int of_spi_register_master(struct spi_controller *ctlr)
1911{ 2025{
1912 return 0; 2026 return 0;
1913} 2027}
1914#endif 2028#endif
1915 2029
1916/** 2030/**
1917 * spi_register_master - register SPI master controller 2031 * spi_register_controller - register SPI master or slave controller
1918 * @master: initialized master, originally from spi_alloc_master() 2032 * @ctlr: initialized master, originally from spi_alloc_master() or
2033 * spi_alloc_slave()
1919 * Context: can sleep 2034 * Context: can sleep
1920 * 2035 *
1921 * SPI master controllers connect to their drivers using some non-SPI bus, 2036 * SPI controllers connect to their drivers using some non-SPI bus,
1922 * such as the platform bus. The final stage of probe() in that code 2037 * such as the platform bus. The final stage of probe() in that code
1923 * includes calling spi_register_master() to hook up to this SPI bus glue. 2038 * includes calling spi_register_controller() to hook up to this SPI bus glue.
1924 * 2039 *
1925 * SPI controllers use board specific (often SOC specific) bus numbers, 2040 * SPI controllers use board specific (often SOC specific) bus numbers,
1926 * and board-specific addressing for SPI devices combines those numbers 2041 * and board-specific addressing for SPI devices combines those numbers
@@ -1929,16 +2044,16 @@ static int of_spi_register_master(struct spi_master *master)
1929 * chip is at which address. 2044 * chip is at which address.
1930 * 2045 *
1931 * This must be called from context that can sleep. It returns zero on 2046 * This must be called from context that can sleep. It returns zero on
1932 * success, else a negative error code (dropping the master's refcount). 2047 * success, else a negative error code (dropping the controller's refcount).
1933 * After a successful return, the caller is responsible for calling 2048 * After a successful return, the caller is responsible for calling
1934 * spi_unregister_master(). 2049 * spi_unregister_controller().
1935 * 2050 *
1936 * Return: zero on success, else a negative error code. 2051 * Return: zero on success, else a negative error code.
1937 */ 2052 */
1938int spi_register_master(struct spi_master *master) 2053int spi_register_controller(struct spi_controller *ctlr)
1939{ 2054{
1940 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 2055 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1941 struct device *dev = master->dev.parent; 2056 struct device *dev = ctlr->dev.parent;
1942 struct boardinfo *bi; 2057 struct boardinfo *bi;
1943 int status = -ENODEV; 2058 int status = -ENODEV;
1944 int dynamic = 0; 2059 int dynamic = 0;
@@ -1946,103 +2061,109 @@ int spi_register_master(struct spi_master *master)
1946 if (!dev) 2061 if (!dev)
1947 return -ENODEV; 2062 return -ENODEV;
1948 2063
1949 status = of_spi_register_master(master); 2064 if (!spi_controller_is_slave(ctlr)) {
1950 if (status) 2065 status = of_spi_register_master(ctlr);
1951 return status; 2066 if (status)
2067 return status;
2068 }
1952 2069
1953 /* even if it's just one always-selected device, there must 2070 /* even if it's just one always-selected device, there must
1954 * be at least one chipselect 2071 * be at least one chipselect
1955 */ 2072 */
1956 if (master->num_chipselect == 0) 2073 if (ctlr->num_chipselect == 0)
1957 return -EINVAL; 2074 return -EINVAL;
1958 2075
1959 if ((master->bus_num < 0) && master->dev.of_node) 2076 if ((ctlr->bus_num < 0) && ctlr->dev.of_node)
1960 master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 2077 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
1961 2078
1962 /* convention: dynamically assigned bus IDs count down from the max */ 2079 /* convention: dynamically assigned bus IDs count down from the max */
1963 if (master->bus_num < 0) { 2080 if (ctlr->bus_num < 0) {
1964 /* FIXME switch to an IDR based scheme, something like 2081 /* FIXME switch to an IDR based scheme, something like
1965 * I2C now uses, so we can't run out of "dynamic" IDs 2082 * I2C now uses, so we can't run out of "dynamic" IDs
1966 */ 2083 */
1967 master->bus_num = atomic_dec_return(&dyn_bus_id); 2084 ctlr->bus_num = atomic_dec_return(&dyn_bus_id);
1968 dynamic = 1; 2085 dynamic = 1;
1969 } 2086 }
1970 2087
1971 INIT_LIST_HEAD(&master->queue); 2088 INIT_LIST_HEAD(&ctlr->queue);
1972 spin_lock_init(&master->queue_lock); 2089 spin_lock_init(&ctlr->queue_lock);
1973 spin_lock_init(&master->bus_lock_spinlock); 2090 spin_lock_init(&ctlr->bus_lock_spinlock);
1974 mutex_init(&master->bus_lock_mutex); 2091 mutex_init(&ctlr->bus_lock_mutex);
1975 mutex_init(&master->io_mutex); 2092 mutex_init(&ctlr->io_mutex);
1976 master->bus_lock_flag = 0; 2093 ctlr->bus_lock_flag = 0;
1977 init_completion(&master->xfer_completion); 2094 init_completion(&ctlr->xfer_completion);
1978 if (!master->max_dma_len) 2095 if (!ctlr->max_dma_len)
1979 master->max_dma_len = INT_MAX; 2096 ctlr->max_dma_len = INT_MAX;
1980 2097
1981 /* register the device, then userspace will see it. 2098 /* register the device, then userspace will see it.
1982 * registration fails if the bus ID is in use. 2099 * registration fails if the bus ID is in use.
1983 */ 2100 */
1984 dev_set_name(&master->dev, "spi%u", master->bus_num); 2101 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
1985 status = device_add(&master->dev); 2102 status = device_add(&ctlr->dev);
1986 if (status < 0) 2103 if (status < 0)
1987 goto done; 2104 goto done;
1988 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 2105 dev_dbg(dev, "registered %s %s%s\n",
1989 dynamic ? " (dynamic)" : ""); 2106 spi_controller_is_slave(ctlr) ? "slave" : "master",
2107 dev_name(&ctlr->dev), dynamic ? " (dynamic)" : "");
1990 2108
1991 /* If we're using a queued driver, start the queue */ 2109 /* If we're using a queued driver, start the queue */
1992 if (master->transfer) 2110 if (ctlr->transfer)
1993 dev_info(dev, "master is unqueued, this is deprecated\n"); 2111 dev_info(dev, "controller is unqueued, this is deprecated\n");
1994 else { 2112 else {
1995 status = spi_master_initialize_queue(master); 2113 status = spi_controller_initialize_queue(ctlr);
1996 if (status) { 2114 if (status) {
1997 device_del(&master->dev); 2115 device_del(&ctlr->dev);
1998 goto done; 2116 goto done;
1999 } 2117 }
2000 } 2118 }
2001 /* add statistics */ 2119 /* add statistics */
2002 spin_lock_init(&master->statistics.lock); 2120 spin_lock_init(&ctlr->statistics.lock);
2003 2121
2004 mutex_lock(&board_lock); 2122 mutex_lock(&board_lock);
2005 list_add_tail(&master->list, &spi_master_list); 2123 list_add_tail(&ctlr->list, &spi_controller_list);
2006 list_for_each_entry(bi, &board_list, list) 2124 list_for_each_entry(bi, &board_list, list)
2007 spi_match_master_to_boardinfo(master, &bi->board_info); 2125 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2008 mutex_unlock(&board_lock); 2126 mutex_unlock(&board_lock);
2009 2127
2010 /* Register devices from the device tree and ACPI */ 2128 /* Register devices from the device tree and ACPI */
2011 of_register_spi_devices(master); 2129 of_register_spi_devices(ctlr);
2012 acpi_register_spi_devices(master); 2130 acpi_register_spi_devices(ctlr);
2013done: 2131done:
2014 return status; 2132 return status;
2015} 2133}
2016EXPORT_SYMBOL_GPL(spi_register_master); 2134EXPORT_SYMBOL_GPL(spi_register_controller);
2017 2135
2018static void devm_spi_unregister(struct device *dev, void *res) 2136static void devm_spi_unregister(struct device *dev, void *res)
2019{ 2137{
2020 spi_unregister_master(*(struct spi_master **)res); 2138 spi_unregister_controller(*(struct spi_controller **)res);
2021} 2139}
2022 2140
2023/** 2141/**
2024 * devm_spi_register_master - register managed SPI master controller 2142 * devm_spi_register_controller - register managed SPI master or slave
2025 * @dev: device managing SPI master 2143 * controller
2026 * @master: initialized master, originally from spi_alloc_master() 2144 * @dev: device managing SPI controller
2145 * @ctlr: initialized controller, originally from spi_alloc_master() or
2146 * spi_alloc_slave()
2027 * Context: can sleep 2147 * Context: can sleep
2028 * 2148 *
2029 * Register a SPI device as with spi_register_master() which will 2149 * Register a SPI device as with spi_register_controller() which will
2030 * automatically be unregister 2150 * automatically be unregister
2031 * 2151 *
2032 * Return: zero on success, else a negative error code. 2152 * Return: zero on success, else a negative error code.
2033 */ 2153 */
2034int devm_spi_register_master(struct device *dev, struct spi_master *master) 2154int devm_spi_register_controller(struct device *dev,
2155 struct spi_controller *ctlr)
2035{ 2156{
2036 struct spi_master **ptr; 2157 struct spi_controller **ptr;
2037 int ret; 2158 int ret;
2038 2159
2039 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2160 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2040 if (!ptr) 2161 if (!ptr)
2041 return -ENOMEM; 2162 return -ENOMEM;
2042 2163
2043 ret = spi_register_master(master); 2164 ret = spi_register_controller(ctlr);
2044 if (!ret) { 2165 if (!ret) {
2045 *ptr = master; 2166 *ptr = ctlr;
2046 devres_add(dev, ptr); 2167 devres_add(dev, ptr);
2047 } else { 2168 } else {
2048 devres_free(ptr); 2169 devres_free(ptr);
@@ -2050,7 +2171,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
2050 2171
2051 return ret; 2172 return ret;
2052} 2173}
2053EXPORT_SYMBOL_GPL(devm_spi_register_master); 2174EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2054 2175
2055static int __unregister(struct device *dev, void *null) 2176static int __unregister(struct device *dev, void *null)
2056{ 2177{
@@ -2059,71 +2180,71 @@ static int __unregister(struct device *dev, void *null)
2059} 2180}
2060 2181
2061/** 2182/**
2062 * spi_unregister_master - unregister SPI master controller 2183 * spi_unregister_controller - unregister SPI master or slave controller
2063 * @master: the master being unregistered 2184 * @ctlr: the controller being unregistered
2064 * Context: can sleep 2185 * Context: can sleep
2065 * 2186 *
2066 * This call is used only by SPI master controller drivers, which are the 2187 * This call is used only by SPI controller drivers, which are the
2067 * only ones directly touching chip registers. 2188 * only ones directly touching chip registers.
2068 * 2189 *
2069 * This must be called from context that can sleep. 2190 * This must be called from context that can sleep.
2070 */ 2191 */
2071void spi_unregister_master(struct spi_master *master) 2192void spi_unregister_controller(struct spi_controller *ctlr)
2072{ 2193{
2073 int dummy; 2194 int dummy;
2074 2195
2075 if (master->queued) { 2196 if (ctlr->queued) {
2076 if (spi_destroy_queue(master)) 2197 if (spi_destroy_queue(ctlr))
2077 dev_err(&master->dev, "queue remove failed\n"); 2198 dev_err(&ctlr->dev, "queue remove failed\n");
2078 } 2199 }
2079 2200
2080 mutex_lock(&board_lock); 2201 mutex_lock(&board_lock);
2081 list_del(&master->list); 2202 list_del(&ctlr->list);
2082 mutex_unlock(&board_lock); 2203 mutex_unlock(&board_lock);
2083 2204
2084 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2205 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2085 device_unregister(&master->dev); 2206 device_unregister(&ctlr->dev);
2086} 2207}
2087EXPORT_SYMBOL_GPL(spi_unregister_master); 2208EXPORT_SYMBOL_GPL(spi_unregister_controller);
2088 2209
2089int spi_master_suspend(struct spi_master *master) 2210int spi_controller_suspend(struct spi_controller *ctlr)
2090{ 2211{
2091 int ret; 2212 int ret;
2092 2213
2093 /* Basically no-ops for non-queued masters */ 2214 /* Basically no-ops for non-queued controllers */
2094 if (!master->queued) 2215 if (!ctlr->queued)
2095 return 0; 2216 return 0;
2096 2217
2097 ret = spi_stop_queue(master); 2218 ret = spi_stop_queue(ctlr);
2098 if (ret) 2219 if (ret)
2099 dev_err(&master->dev, "queue stop failed\n"); 2220 dev_err(&ctlr->dev, "queue stop failed\n");
2100 2221
2101 return ret; 2222 return ret;
2102} 2223}
2103EXPORT_SYMBOL_GPL(spi_master_suspend); 2224EXPORT_SYMBOL_GPL(spi_controller_suspend);
2104 2225
2105int spi_master_resume(struct spi_master *master) 2226int spi_controller_resume(struct spi_controller *ctlr)
2106{ 2227{
2107 int ret; 2228 int ret;
2108 2229
2109 if (!master->queued) 2230 if (!ctlr->queued)
2110 return 0; 2231 return 0;
2111 2232
2112 ret = spi_start_queue(master); 2233 ret = spi_start_queue(ctlr);
2113 if (ret) 2234 if (ret)
2114 dev_err(&master->dev, "queue restart failed\n"); 2235 dev_err(&ctlr->dev, "queue restart failed\n");
2115 2236
2116 return ret; 2237 return ret;
2117} 2238}
2118EXPORT_SYMBOL_GPL(spi_master_resume); 2239EXPORT_SYMBOL_GPL(spi_controller_resume);
2119 2240
2120static int __spi_master_match(struct device *dev, const void *data) 2241static int __spi_controller_match(struct device *dev, const void *data)
2121{ 2242{
2122 struct spi_master *m; 2243 struct spi_controller *ctlr;
2123 const u16 *bus_num = data; 2244 const u16 *bus_num = data;
2124 2245
2125 m = container_of(dev, struct spi_master, dev); 2246 ctlr = container_of(dev, struct spi_controller, dev);
2126 return m->bus_num == *bus_num; 2247 return ctlr->bus_num == *bus_num;
2127} 2248}
2128 2249
2129/** 2250/**
@@ -2133,22 +2254,22 @@ static int __spi_master_match(struct device *dev, const void *data)
2133 * 2254 *
2134 * This call may be used with devices that are registered after 2255 * This call may be used with devices that are registered after
2135 * arch init time. It returns a refcounted pointer to the relevant 2256 * arch init time. It returns a refcounted pointer to the relevant
2136 * spi_master (which the caller must release), or NULL if there is 2257 * spi_controller (which the caller must release), or NULL if there is
2137 * no such master registered. 2258 * no such master registered.
2138 * 2259 *
2139 * Return: the SPI master structure on success, else NULL. 2260 * Return: the SPI master structure on success, else NULL.
2140 */ 2261 */
2141struct spi_master *spi_busnum_to_master(u16 bus_num) 2262struct spi_controller *spi_busnum_to_master(u16 bus_num)
2142{ 2263{
2143 struct device *dev; 2264 struct device *dev;
2144 struct spi_master *master = NULL; 2265 struct spi_controller *ctlr = NULL;
2145 2266
2146 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2267 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2147 __spi_master_match); 2268 __spi_controller_match);
2148 if (dev) 2269 if (dev)
2149 master = container_of(dev, struct spi_master, dev); 2270 ctlr = container_of(dev, struct spi_controller, dev);
2150 /* reference got in class_find_device */ 2271 /* reference got in class_find_device */
2151 return master; 2272 return ctlr;
2152} 2273}
2153EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2274EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2154 2275
@@ -2168,7 +2289,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2168 * Return: the pointer to the allocated data 2289 * Return: the pointer to the allocated data
2169 * 2290 *
2170 * This may get enhanced in the future to allocate from a memory pool 2291 * This may get enhanced in the future to allocate from a memory pool
2171 * of the @spi_device or @spi_master to avoid repeated allocations. 2292 * of the @spi_device or @spi_controller to avoid repeated allocations.
2172 */ 2293 */
2173void *spi_res_alloc(struct spi_device *spi, 2294void *spi_res_alloc(struct spi_device *spi,
2174 spi_res_release_t release, 2295 spi_res_release_t release,
@@ -2220,11 +2341,10 @@ EXPORT_SYMBOL_GPL(spi_res_add);
2220 2341
2221/** 2342/**
2222 * spi_res_release - release all spi resources for this message 2343 * spi_res_release - release all spi resources for this message
2223 * @master: the @spi_master 2344 * @ctlr: the @spi_controller
2224 * @message: the @spi_message 2345 * @message: the @spi_message
2225 */ 2346 */
2226void spi_res_release(struct spi_master *master, 2347void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2227 struct spi_message *message)
2228{ 2348{
2229 struct spi_res *res; 2349 struct spi_res *res;
2230 2350
@@ -2233,7 +2353,7 @@ void spi_res_release(struct spi_master *master,
2233 struct spi_res, entry); 2353 struct spi_res, entry);
2234 2354
2235 if (res->release) 2355 if (res->release)
2236 res->release(master, message, res->data); 2356 res->release(ctlr, message, res->data);
2237 2357
2238 list_del(&res->entry); 2358 list_del(&res->entry);
2239 2359
@@ -2246,7 +2366,7 @@ EXPORT_SYMBOL_GPL(spi_res_release);
2246 2366
2247/* Core methods for spi_message alterations */ 2367/* Core methods for spi_message alterations */
2248 2368
2249static void __spi_replace_transfers_release(struct spi_master *master, 2369static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2250 struct spi_message *msg, 2370 struct spi_message *msg,
2251 void *res) 2371 void *res)
2252{ 2372{
@@ -2255,7 +2375,7 @@ static void __spi_replace_transfers_release(struct spi_master *master,
2255 2375
2256 /* call extra callback if requested */ 2376 /* call extra callback if requested */
2257 if (rxfer->release) 2377 if (rxfer->release)
2258 rxfer->release(master, msg, res); 2378 rxfer->release(ctlr, msg, res);
2259 2379
2260 /* insert replaced transfers back into the message */ 2380 /* insert replaced transfers back into the message */
2261 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2381 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
@@ -2375,7 +2495,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
2375} 2495}
2376EXPORT_SYMBOL_GPL(spi_replace_transfers); 2496EXPORT_SYMBOL_GPL(spi_replace_transfers);
2377 2497
2378static int __spi_split_transfer_maxsize(struct spi_master *master, 2498static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2379 struct spi_message *msg, 2499 struct spi_message *msg,
2380 struct spi_transfer **xferp, 2500 struct spi_transfer **xferp,
2381 size_t maxsize, 2501 size_t maxsize,
@@ -2437,7 +2557,7 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
2437 *xferp = &xfers[count - 1]; 2557 *xferp = &xfers[count - 1];
2438 2558
2439 /* increment statistics counters */ 2559 /* increment statistics counters */
2440 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2560 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2441 transfers_split_maxsize); 2561 transfers_split_maxsize);
2442 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2562 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2443 transfers_split_maxsize); 2563 transfers_split_maxsize);
@@ -2449,14 +2569,14 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
2449 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2569 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers
2450 * when an individual transfer exceeds a 2570 * when an individual transfer exceeds a
2451 * certain size 2571 * certain size
2452 * @master: the @spi_master for this transfer 2572 * @ctlr: the @spi_controller for this transfer
2453 * @msg: the @spi_message to transform 2573 * @msg: the @spi_message to transform
2454 * @maxsize: the maximum when to apply this 2574 * @maxsize: the maximum when to apply this
2455 * @gfp: GFP allocation flags 2575 * @gfp: GFP allocation flags
2456 * 2576 *
2457 * Return: status of transformation 2577 * Return: status of transformation
2458 */ 2578 */
2459int spi_split_transfers_maxsize(struct spi_master *master, 2579int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2460 struct spi_message *msg, 2580 struct spi_message *msg,
2461 size_t maxsize, 2581 size_t maxsize,
2462 gfp_t gfp) 2582 gfp_t gfp)
@@ -2472,8 +2592,8 @@ int spi_split_transfers_maxsize(struct spi_master *master,
2472 */ 2592 */
2473 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2593 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2474 if (xfer->len > maxsize) { 2594 if (xfer->len > maxsize) {
2475 ret = __spi_split_transfer_maxsize( 2595 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2476 master, msg, &xfer, maxsize, gfp); 2596 maxsize, gfp);
2477 if (ret) 2597 if (ret)
2478 return ret; 2598 return ret;
2479 } 2599 }
@@ -2485,18 +2605,18 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2485 2605
2486/*-------------------------------------------------------------------------*/ 2606/*-------------------------------------------------------------------------*/
2487 2607
2488/* Core methods for SPI master protocol drivers. Some of the 2608/* Core methods for SPI controller protocol drivers. Some of the
2489 * other core methods are currently defined as inline functions. 2609 * other core methods are currently defined as inline functions.
2490 */ 2610 */
2491 2611
2492static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2612static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2613 u8 bits_per_word)
2493{ 2614{
2494 if (master->bits_per_word_mask) { 2615 if (ctlr->bits_per_word_mask) {
2495 /* Only 32 bits fit in the mask */ 2616 /* Only 32 bits fit in the mask */
2496 if (bits_per_word > 32) 2617 if (bits_per_word > 32)
2497 return -EINVAL; 2618 return -EINVAL;
2498 if (!(master->bits_per_word_mask & 2619 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2499 SPI_BPW_MASK(bits_per_word)))
2500 return -EINVAL; 2620 return -EINVAL;
2501 } 2621 }
2502 2622
@@ -2542,9 +2662,9 @@ int spi_setup(struct spi_device *spi)
2542 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2662 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2543 return -EINVAL; 2663 return -EINVAL;
2544 /* help drivers fail *cleanly* when they need options 2664 /* help drivers fail *cleanly* when they need options
2545 * that aren't supported with their current master 2665 * that aren't supported with their current controller
2546 */ 2666 */
2547 bad_bits = spi->mode & ~spi->master->mode_bits; 2667 bad_bits = spi->mode & ~spi->controller->mode_bits;
2548 ugly_bits = bad_bits & 2668 ugly_bits = bad_bits &
2549 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2669 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2550 if (ugly_bits) { 2670 if (ugly_bits) {
@@ -2563,15 +2683,16 @@ int spi_setup(struct spi_device *spi)
2563 if (!spi->bits_per_word) 2683 if (!spi->bits_per_word)
2564 spi->bits_per_word = 8; 2684 spi->bits_per_word = 8;
2565 2685
2566 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2686 status = __spi_validate_bits_per_word(spi->controller,
2687 spi->bits_per_word);
2567 if (status) 2688 if (status)
2568 return status; 2689 return status;
2569 2690
2570 if (!spi->max_speed_hz) 2691 if (!spi->max_speed_hz)
2571 spi->max_speed_hz = spi->master->max_speed_hz; 2692 spi->max_speed_hz = spi->controller->max_speed_hz;
2572 2693
2573 if (spi->master->setup) 2694 if (spi->controller->setup)
2574 status = spi->master->setup(spi); 2695 status = spi->controller->setup(spi);
2575 2696
2576 spi_set_cs(spi, false); 2697 spi_set_cs(spi, false);
2577 2698
@@ -2590,7 +2711,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
2590 2711
2591static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2712static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2592{ 2713{
2593 struct spi_master *master = spi->master; 2714 struct spi_controller *ctlr = spi->controller;
2594 struct spi_transfer *xfer; 2715 struct spi_transfer *xfer;
2595 int w_size; 2716 int w_size;
2596 2717
@@ -2602,16 +2723,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2602 * either MOSI or MISO is missing. They can also be caused by 2723 * either MOSI or MISO is missing. They can also be caused by
2603 * software limitations. 2724 * software limitations.
2604 */ 2725 */
2605 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2726 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
2606 || (spi->mode & SPI_3WIRE)) { 2727 (spi->mode & SPI_3WIRE)) {
2607 unsigned flags = master->flags; 2728 unsigned flags = ctlr->flags;
2608 2729
2609 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2730 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2610 if (xfer->rx_buf && xfer->tx_buf) 2731 if (xfer->rx_buf && xfer->tx_buf)
2611 return -EINVAL; 2732 return -EINVAL;
2612 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2733 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
2613 return -EINVAL; 2734 return -EINVAL;
2614 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2735 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
2615 return -EINVAL; 2736 return -EINVAL;
2616 } 2737 }
2617 } 2738 }
@@ -2631,13 +2752,12 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2631 if (!xfer->speed_hz) 2752 if (!xfer->speed_hz)
2632 xfer->speed_hz = spi->max_speed_hz; 2753 xfer->speed_hz = spi->max_speed_hz;
2633 if (!xfer->speed_hz) 2754 if (!xfer->speed_hz)
2634 xfer->speed_hz = master->max_speed_hz; 2755 xfer->speed_hz = ctlr->max_speed_hz;
2635 2756
2636 if (master->max_speed_hz && 2757 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
2637 xfer->speed_hz > master->max_speed_hz) 2758 xfer->speed_hz = ctlr->max_speed_hz;
2638 xfer->speed_hz = master->max_speed_hz;
2639 2759
2640 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2760 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
2641 return -EINVAL; 2761 return -EINVAL;
2642 2762
2643 /* 2763 /*
@@ -2655,8 +2775,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2655 if (xfer->len % w_size) 2775 if (xfer->len % w_size)
2656 return -EINVAL; 2776 return -EINVAL;
2657 2777
2658 if (xfer->speed_hz && master->min_speed_hz && 2778 if (xfer->speed_hz && ctlr->min_speed_hz &&
2659 xfer->speed_hz < master->min_speed_hz) 2779 xfer->speed_hz < ctlr->min_speed_hz)
2660 return -EINVAL; 2780 return -EINVAL;
2661 2781
2662 if (xfer->tx_buf && !xfer->tx_nbits) 2782 if (xfer->tx_buf && !xfer->tx_nbits)
@@ -2701,16 +2821,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2701 2821
2702static int __spi_async(struct spi_device *spi, struct spi_message *message) 2822static int __spi_async(struct spi_device *spi, struct spi_message *message)
2703{ 2823{
2704 struct spi_master *master = spi->master; 2824 struct spi_controller *ctlr = spi->controller;
2705 2825
2706 message->spi = spi; 2826 message->spi = spi;
2707 2827
2708 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2828 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
2709 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2829 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2710 2830
2711 trace_spi_message_submit(message); 2831 trace_spi_message_submit(message);
2712 2832
2713 return master->transfer(spi, message); 2833 return ctlr->transfer(spi, message);
2714} 2834}
2715 2835
2716/** 2836/**
@@ -2746,7 +2866,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
2746 */ 2866 */
2747int spi_async(struct spi_device *spi, struct spi_message *message) 2867int spi_async(struct spi_device *spi, struct spi_message *message)
2748{ 2868{
2749 struct spi_master *master = spi->master; 2869 struct spi_controller *ctlr = spi->controller;
2750 int ret; 2870 int ret;
2751 unsigned long flags; 2871 unsigned long flags;
2752 2872
@@ -2754,14 +2874,14 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
2754 if (ret != 0) 2874 if (ret != 0)
2755 return ret; 2875 return ret;
2756 2876
2757 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2877 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2758 2878
2759 if (master->bus_lock_flag) 2879 if (ctlr->bus_lock_flag)
2760 ret = -EBUSY; 2880 ret = -EBUSY;
2761 else 2881 else
2762 ret = __spi_async(spi, message); 2882 ret = __spi_async(spi, message);
2763 2883
2764 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2884 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2765 2885
2766 return ret; 2886 return ret;
2767} 2887}
@@ -2800,7 +2920,7 @@ EXPORT_SYMBOL_GPL(spi_async);
2800 */ 2920 */
2801int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2921int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2802{ 2922{
2803 struct spi_master *master = spi->master; 2923 struct spi_controller *ctlr = spi->controller;
2804 int ret; 2924 int ret;
2805 unsigned long flags; 2925 unsigned long flags;
2806 2926
@@ -2808,11 +2928,11 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2808 if (ret != 0) 2928 if (ret != 0)
2809 return ret; 2929 return ret;
2810 2930
2811 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2931 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2812 2932
2813 ret = __spi_async(spi, message); 2933 ret = __spi_async(spi, message);
2814 2934
2815 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2935 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2816 2936
2817 return ret; 2937 return ret;
2818 2938
@@ -2824,7 +2944,7 @@ int spi_flash_read(struct spi_device *spi,
2824 struct spi_flash_read_message *msg) 2944 struct spi_flash_read_message *msg)
2825 2945
2826{ 2946{
2827 struct spi_master *master = spi->master; 2947 struct spi_controller *master = spi->controller;
2828 struct device *rx_dev = NULL; 2948 struct device *rx_dev = NULL;
2829 int ret; 2949 int ret;
2830 2950
@@ -2878,7 +2998,7 @@ EXPORT_SYMBOL_GPL(spi_flash_read);
2878 2998
2879/*-------------------------------------------------------------------------*/ 2999/*-------------------------------------------------------------------------*/
2880 3000
2881/* Utility methods for SPI master protocol drivers, layered on 3001/* Utility methods for SPI protocol drivers, layered on
2882 * top of the core. Some other utility methods are defined as 3002 * top of the core. Some other utility methods are defined as
2883 * inline functions. 3003 * inline functions.
2884 */ 3004 */
@@ -2892,7 +3012,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2892{ 3012{
2893 DECLARE_COMPLETION_ONSTACK(done); 3013 DECLARE_COMPLETION_ONSTACK(done);
2894 int status; 3014 int status;
2895 struct spi_master *master = spi->master; 3015 struct spi_controller *ctlr = spi->controller;
2896 unsigned long flags; 3016 unsigned long flags;
2897 3017
2898 status = __spi_validate(spi, message); 3018 status = __spi_validate(spi, message);
@@ -2903,7 +3023,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2903 message->context = &done; 3023 message->context = &done;
2904 message->spi = spi; 3024 message->spi = spi;
2905 3025
2906 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 3026 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
2907 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3027 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2908 3028
2909 /* If we're not using the legacy transfer method then we will 3029 /* If we're not using the legacy transfer method then we will
@@ -2911,14 +3031,14 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2911 * This code would be less tricky if we could remove the 3031 * This code would be less tricky if we could remove the
2912 * support for driver implemented message queues. 3032 * support for driver implemented message queues.
2913 */ 3033 */
2914 if (master->transfer == spi_queued_transfer) { 3034 if (ctlr->transfer == spi_queued_transfer) {
2915 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 3035 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2916 3036
2917 trace_spi_message_submit(message); 3037 trace_spi_message_submit(message);
2918 3038
2919 status = __spi_queued_transfer(spi, message, false); 3039 status = __spi_queued_transfer(spi, message, false);
2920 3040
2921 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3041 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2922 } else { 3042 } else {
2923 status = spi_async_locked(spi, message); 3043 status = spi_async_locked(spi, message);
2924 } 3044 }
@@ -2927,12 +3047,12 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2927 /* Push out the messages in the calling context if we 3047 /* Push out the messages in the calling context if we
2928 * can. 3048 * can.
2929 */ 3049 */
2930 if (master->transfer == spi_queued_transfer) { 3050 if (ctlr->transfer == spi_queued_transfer) {
2931 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 3051 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2932 spi_sync_immediate); 3052 spi_sync_immediate);
2933 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3053 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2934 spi_sync_immediate); 3054 spi_sync_immediate);
2935 __spi_pump_messages(master, false); 3055 __spi_pump_messages(ctlr, false);
2936 } 3056 }
2937 3057
2938 wait_for_completion(&done); 3058 wait_for_completion(&done);
@@ -2967,9 +3087,9 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
2967{ 3087{
2968 int ret; 3088 int ret;
2969 3089
2970 mutex_lock(&spi->master->bus_lock_mutex); 3090 mutex_lock(&spi->controller->bus_lock_mutex);
2971 ret = __spi_sync(spi, message); 3091 ret = __spi_sync(spi, message);
2972 mutex_unlock(&spi->master->bus_lock_mutex); 3092 mutex_unlock(&spi->controller->bus_lock_mutex);
2973 3093
2974 return ret; 3094 return ret;
2975} 3095}
@@ -2999,7 +3119,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
2999 3119
3000/** 3120/**
3001 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3121 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3002 * @master: SPI bus master that should be locked for exclusive bus access 3122 * @ctlr: SPI bus master that should be locked for exclusive bus access
3003 * Context: can sleep 3123 * Context: can sleep
3004 * 3124 *
3005 * This call may only be used from a context that may sleep. The sleep 3125 * This call may only be used from a context that may sleep. The sleep
@@ -3012,15 +3132,15 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
3012 * 3132 *
3013 * Return: always zero. 3133 * Return: always zero.
3014 */ 3134 */
3015int spi_bus_lock(struct spi_master *master) 3135int spi_bus_lock(struct spi_controller *ctlr)
3016{ 3136{
3017 unsigned long flags; 3137 unsigned long flags;
3018 3138
3019 mutex_lock(&master->bus_lock_mutex); 3139 mutex_lock(&ctlr->bus_lock_mutex);
3020 3140
3021 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 3141 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3022 master->bus_lock_flag = 1; 3142 ctlr->bus_lock_flag = 1;
3023 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3143 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3024 3144
3025 /* mutex remains locked until spi_bus_unlock is called */ 3145 /* mutex remains locked until spi_bus_unlock is called */
3026 3146
@@ -3030,7 +3150,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
3030 3150
3031/** 3151/**
3032 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3152 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3033 * @master: SPI bus master that was locked for exclusive bus access 3153 * @ctlr: SPI bus master that was locked for exclusive bus access
3034 * Context: can sleep 3154 * Context: can sleep
3035 * 3155 *
3036 * This call may only be used from a context that may sleep. The sleep 3156 * This call may only be used from a context that may sleep. The sleep
@@ -3041,11 +3161,11 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
3041 * 3161 *
3042 * Return: always zero. 3162 * Return: always zero.
3043 */ 3163 */
3044int spi_bus_unlock(struct spi_master *master) 3164int spi_bus_unlock(struct spi_controller *ctlr)
3045{ 3165{
3046 master->bus_lock_flag = 0; 3166 ctlr->bus_lock_flag = 0;
3047 3167
3048 mutex_unlock(&master->bus_lock_mutex); 3168 mutex_unlock(&ctlr->bus_lock_mutex);
3049 3169
3050 return 0; 3170 return 0;
3051} 3171}
@@ -3147,45 +3267,48 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3147 return dev ? to_spi_device(dev) : NULL; 3267 return dev ? to_spi_device(dev) : NULL;
3148} 3268}
3149 3269
3150static int __spi_of_master_match(struct device *dev, const void *data) 3270static int __spi_of_controller_match(struct device *dev, const void *data)
3151{ 3271{
3152 return dev->of_node == data; 3272 return dev->of_node == data;
3153} 3273}
3154 3274
3155/* the spi masters are not using spi_bus, so we find it with another way */ 3275/* the spi controllers are not using spi_bus, so we find it with another way */
3156static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3276static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3157{ 3277{
3158 struct device *dev; 3278 struct device *dev;
3159 3279
3160 dev = class_find_device(&spi_master_class, NULL, node, 3280 dev = class_find_device(&spi_master_class, NULL, node,
3161 __spi_of_master_match); 3281 __spi_of_controller_match);
3282 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3283 dev = class_find_device(&spi_slave_class, NULL, node,
3284 __spi_of_controller_match);
3162 if (!dev) 3285 if (!dev)
3163 return NULL; 3286 return NULL;
3164 3287
3165 /* reference got in class_find_device */ 3288 /* reference got in class_find_device */
3166 return container_of(dev, struct spi_master, dev); 3289 return container_of(dev, struct spi_controller, dev);
3167} 3290}
3168 3291
3169static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3292static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3170 void *arg) 3293 void *arg)
3171{ 3294{
3172 struct of_reconfig_data *rd = arg; 3295 struct of_reconfig_data *rd = arg;
3173 struct spi_master *master; 3296 struct spi_controller *ctlr;
3174 struct spi_device *spi; 3297 struct spi_device *spi;
3175 3298
3176 switch (of_reconfig_get_state_change(action, arg)) { 3299 switch (of_reconfig_get_state_change(action, arg)) {
3177 case OF_RECONFIG_CHANGE_ADD: 3300 case OF_RECONFIG_CHANGE_ADD:
3178 master = of_find_spi_master_by_node(rd->dn->parent); 3301 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3179 if (master == NULL) 3302 if (ctlr == NULL)
3180 return NOTIFY_OK; /* not for us */ 3303 return NOTIFY_OK; /* not for us */
3181 3304
3182 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3305 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3183 put_device(&master->dev); 3306 put_device(&ctlr->dev);
3184 return NOTIFY_OK; 3307 return NOTIFY_OK;
3185 } 3308 }
3186 3309
3187 spi = of_register_spi_device(master, rd->dn); 3310 spi = of_register_spi_device(ctlr, rd->dn);
3188 put_device(&master->dev); 3311 put_device(&ctlr->dev);
3189 3312
3190 if (IS_ERR(spi)) { 3313 if (IS_ERR(spi)) {
3191 pr_err("%s: failed to create for '%s'\n", 3314 pr_err("%s: failed to create for '%s'\n",
@@ -3224,7 +3347,7 @@ extern struct notifier_block spi_of_notifier;
3224#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3347#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3225 3348
3226#if IS_ENABLED(CONFIG_ACPI) 3349#if IS_ENABLED(CONFIG_ACPI)
3227static int spi_acpi_master_match(struct device *dev, const void *data) 3350static int spi_acpi_controller_match(struct device *dev, const void *data)
3228{ 3351{
3229 return ACPI_COMPANION(dev->parent) == data; 3352 return ACPI_COMPANION(dev->parent) == data;
3230} 3353}
@@ -3234,16 +3357,19 @@ static int spi_acpi_device_match(struct device *dev, void *data)
3234 return ACPI_COMPANION(dev) == data; 3357 return ACPI_COMPANION(dev) == data;
3235} 3358}
3236 3359
3237static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3360static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3238{ 3361{
3239 struct device *dev; 3362 struct device *dev;
3240 3363
3241 dev = class_find_device(&spi_master_class, NULL, adev, 3364 dev = class_find_device(&spi_master_class, NULL, adev,
3242 spi_acpi_master_match); 3365 spi_acpi_controller_match);
3366 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3367 dev = class_find_device(&spi_slave_class, NULL, adev,
3368 spi_acpi_controller_match);
3243 if (!dev) 3369 if (!dev)
3244 return NULL; 3370 return NULL;
3245 3371
3246 return container_of(dev, struct spi_master, dev); 3372 return container_of(dev, struct spi_controller, dev);
3247} 3373}
3248 3374
3249static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3375static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
@@ -3259,17 +3385,17 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3259 void *arg) 3385 void *arg)
3260{ 3386{
3261 struct acpi_device *adev = arg; 3387 struct acpi_device *adev = arg;
3262 struct spi_master *master; 3388 struct spi_controller *ctlr;
3263 struct spi_device *spi; 3389 struct spi_device *spi;
3264 3390
3265 switch (value) { 3391 switch (value) {
3266 case ACPI_RECONFIG_DEVICE_ADD: 3392 case ACPI_RECONFIG_DEVICE_ADD:
3267 master = acpi_spi_find_master_by_adev(adev->parent); 3393 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3268 if (!master) 3394 if (!ctlr)
3269 break; 3395 break;
3270 3396
3271 acpi_register_spi_device(master, adev); 3397 acpi_register_spi_device(ctlr, adev);
3272 put_device(&master->dev); 3398 put_device(&ctlr->dev);
3273 break; 3399 break;
3274 case ACPI_RECONFIG_DEVICE_REMOVE: 3400 case ACPI_RECONFIG_DEVICE_REMOVE:
3275 if (!acpi_device_enumerated(adev)) 3401 if (!acpi_device_enumerated(adev))
@@ -3312,6 +3438,12 @@ static int __init spi_init(void)
3312 if (status < 0) 3438 if (status < 0)
3313 goto err2; 3439 goto err2;
3314 3440
3441 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3442 status = class_register(&spi_slave_class);
3443 if (status < 0)
3444 goto err3;
3445 }
3446
3315 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3447 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3316 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3448 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3317 if (IS_ENABLED(CONFIG_ACPI)) 3449 if (IS_ENABLED(CONFIG_ACPI))
@@ -3319,6 +3451,8 @@ static int __init spi_init(void)
3319 3451
3320 return 0; 3452 return 0;
3321 3453
3454err3:
3455 class_unregister(&spi_master_class);
3322err2: 3456err2:
3323 bus_unregister(&spi_bus_type); 3457 bus_unregister(&spi_bus_type);
3324err1: 3458err1:
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 935bd2854ff1..7b2170bfd6e7 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,13 +24,13 @@
24 24
25struct dma_chan; 25struct dma_chan;
26struct property_entry; 26struct property_entry;
27struct spi_master; 27struct spi_controller;
28struct spi_transfer; 28struct spi_transfer;
29struct spi_flash_read_message; 29struct spi_flash_read_message;
30 30
31/* 31/*
32 * INTERFACES between SPI master-side drivers and SPI infrastructure. 32 * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
33 * (There's no SPI slave support for Linux yet...) 33 * and SPI infrastructure.
34 */ 34 */
35extern struct bus_type spi_bus_type; 35extern struct bus_type spi_bus_type;
36 36
@@ -84,7 +84,7 @@ struct spi_statistics {
84 84
85void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 85void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
86 struct spi_transfer *xfer, 86 struct spi_transfer *xfer,
87 struct spi_master *master); 87 struct spi_controller *ctlr);
88 88
89#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ 89#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
90 do { \ 90 do { \
@@ -98,13 +98,14 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
98 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) 98 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
99 99
100/** 100/**
101 * struct spi_device - Master side proxy for an SPI slave device 101 * struct spi_device - Controller side proxy for an SPI slave device
102 * @dev: Driver model representation of the device. 102 * @dev: Driver model representation of the device.
103 * @master: SPI controller used with the device. 103 * @controller: SPI controller used with the device.
104 * @master: Copy of controller, for backwards compatibility.
104 * @max_speed_hz: Maximum clock rate to be used with this chip 105 * @max_speed_hz: Maximum clock rate to be used with this chip
105 * (on this board); may be changed by the device's driver. 106 * (on this board); may be changed by the device's driver.
106 * The spi_transfer.speed_hz can override this for each transfer. 107 * The spi_transfer.speed_hz can override this for each transfer.
107 * @chip_select: Chipselect, distinguishing chips handled by @master. 108 * @chip_select: Chipselect, distinguishing chips handled by @controller.
108 * @mode: The spi mode defines how data is clocked out and in. 109 * @mode: The spi mode defines how data is clocked out and in.
109 * This may be changed by the device's driver. 110 * This may be changed by the device's driver.
110 * The "active low" default for chipselect mode can be overridden 111 * The "active low" default for chipselect mode can be overridden
@@ -140,7 +141,8 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
140 */ 141 */
141struct spi_device { 142struct spi_device {
142 struct device dev; 143 struct device dev;
143 struct spi_master *master; 144 struct spi_controller *controller;
145 struct spi_controller *master; /* compatibility layer */
144 u32 max_speed_hz; 146 u32 max_speed_hz;
145 u8 chip_select; 147 u8 chip_select;
146 u8 bits_per_word; 148 u8 bits_per_word;
@@ -198,7 +200,7 @@ static inline void spi_dev_put(struct spi_device *spi)
198 put_device(&spi->dev); 200 put_device(&spi->dev);
199} 201}
200 202
201/* ctldata is for the bus_master driver's runtime state */ 203/* ctldata is for the bus_controller driver's runtime state */
202static inline void *spi_get_ctldata(struct spi_device *spi) 204static inline void *spi_get_ctldata(struct spi_device *spi)
203{ 205{
204 return spi->controller_state; 206 return spi->controller_state;
@@ -292,9 +294,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
292 spi_unregister_driver) 294 spi_unregister_driver)
293 295
294/** 296/**
295 * struct spi_master - interface to SPI master controller 297 * struct spi_controller - interface to SPI master or slave controller
296 * @dev: device interface to this driver 298 * @dev: device interface to this driver
297 * @list: link with the global spi_master list 299 * @list: link with the global spi_controller list
298 * @bus_num: board-specific (and often SOC-specific) identifier for a 300 * @bus_num: board-specific (and often SOC-specific) identifier for a
299 * given SPI controller. 301 * given SPI controller.
300 * @num_chipselect: chipselects are used to distinguish individual 302 * @num_chipselect: chipselects are used to distinguish individual
@@ -311,6 +313,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
311 * @min_speed_hz: Lowest supported transfer speed 313 * @min_speed_hz: Lowest supported transfer speed
312 * @max_speed_hz: Highest supported transfer speed 314 * @max_speed_hz: Highest supported transfer speed
313 * @flags: other constraints relevant to this driver 315 * @flags: other constraints relevant to this driver
316 * @slave: indicates that this is an SPI slave controller
314 * @max_transfer_size: function that returns the max transfer size for 317 * @max_transfer_size: function that returns the max transfer size for
315 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. 318 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
316 * @max_message_size: function that returns the max message size for 319 * @max_message_size: function that returns the max message size for
@@ -326,8 +329,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
326 * the device whose settings are being modified. 329 * the device whose settings are being modified.
327 * @transfer: adds a message to the controller's transfer queue. 330 * @transfer: adds a message to the controller's transfer queue.
328 * @cleanup: frees controller-specific state 331 * @cleanup: frees controller-specific state
329 * @can_dma: determine whether this master supports DMA 332 * @can_dma: determine whether this controller supports DMA
330 * @queued: whether this master is providing an internal message queue 333 * @queued: whether this controller is providing an internal message queue
331 * @kworker: thread struct for message pump 334 * @kworker: thread struct for message pump
332 * @kworker_task: pointer to task for message pump kworker thread 335 * @kworker_task: pointer to task for message pump kworker thread
333 * @pump_messages: work struct for scheduling work to the message pump 336 * @pump_messages: work struct for scheduling work to the message pump
@@ -374,6 +377,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
374 * @handle_err: the subsystem calls the driver to handle an error that occurs 377 * @handle_err: the subsystem calls the driver to handle an error that occurs
375 * in the generic implementation of transfer_one_message(). 378 * in the generic implementation of transfer_one_message().
376 * @unprepare_message: undo any work done by prepare_message(). 379 * @unprepare_message: undo any work done by prepare_message().
380 * @slave_abort: abort the ongoing transfer request on an SPI slave controller
377 * @spi_flash_read: to support spi-controller hardwares that provide 381 * @spi_flash_read: to support spi-controller hardwares that provide
378 * accelerated interface to read from flash devices. 382 * accelerated interface to read from flash devices.
379 * @spi_flash_can_dma: analogous to can_dma() interface, but for 383 * @spi_flash_can_dma: analogous to can_dma() interface, but for
@@ -382,7 +386,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
382 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 386 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
383 * number. Any individual value may be -ENOENT for CS lines that 387 * number. Any individual value may be -ENOENT for CS lines that
384 * are not GPIOs (driven by the SPI controller itself). 388 * are not GPIOs (driven by the SPI controller itself).
385 * @statistics: statistics for the spi_master 389 * @statistics: statistics for the spi_controller
386 * @dma_tx: DMA transmit channel 390 * @dma_tx: DMA transmit channel
387 * @dma_rx: DMA receive channel 391 * @dma_rx: DMA receive channel
388 * @dummy_rx: dummy receive buffer for full-duplex devices 392 * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -391,7 +395,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
391 * what Linux expects, this optional hook can be used to translate 395 * what Linux expects, this optional hook can be used to translate
392 * between the two. 396 * between the two.
393 * 397 *
394 * Each SPI master controller can communicate with one or more @spi_device 398 * Each SPI controller can communicate with one or more @spi_device
395 * children. These make a small bus, sharing MOSI, MISO and SCK signals 399 * children. These make a small bus, sharing MOSI, MISO and SCK signals
396 * but not chip select signals. Each device may be configured to use a 400 * but not chip select signals. Each device may be configured to use a
397 * different clock rate, since those shared signals are ignored unless 401 * different clock rate, since those shared signals are ignored unless
@@ -402,7 +406,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
402 * an SPI slave device. For each such message it queues, it calls the 406 * an SPI slave device. For each such message it queues, it calls the
403 * message's completion function when the transaction completes. 407 * message's completion function when the transaction completes.
404 */ 408 */
405struct spi_master { 409struct spi_controller {
406 struct device dev; 410 struct device dev;
407 411
408 struct list_head list; 412 struct list_head list;
@@ -440,12 +444,16 @@ struct spi_master {
440 444
441 /* other constraints relevant to this driver */ 445 /* other constraints relevant to this driver */
442 u16 flags; 446 u16 flags;
443#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ 447#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
444#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ 448#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
445#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ 449#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
446#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ 450#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
447#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ 451#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
448#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ 452
453#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
454
455 /* flag indicating this is an SPI slave controller */
456 bool slave;
449 457
450 /* 458 /*
451 * on some hardware transfer / message size may be constrained 459 * on some hardware transfer / message size may be constrained
@@ -480,8 +488,8 @@ struct spi_master {
480 * any other request management 488 * any other request management
481 * + To a given spi_device, message queueing is pure fifo 489 * + To a given spi_device, message queueing is pure fifo
482 * 490 *
483 * + The master's main job is to process its message queue, 491 * + The controller's main job is to process its message queue,
484 * selecting a chip then transferring data 492 * selecting a chip (for masters), then transferring data
485 * + If there are multiple spi_device children, the i/o queue 493 * + If there are multiple spi_device children, the i/o queue
486 * arbitration algorithm is unspecified (round robin, fifo, 494 * arbitration algorithm is unspecified (round robin, fifo,
487 * priority, reservations, preemption, etc) 495 * priority, reservations, preemption, etc)
@@ -494,7 +502,7 @@ struct spi_master {
494 int (*transfer)(struct spi_device *spi, 502 int (*transfer)(struct spi_device *spi,
495 struct spi_message *mesg); 503 struct spi_message *mesg);
496 504
497 /* called on release() to free memory provided by spi_master */ 505 /* called on release() to free memory provided by spi_controller */
498 void (*cleanup)(struct spi_device *spi); 506 void (*cleanup)(struct spi_device *spi);
499 507
500 /* 508 /*
@@ -504,13 +512,13 @@ struct spi_master {
504 * not modify or store xfer and dma_tx and dma_rx must be set 512 * not modify or store xfer and dma_tx and dma_rx must be set
505 * while the device is prepared. 513 * while the device is prepared.
506 */ 514 */
507 bool (*can_dma)(struct spi_master *master, 515 bool (*can_dma)(struct spi_controller *ctlr,
508 struct spi_device *spi, 516 struct spi_device *spi,
509 struct spi_transfer *xfer); 517 struct spi_transfer *xfer);
510 518
511 /* 519 /*
512 * These hooks are for drivers that want to use the generic 520 * These hooks are for drivers that want to use the generic
513 * master transfer queueing mechanism. If these are used, the 521 * controller transfer queueing mechanism. If these are used, the
514 * transfer() function above must NOT be specified by the driver. 522 * transfer() function above must NOT be specified by the driver.
515 * Over time we expect SPI drivers to be phased over to this API. 523 * Over time we expect SPI drivers to be phased over to this API.
516 */ 524 */
@@ -531,14 +539,15 @@ struct spi_master {
531 struct completion xfer_completion; 539 struct completion xfer_completion;
532 size_t max_dma_len; 540 size_t max_dma_len;
533 541
534 int (*prepare_transfer_hardware)(struct spi_master *master); 542 int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
535 int (*transfer_one_message)(struct spi_master *master, 543 int (*transfer_one_message)(struct spi_controller *ctlr,
536 struct spi_message *mesg); 544 struct spi_message *mesg);
537 int (*unprepare_transfer_hardware)(struct spi_master *master); 545 int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
538 int (*prepare_message)(struct spi_master *master, 546 int (*prepare_message)(struct spi_controller *ctlr,
539 struct spi_message *message); 547 struct spi_message *message);
540 int (*unprepare_message)(struct spi_master *master, 548 int (*unprepare_message)(struct spi_controller *ctlr,
541 struct spi_message *message); 549 struct spi_message *message);
550 int (*slave_abort)(struct spi_controller *ctlr);
542 int (*spi_flash_read)(struct spi_device *spi, 551 int (*spi_flash_read)(struct spi_device *spi,
543 struct spi_flash_read_message *msg); 552 struct spi_flash_read_message *msg);
544 bool (*spi_flash_can_dma)(struct spi_device *spi, 553 bool (*spi_flash_can_dma)(struct spi_device *spi,
@@ -550,9 +559,9 @@ struct spi_master {
550 * of transfer_one_message() provied by the core. 559 * of transfer_one_message() provied by the core.
551 */ 560 */
552 void (*set_cs)(struct spi_device *spi, bool enable); 561 void (*set_cs)(struct spi_device *spi, bool enable);
553 int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 562 int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
554 struct spi_transfer *transfer); 563 struct spi_transfer *transfer);
555 void (*handle_err)(struct spi_master *master, 564 void (*handle_err)(struct spi_controller *ctlr,
556 struct spi_message *message); 565 struct spi_message *message);
557 566
558 /* gpio chip select */ 567 /* gpio chip select */
@@ -569,57 +578,78 @@ struct spi_master {
569 void *dummy_rx; 578 void *dummy_rx;
570 void *dummy_tx; 579 void *dummy_tx;
571 580
572 int (*fw_translate_cs)(struct spi_master *master, unsigned cs); 581 int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
573}; 582};
574 583
575static inline void *spi_master_get_devdata(struct spi_master *master) 584static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
576{ 585{
577 return dev_get_drvdata(&master->dev); 586 return dev_get_drvdata(&ctlr->dev);
578} 587}
579 588
580static inline void spi_master_set_devdata(struct spi_master *master, void *data) 589static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
590 void *data)
581{ 591{
582 dev_set_drvdata(&master->dev, data); 592 dev_set_drvdata(&ctlr->dev, data);
583} 593}
584 594
585static inline struct spi_master *spi_master_get(struct spi_master *master) 595static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
586{ 596{
587 if (!master || !get_device(&master->dev)) 597 if (!ctlr || !get_device(&ctlr->dev))
588 return NULL; 598 return NULL;
589 return master; 599 return ctlr;
600}
601
602static inline void spi_controller_put(struct spi_controller *ctlr)
603{
604 if (ctlr)
605 put_device(&ctlr->dev);
590} 606}
591 607
592static inline void spi_master_put(struct spi_master *master) 608static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
593{ 609{
594 if (master) 610 return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
595 put_device(&master->dev);
596} 611}
597 612
598/* PM calls that need to be issued by the driver */ 613/* PM calls that need to be issued by the driver */
599extern int spi_master_suspend(struct spi_master *master); 614extern int spi_controller_suspend(struct spi_controller *ctlr);
600extern int spi_master_resume(struct spi_master *master); 615extern int spi_controller_resume(struct spi_controller *ctlr);
601 616
602/* Calls the driver make to interact with the message queue */ 617/* Calls the driver make to interact with the message queue */
603extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); 618extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
604extern void spi_finalize_current_message(struct spi_master *master); 619extern void spi_finalize_current_message(struct spi_controller *ctlr);
605extern void spi_finalize_current_transfer(struct spi_master *master); 620extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
606 621
607/* the spi driver core manages memory for the spi_master classdev */ 622/* the spi driver core manages memory for the spi_controller classdev */
608extern struct spi_master * 623extern struct spi_controller *__spi_alloc_controller(struct device *host,
609spi_alloc_master(struct device *host, unsigned size); 624 unsigned int size, bool slave);
610 625
611extern int spi_register_master(struct spi_master *master); 626static inline struct spi_controller *spi_alloc_master(struct device *host,
612extern int devm_spi_register_master(struct device *dev, 627 unsigned int size)
613 struct spi_master *master); 628{
614extern void spi_unregister_master(struct spi_master *master); 629 return __spi_alloc_controller(host, size, false);
630}
615 631
616extern struct spi_master *spi_busnum_to_master(u16 busnum); 632static inline struct spi_controller *spi_alloc_slave(struct device *host,
633 unsigned int size)
634{
635 if (!IS_ENABLED(CONFIG_SPI_SLAVE))
636 return NULL;
637
638 return __spi_alloc_controller(host, size, true);
639}
640
641extern int spi_register_controller(struct spi_controller *ctlr);
642extern int devm_spi_register_controller(struct device *dev,
643 struct spi_controller *ctlr);
644extern void spi_unregister_controller(struct spi_controller *ctlr);
645
646extern struct spi_controller *spi_busnum_to_master(u16 busnum);
617 647
618/* 648/*
619 * SPI resource management while processing a SPI message 649 * SPI resource management while processing a SPI message
620 */ 650 */
621 651
622typedef void (*spi_res_release_t)(struct spi_master *master, 652typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
623 struct spi_message *msg, 653 struct spi_message *msg,
624 void *res); 654 void *res);
625 655
@@ -644,7 +674,7 @@ extern void *spi_res_alloc(struct spi_device *spi,
644extern void spi_res_add(struct spi_message *message, void *res); 674extern void spi_res_add(struct spi_message *message, void *res);
645extern void spi_res_free(void *res); 675extern void spi_res_free(void *res);
646 676
647extern void spi_res_release(struct spi_master *master, 677extern void spi_res_release(struct spi_controller *ctlr,
648 struct spi_message *message); 678 struct spi_message *message);
649 679
650/*---------------------------------------------------------------------------*/ 680/*---------------------------------------------------------------------------*/
@@ -828,7 +858,7 @@ struct spi_message {
828 858
829 /* for optional use by whatever driver currently owns the 859 /* for optional use by whatever driver currently owns the
830 * spi_message ... between calls to spi_async and then later 860 * spi_message ... between calls to spi_async and then later
831 * complete(), that's the spi_master controller driver. 861 * complete(), that's the spi_controller controller driver.
832 */ 862 */
833 struct list_head queue; 863 struct list_head queue;
834 void *state; 864 void *state;
@@ -912,25 +942,27 @@ extern int spi_setup(struct spi_device *spi);
912extern int spi_async(struct spi_device *spi, struct spi_message *message); 942extern int spi_async(struct spi_device *spi, struct spi_message *message);
913extern int spi_async_locked(struct spi_device *spi, 943extern int spi_async_locked(struct spi_device *spi,
914 struct spi_message *message); 944 struct spi_message *message);
945extern int spi_slave_abort(struct spi_device *spi);
915 946
916static inline size_t 947static inline size_t
917spi_max_message_size(struct spi_device *spi) 948spi_max_message_size(struct spi_device *spi)
918{ 949{
919 struct spi_master *master = spi->master; 950 struct spi_controller *ctlr = spi->controller;
920 if (!master->max_message_size) 951
952 if (!ctlr->max_message_size)
921 return SIZE_MAX; 953 return SIZE_MAX;
922 return master->max_message_size(spi); 954 return ctlr->max_message_size(spi);
923} 955}
924 956
925static inline size_t 957static inline size_t
926spi_max_transfer_size(struct spi_device *spi) 958spi_max_transfer_size(struct spi_device *spi)
927{ 959{
928 struct spi_master *master = spi->master; 960 struct spi_controller *ctlr = spi->controller;
929 size_t tr_max = SIZE_MAX; 961 size_t tr_max = SIZE_MAX;
930 size_t msg_max = spi_max_message_size(spi); 962 size_t msg_max = spi_max_message_size(spi);
931 963
932 if (master->max_transfer_size) 964 if (ctlr->max_transfer_size)
933 tr_max = master->max_transfer_size(spi); 965 tr_max = ctlr->max_transfer_size(spi);
934 966
935 /* transfer size limit must not be greater than messsage size limit */ 967 /* transfer size limit must not be greater than messsage size limit */
936 return min(tr_max, msg_max); 968 return min(tr_max, msg_max);
@@ -941,7 +973,7 @@ spi_max_transfer_size(struct spi_device *spi)
941/* SPI transfer replacement methods which make use of spi_res */ 973/* SPI transfer replacement methods which make use of spi_res */
942 974
943struct spi_replaced_transfers; 975struct spi_replaced_transfers;
944typedef void (*spi_replaced_release_t)(struct spi_master *master, 976typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
945 struct spi_message *msg, 977 struct spi_message *msg,
946 struct spi_replaced_transfers *res); 978 struct spi_replaced_transfers *res);
947/** 979/**
@@ -985,7 +1017,7 @@ extern struct spi_replaced_transfers *spi_replace_transfers(
985 1017
986/* SPI transfer transformation methods */ 1018/* SPI transfer transformation methods */
987 1019
988extern int spi_split_transfers_maxsize(struct spi_master *master, 1020extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
989 struct spi_message *msg, 1021 struct spi_message *msg,
990 size_t maxsize, 1022 size_t maxsize,
991 gfp_t gfp); 1023 gfp_t gfp);
@@ -999,8 +1031,8 @@ extern int spi_split_transfers_maxsize(struct spi_master *master,
999 1031
1000extern int spi_sync(struct spi_device *spi, struct spi_message *message); 1032extern int spi_sync(struct spi_device *spi, struct spi_message *message);
1001extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); 1033extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
1002extern int spi_bus_lock(struct spi_master *master); 1034extern int spi_bus_lock(struct spi_controller *ctlr);
1003extern int spi_bus_unlock(struct spi_master *master); 1035extern int spi_bus_unlock(struct spi_controller *ctlr);
1004 1036
1005/** 1037/**
1006 * spi_sync_transfer - synchronous SPI data transfer 1038 * spi_sync_transfer - synchronous SPI data transfer
@@ -1185,9 +1217,9 @@ struct spi_flash_read_message {
1185/* SPI core interface for flash read support */ 1217/* SPI core interface for flash read support */
1186static inline bool spi_flash_read_supported(struct spi_device *spi) 1218static inline bool spi_flash_read_supported(struct spi_device *spi)
1187{ 1219{
1188 return spi->master->spi_flash_read && 1220 return spi->controller->spi_flash_read &&
1189 (!spi->master->flash_read_supported || 1221 (!spi->controller->flash_read_supported ||
1190 spi->master->flash_read_supported(spi)); 1222 spi->controller->flash_read_supported(spi));
1191} 1223}
1192 1224
1193int spi_flash_read(struct spi_device *spi, 1225int spi_flash_read(struct spi_device *spi,
@@ -1220,7 +1252,7 @@ int spi_flash_read(struct spi_device *spi,
1220 * @irq: Initializes spi_device.irq; depends on how the board is wired. 1252 * @irq: Initializes spi_device.irq; depends on how the board is wired.
1221 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits 1253 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
1222 * from the chip datasheet and board-specific signal quality issues. 1254 * from the chip datasheet and board-specific signal quality issues.
1223 * @bus_num: Identifies which spi_master parents the spi_device; unused 1255 * @bus_num: Identifies which spi_controller parents the spi_device; unused
1224 * by spi_new_device(), and otherwise depends on board wiring. 1256 * by spi_new_device(), and otherwise depends on board wiring.
1225 * @chip_select: Initializes spi_device.chip_select; depends on how 1257 * @chip_select: Initializes spi_device.chip_select; depends on how
1226 * the board is wired. 1258 * the board is wired.
@@ -1261,7 +1293,7 @@ struct spi_board_info {
1261 1293
1262 1294
1263 /* bus_num is board specific and matches the bus_num of some 1295 /* bus_num is board specific and matches the bus_num of some
1264 * spi_master that will probably be registered later. 1296 * spi_controller that will probably be registered later.
1265 * 1297 *
1266 * chip_select reflects how this chip is wired to that master; 1298 * chip_select reflects how this chip is wired to that master;
1267 * it's less than num_chipselect. 1299 * it's less than num_chipselect.
@@ -1295,7 +1327,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
1295/* If you're hotplugging an adapter with devices (parport, usb, etc) 1327/* If you're hotplugging an adapter with devices (parport, usb, etc)
1296 * use spi_new_device() to describe each device. You can also call 1328 * use spi_new_device() to describe each device. You can also call
1297 * spi_unregister_device() to start making that device vanish, but 1329 * spi_unregister_device() to start making that device vanish, but
1298 * normally that would be handled by spi_unregister_master(). 1330 * normally that would be handled by spi_unregister_controller().
1299 * 1331 *
1300 * You can also use spi_alloc_device() and spi_add_device() to use a two 1332 * You can also use spi_alloc_device() and spi_add_device() to use a two
1301 * stage registration sequence for each spi_device. This gives the caller 1333 * stage registration sequence for each spi_device. This gives the caller
@@ -1304,13 +1336,13 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
1304 * be defined using the board info. 1336 * be defined using the board info.
1305 */ 1337 */
1306extern struct spi_device * 1338extern struct spi_device *
1307spi_alloc_device(struct spi_master *master); 1339spi_alloc_device(struct spi_controller *ctlr);
1308 1340
1309extern int 1341extern int
1310spi_add_device(struct spi_device *spi); 1342spi_add_device(struct spi_device *spi);
1311 1343
1312extern struct spi_device * 1344extern struct spi_device *
1313spi_new_device(struct spi_master *, struct spi_board_info *); 1345spi_new_device(struct spi_controller *, struct spi_board_info *);
1314 1346
1315extern void spi_unregister_device(struct spi_device *spi); 1347extern void spi_unregister_device(struct spi_device *spi);
1316 1348
@@ -1318,9 +1350,32 @@ extern const struct spi_device_id *
1318spi_get_device_id(const struct spi_device *sdev); 1350spi_get_device_id(const struct spi_device *sdev);
1319 1351
1320static inline bool 1352static inline bool
1321spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) 1353spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
1322{ 1354{
1323 return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); 1355 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
1324} 1356}
1325 1357
1358
1359/* Compatibility layer */
1360#define spi_master spi_controller
1361
1362#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
1363#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
1364#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
1365#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
1366#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
1367
1368#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
1369#define spi_master_set_devdata(_ctlr, _data) \
1370 spi_controller_set_devdata(_ctlr, _data)
1371#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
1372#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
1373#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
1374#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
1375
1376#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
1377#define devm_spi_register_master(_dev, _ctlr) \
1378 devm_spi_register_controller(_dev, _ctlr)
1379#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
1380
1326#endif /* __LINUX_SPI_H */ 1381#endif /* __LINUX_SPI_H */
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 7e02c983bbe2..f9f702b6ae2e 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -7,37 +7,37 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10DECLARE_EVENT_CLASS(spi_master, 10DECLARE_EVENT_CLASS(spi_controller,
11 11
12 TP_PROTO(struct spi_master *master), 12 TP_PROTO(struct spi_controller *controller),
13 13
14 TP_ARGS(master), 14 TP_ARGS(controller),
15 15
16 TP_STRUCT__entry( 16 TP_STRUCT__entry(
17 __field( int, bus_num ) 17 __field( int, bus_num )
18 ), 18 ),
19 19
20 TP_fast_assign( 20 TP_fast_assign(
21 __entry->bus_num = master->bus_num; 21 __entry->bus_num = controller->bus_num;
22 ), 22 ),
23 23
24 TP_printk("spi%d", (int)__entry->bus_num) 24 TP_printk("spi%d", (int)__entry->bus_num)
25 25
26); 26);
27 27
28DEFINE_EVENT(spi_master, spi_master_idle, 28DEFINE_EVENT(spi_controller, spi_controller_idle,
29 29
30 TP_PROTO(struct spi_master *master), 30 TP_PROTO(struct spi_controller *controller),
31 31
32 TP_ARGS(master) 32 TP_ARGS(controller)
33 33
34); 34);
35 35
36DEFINE_EVENT(spi_master, spi_master_busy, 36DEFINE_EVENT(spi_controller, spi_controller_busy,
37 37
38 TP_PROTO(struct spi_master *master), 38 TP_PROTO(struct spi_controller *controller),
39 39
40 TP_ARGS(master) 40 TP_ARGS(controller)
41 41
42); 42);
43 43
@@ -54,7 +54,7 @@ DECLARE_EVENT_CLASS(spi_message,
54 ), 54 ),
55 55
56 TP_fast_assign( 56 TP_fast_assign(
57 __entry->bus_num = msg->spi->master->bus_num; 57 __entry->bus_num = msg->spi->controller->bus_num;
58 __entry->chip_select = msg->spi->chip_select; 58 __entry->chip_select = msg->spi->chip_select;
59 __entry->msg = msg; 59 __entry->msg = msg;
60 ), 60 ),
@@ -95,7 +95,7 @@ TRACE_EVENT(spi_message_done,
95 ), 95 ),
96 96
97 TP_fast_assign( 97 TP_fast_assign(
98 __entry->bus_num = msg->spi->master->bus_num; 98 __entry->bus_num = msg->spi->controller->bus_num;
99 __entry->chip_select = msg->spi->chip_select; 99 __entry->chip_select = msg->spi->chip_select;
100 __entry->msg = msg; 100 __entry->msg = msg;
101 __entry->frame = msg->frame_length; 101 __entry->frame = msg->frame_length;
@@ -122,7 +122,7 @@ DECLARE_EVENT_CLASS(spi_transfer,
122 ), 122 ),
123 123
124 TP_fast_assign( 124 TP_fast_assign(
125 __entry->bus_num = msg->spi->master->bus_num; 125 __entry->bus_num = msg->spi->controller->bus_num;
126 __entry->chip_select = msg->spi->chip_select; 126 __entry->chip_select = msg->spi->chip_select;
127 __entry->xfer = xfer; 127 __entry->xfer = xfer;
128 __entry->len = xfer->len; 128 __entry->len = xfer->len;